comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
It would be nice if `parseNodes` changed name to reflect its new usage :)
private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroup = halveAt('/', element); List<NodeSelection> nodes = parseNodes(nodesAndGroup.getFirst()); List<NodeSelection> groups = parseNodes(nodesAndGroup.getSecond()); return new SearchPath(nodes, groups); }
List<NodeSelection> groups = parseNodes(nodesAndGroup.getSecond());
private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroups = halveAt('/', element); List<Selection> nodes = parseSelection(nodesAndGroups.getFirst()); List<Selection> groups = parseSelection(nodesAndGroups.getSecond()); return new SearchPath(nodes, groups); }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<NodeSelection> nodes; private final List<NodeSelection> groups; private static final Random random = new Random(); private SearchPath(List<NodeSelection> nodes, List<NodeSelection> group) { this.nodes = nodes; this.groups = group; } private List<Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (NodeSelection node : nodes) { wanted.addAll(node.matches(max)); } List<Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && 
groups.isEmpty(); } private Group selectRandomGroupWithSufficientCoverage(SearchCluster cluster, List<Integer> groupIds) { for (int numRetries = 0; numRetries < groupIds.size()*2; numRetries++) { int index = random.nextInt(groupIds.size()); int groupId = groupIds.get(index); Optional<Group> group = cluster.group(groupId); if (group.isPresent()) { if (group.get().hasSufficientCoverage()) { return group.get(); } } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (groupId + 1) + " groups"); } } return cluster.groups().values().iterator().next(); } private Group selectGroup(SearchCluster cluster) { if ( ! groups.isEmpty()) { List<Integer> potentialGroups = new ArrayList<>(); for (NodeSelection groupSelection : groups) { for (int group = groupSelection.from; group < groupSelection.to; group++) { potentialGroups.add(group); } } return selectRandomGroupWithSufficientCoverage(cluster, potentialGroups); } return selectRandomGroupWithSufficientCoverage(cluster, cluster.groups().keySet().asList()); } private static List<NodeSelection> parseNodes(String nodes) { List<NodeSelection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$|/$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<NodeSelection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); int start = Integer.parseInt(m.group(1)); int end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new 
NodeSelection(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<NodeSelection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = Integer.parseInt(numAndRest.getFirst()); into.add(new NodeSelection(nodeNum, nodeNum + 1)); return numAndRest.getSecond(); } private static Integer parseGroup(String group) { if (group.isEmpty()) { return null; } if ("/".equals(group) || "*".equals(group)) { return null; } return Integer.parseInt(group); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1)); } return new Pair<>(string, ""); } private static void selectionToString(StringBuilder sb, List<NodeSelection> nodes) { boolean first = true; for (NodeSelection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } } @Override public String toString() { StringBuilder sb = new StringBuilder(); selectionToString(sb, nodes); if ( ! groups.isEmpty()) { sb.append('/'); selectionToString(sb, groups); } return sb.toString(); } private static class NodeSelection { private final int from; private final int to; NodeSelection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = Math.min(to, max); return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<Selection> nodes; private final List<Selection> groups; private static final Random random = new Random(); private SearchPath(List<Selection> nodes, List<Selection> groups) { this.nodes = nodes; this.groups = groups; } private List<Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (Selection node : nodes) { wanted.addAll(node.matches(max)); } List<Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && groups.isEmpty(); } private 
Group selectRandomGroupWithSufficientCoverage(SearchCluster cluster, List<Integer> groupIds) { while ( ! groupIds.isEmpty()) { int index = random.nextInt(groupIds.size()); int groupId = groupIds.get(index); Optional<Group> group = cluster.group(groupId); if (group.isPresent()) { if (group.get().hasSufficientCoverage()) { return group.get(); } else { groupIds.remove(index); } } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (groupId + 1) + " groups"); } } return cluster.groups().values().iterator().next(); } private Group selectGroup(SearchCluster cluster) { if ( ! groups.isEmpty()) { List<Integer> potentialGroups = new ArrayList<>(); for (Selection groupSelection : groups) { for (int group = groupSelection.from; group < groupSelection.to; group++) { potentialGroups.add(group); } } return selectRandomGroupWithSufficientCoverage(cluster, potentialGroups); } return selectRandomGroupWithSufficientCoverage(cluster, new ArrayList<>(cluster.groups().keySet())); } private static List<Selection> parseSelection(String nodes) { List<Selection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$|/$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<Selection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); int start = Integer.parseInt(m.group(1)); int end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new Selection(start, end)); return ret; } else { throw new 
InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<Selection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = Integer.parseInt(numAndRest.getFirst()); into.add(new Selection(nodeNum, nodeNum + 1)); return numAndRest.getSecond(); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1)); } return new Pair<>(string, ""); } private static void selectionToString(StringBuilder sb, List<Selection> nodes) { boolean first = true; for (Selection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } } @Override public String toString() { StringBuilder sb = new StringBuilder(); selectionToString(sb, nodes); if ( ! groups.isEmpty()) { sb.append('/'); selectionToString(sb, groups); } return sb.toString(); } private static class Selection { private final int from; private final int to; Selection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = Math.min(to, max); return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
Consider extracting retries as a constant, and include it in the exception message.
public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; for (int i = 0; i < 4; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = getNode(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up trying to fetch an up to date node under lock: " + node.hostname()); }
for (int i = 0; i < 4; ++i) {
public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = getNode(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); }
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; } /** Returns the status of firmware 
checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. * * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... 
inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } public NodeList list(ApplicationId application, State ... inState) { return NodeList.copyOf(getNodes(application, inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } /** Returns a filterable list of all load balancers in this repository */ public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); } /** Returns a filterable list of load balancers belonging to given application */ public LoadBalancerList loadBalancers(ApplicationId application) { return loadBalancers((id) -> id.application().equals(application)); } private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) { return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values()); } public List<Node> getNodes(ApplicationId id, State ... 
inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns the ACL for the node (trusted nodes, networks and ports) */ private NodeAcl getNodeAcl(Node node, NodeList candidates) { Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname)); Set<Integer> trustedPorts = new LinkedHashSet<>(); Set<String> trustedNetworks = new LinkedHashSet<>(); trustedPorts.add(22); candidates.parentOf(node).ifPresent(trustedNodes::add); node.allocation().ifPresent(allocation -> { trustedNodes.addAll(candidates.owner(allocation.owner()).asList()); loadBalancers(allocation.owner()).asList().stream() .map(LoadBalancer::instance) .map(LoadBalancerInstance::networks) .forEach(trustedNetworks::addAll); }); switch (node.type()) { case tenant: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList()); node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList())); if (node.state() == State.ready) { trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList()); } break; case config: trustedNodes.addAll(candidates.asList()); trustedPorts.add(4443); break; case proxy: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedPorts.add(443); trustedPorts.add(4080); trustedPorts.add(4443); break; case controller: trustedPorts.add(4443); trustedPorts.add(443); trustedPorts.add(80); break; default: illegal("Don't know how to create ACL for " + node + " of type " + node.type()); } return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts); } /** * Creates a list of node ACLs which identify which nodes the given node should trust * * @param node Node for which to generate ACLs * @param children Return ACLs for the children of the given node (e.g. 
containers on a Docker host) * @return List of node ACLs */ public List<NodeAcl> getNodeAcls(Node node, boolean children) { NodeList candidates = list(); if (children) { return candidates.childrenOf(node).asList().stream() .map(childNode -> getNodeAcl(childNode, candidates)) .collect(Collectors.toUnmodifiableList()); } return List.of(getNodeAcl(node, candidates)); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if ( ! node.allocation().isPresent()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty()) illegal("Can not set host " + node + " ready. 
Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested()); } /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. 
*/ public void remove(ApplicationTransaction transaction) { NodeList applicationNodes = list(transaction.application()); NodeList activeNodes = applicationNodes.state(State.active); deactivate(activeNodes.asList(), transaction); db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system, Optional.of("Application is removed"), transaction.nested()); applications.remove(transaction); } /** Move nodes to the dirty state */ public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason)); } /** * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. * * @throws IllegalArgumentException if the node has hardware failure */ public Node setDirty(Node node, Agent agent, String reason) { return db.writeTo(State.dirty, node, agent, Optional.of(reason)); } public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! 
hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList()); } /** * Fails this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason)); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, node, agent, reason); } } /* * This method is used by the REST API to handle readying nodes for new allocations. 
For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. * * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. 
This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. 
* Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. 
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    return db.writeTo(nodes, Agent.system, Optional.empty());
}

/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param filter the filter determining the set of nodes where the operation will be performed
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
    // Group matching nodes by the lock they need: unallocated vs. per-application
    List<Node> unallocated = new ArrayList<>();
    ListMap<ApplicationId, Node> byApplication = new ListMap<>();
    for (Node node : db.readNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            byApplication.put(node.allocation().get().owner(), node);
        else
            unallocated.add(node);
    }

    List<Node> results = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocated) {
            // Re-read under the lock; the node may have disappeared in the meantime
            Optional<Node> current = db.readNode(node.hostname());
            if (current.isEmpty()) continue;
            results.add(action.apply(current.get(), lock));
        }
    }
    for (Map.Entry<ApplicationId, List<Node>> entry : byApplication.entrySet()) {
        try (Mutex lock = lock(entry.getKey())) {
            for (Node node : entry.getValue()) {
                Optional<Node> current = db.readNode(node.hostname());
                if (current.isEmpty()) continue;
                results.add(action.apply(current.get(), lock));
            }
        }
    }
    return results;
}

public boolean canAllocateTenantNodeTo(Node host) {
    return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}

public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;

    if (dynamicProvisioning)
        return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
    else
        return host.state() == State.active;
}

/** Returns the time keeper of this system */
public Clock clock() { return clock; }

/** Returns the zone of this system */
public Zone zone() { return zone; }

/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }

/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); }

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(Node node) {
    return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname()));
}

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(String hostname) {
    return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname));
}

/** Allocated nodes are locked through their owning application; unallocated nodes through the inactive lock. */
private Mutex lock(Node node) {
    return node.allocation().isPresent() ? lock(node.allocation().get().owner())
                                         : lockUnallocated();
}

private void illegal(String message) { throw new IllegalArgumentException(message); }

}
class NodeRepository extends AbstractComponent {

    private static final Logger log = Logger.getLogger(NodeRepository.class.getName());

    private final CuratorDatabaseClient db;
    private final Clock clock;
    private final Zone zone;
    private final NodeFlavors flavors;
    private final HostResourcesCalculator resourcesCalculator;
    private final NameResolver nameResolver;
    private final OsVersions osVersions;
    private final InfrastructureVersions infrastructureVersions;
    private final FirmwareChecks firmwareChecks;
    private final ContainerImages containerImages;
    private final JobControl jobControl;
    private final Applications applications;
    private final int spareCount;

    /**
     * Creates a node repository from a zookeeper provider.
     * This will use the system time to make time-sensitive decisions
     */
    @Inject
    public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider, Curator curator,
                          Zone zone, FlagSource flagSource) {
        this(flavors,
             provisionServiceProvider,
             curator,
             Clock.systemUTC(),
             zone,
             new DnsNameResolver(),
             DockerImage.fromString(config.containerImage())
                        .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())),
             flagSource,
             config.useCuratorClientCache(),
             // One spare in production zones with static provisioning, otherwise none
             zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0,
             config.nodeCacheSize());
    }

    /**
     * Creates a node repository from a zookeeper provider and a clock instance
     * which will be used for time-sensitive decisions.
     */
    public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator,
                          Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage,
                          FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) {
        this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
        this.zone = zone;
        this.clock = clock;
        this.flavors = flavors;
        this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator();
        this.nameResolver = nameResolver;
        this.osVersions = new OsVersions(this);
        this.infrastructureVersions = new InfrastructureVersions(db);
        this.firmwareChecks = new FirmwareChecks(db, clock);
        this.containerImages = new ContainerImages(db, containerImage);
        this.jobControl = new JobControl(new JobControlFlags(db, flagSource));
        this.applications = new Applications(db);
        this.spareCount = spareCount;
        rewriteNodes();
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    private void rewriteNodes() {
        Instant started = clock.instant();
        int rewritten = 0;
        for (State state : State.values()) {
            List<Node> nodes = db.readNodes(state);
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            rewritten += nodes.size();
        }
        Instant finished = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", rewritten, Duration.between(started, finished)));
    }

    /** Returns the curator database client used by this */
    public CuratorDatabaseClient database() { return db; }

    /** Returns the name resolver used to resolve hostname and ip addresses */
    public NameResolver nameResolver() { return nameResolver; }

    /** Returns the OS versions to use for nodes in this */
    public OsVersions osVersions() { return osVersions; }

    /** Returns the infrastructure versions to use for nodes in this */
    public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }

    /** Returns the status of firmware
checks for hosts managed by this. */
    public FirmwareChecks firmwareChecks() { return firmwareChecks; }

    /** Returns the docker images to use for nodes in this. */
    public ContainerImages containerImages() { return containerImages; }

    /** Returns the status of maintenance jobs managed by this. */
    public JobControl jobControl() { return jobControl; }

    /** Returns this node repo's view of the applications deployed to it */
    public Applications applications() { return applications; }

    public NodeFlavors flavors() { return flavors; }

    public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; }

    /** The number of nodes we should ensure has free capacity for node failures whenever possible */
    public int spareCount() { return spareCount; }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> getNode(String hostname, State ... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns all nodes in any of the given states.
     *
     * @param inState the states to return nodes from. If no states are given, all nodes are returned
     * @return the nodes found in any of the given states
     */
    public List<Node> getNodes(State ... inState) {
        return new ArrayList<>(db.readNodes(inState));
    }

    /**
     * Finds and returns the nodes of the given type in any of the given states.
     *
     * @param type the node type to return
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the nodes of the given type found in any of the given states
     */
    public List<Node> getNodes(NodeType type, State ... inState) {
        return db.readNodes(inState).stream()
                 .filter(node -> node.type().equals(type))
                 .collect(Collectors.toList());
    }

    /** Returns a filterable list of nodes in this repository in any of the given states */
    public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); }

    public NodeList list(ApplicationId application, State ... inState) { return NodeList.copyOf(getNodes(application, inState)); }

    /** Returns a filterable list of all nodes of an application */
    public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); }

    /** Returns a filterable list of all load balancers in this repository */
    public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); }

    /** Returns a filterable list of load balancers belonging to given application */
    public LoadBalancerList loadBalancers(ApplicationId application) {
        return loadBalancers((id) -> id.application().equals(application));
    }

    private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
        return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
    }

    public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); }

    public List<Node> getInactive() { return db.readNodes(State.inactive); }

    public List<Node> getFailed() { return db.readNodes(State.failed); }

    /**
     * Returns the ACL for the node (trusted nodes, networks and ports)
     */
    private NodeAcl getNodeAcl(Node node, NodeList candidates) {
        Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
        Set<Integer> trustedPorts = new LinkedHashSet<>();
        Set<String> trustedNetworks = new LinkedHashSet<>();

        trustedPorts.add(22);                                   // port 22 is always trusted
        candidates.parentOf(node).ifPresent(trustedNodes::add); // the parent host is always trusted
        node.allocation().ifPresent(allocation -> {
            // All nodes of the same application, and the networks of its load balancers, are trusted
            trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
            loadBalancers(allocation.owner()).asList().stream()
                                             .map(LoadBalancer::instance)
                                             .map(LoadBalancerInstance::networks)
                                             .forEach(trustedNetworks::addAll);
        });

        switch (node.type()) {
            case tenant:
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
                node.allocation().ifPresent(allocation ->
                        trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList()));
                if (node.state() == State.ready) {
                    trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
                }
                break;
            case config:
                trustedNodes.addAll(candidates.asList());
                trustedPorts.add(4443);
                break;
            case proxy:
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedPorts.add(443);
                trustedPorts.add(4080);
                trustedPorts.add(4443);
                break;
            case controller:
                trustedPorts.add(4443);
                trustedPorts.add(443);
                trustedPorts.add(80);
                break;
            default:
                illegal("Don't know how to create ACL for " + node + " of type " + node.type());
        }
        return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
    }

    /**
     * Creates a list of node ACLs which identify which nodes the given node should trust
     *
     * @param node Node for which to generate ACLs
     * @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
     * @return List of node ACLs
     */
    public List<NodeAcl> getNodeAcls(Node node, boolean children) {
        NodeList candidates = list();
        if (children) {
            return candidates.childrenOf(node).asList().stream()
                             .map(child -> getNodeAcl(child, candidates))
                             .collect(Collectors.toUnmodifiableList());
        }
        return List.of(getNodeAcl(node, candidates));
    }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(State.active);
        if (activeNodes.size() <= 5) return true; // Too few nodes to draw conclusions from
        NodeList downNodes = activeNodes.down();
        return (double) downNodes.size() / (double) activeNodes.size() <= 0.2;
    }

    /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
    public List<Node> addDockerNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
                illegal("Cannot add " + node + ": This is not a docker node");
            if (node.allocation().isEmpty())
                illegal("Cannot add " + node + ": Docker containers needs to be allocated");
            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty()) illegal("Can not set host " + node + " ready. 
Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested()); } /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. 
*/
    public void remove(ApplicationTransaction transaction) {
        NodeList applicationNodes = list(transaction.application());
        NodeList activeNodes = applicationNodes.state(State.active);
        deactivate(activeNodes.asList(), transaction);
        // Everything that was not active goes straight to dirty
        db.writeTo(State.dirty,
                   applicationNodes.except(activeNodes.asSet()).asList(),
                   Agent.system,
                   Optional.of("Application is removed"),
                   transaction.nested());
        applications.remove(transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
    }

    /**
     * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     *
     * @throws IllegalArgumentException if the node has hardware failure
     */
    public Node setDirty(Node node, Agent agent, String reason) {
        return db.writeTo(State.dirty, node, agent, Optional.of(reason));
    }

    /** Moves the node with this hostname, and (for hosts) all its children, to dirty. */
    public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = getNode(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost()
                        ? Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty))
                        : Stream.of(nodeToDirty))
                .filter(node -> node.state() != State.dirty)
                .collect(Collectors.toList());

        // Refuse the whole operation if any node is in a state that cannot go dirty
        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                                                              .filter(node -> node.state() != State.provisioned)
                                                              .filter(node -> node.state() != State.failed)
                                                              .filter(node -> node.state() != State.parked)
                                                              .filter(node -> node.state() != State.breakfixed)
                                                              .map(Node::hostname)
                                                              .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty +
                    " are not in states [provisioned, failed, parked, breakfixed]");

        return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.failed, agent, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     *
     * @return List of all the failed nodes in their new state
     */
    public List<Node> failRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.active, agent, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found"));
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            List<Node> removed = removeChildren(node, false);
            removed.add(move(node, State.breakfixed, agent, Optional.of(reason)));
            return removed;
        }
    }

    /** Moves all children of the given host, and finally the host itself, to the given state. */
    private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child, toState, agent, reason))
                                 .collect(Collectors.toList());
        moved.add(move(hostname, true, toState, agent, reason));
        return moved;
    }

    /** Resolves the hostname, optionally strips its allocation, and moves it to the given state. */
    private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
        if (!keepAllocation && node.allocation().isPresent()) {
            node = node.withoutAllocation();
        }
        return move(node, toState, agent, reason);
    }

    /** Moves a node to the given state, under the appropriate lock. */
    private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
        if (toState == Node.State.active && node.allocation().isEmpty())
            illegal("Could not set " + node + " active. It has no allocation.");

        try (Mutex lock = lock(node)) {
            if (toState == State.active) {
                // No two active nodes of the same application may share cluster and index
                for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            return db.writeTo(toState, node, agent, reason);
        }
    }

    /*
     * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == State.ready) return node;

        Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);

            if (node.type().isHost()) {
                List<Node> removed = removeChildren(node, force);
                if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
                    db.removeNodes(List.of(node));
                else {
                    // Static tenant hosts are kept around as deprovisioned, with their IP config wiped
                    node = node.with(IP.Config.EMPTY);
                    move(node, State.deprovisioned, Agent.system, Optional.empty());
                }
                removed.add(node);
                return removed;
            }
            else {
                List<Node> removed = List.of(node);
                db.removeNodes(removed);
                return removed;
            }
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        db.removeNodes(List.of(node));
    }

    private List<Node> removeChildren(Node node, boolean force) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node: node is unallocated
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *      If only removing the container node: node in state ready
     *      If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent())
            illegal(node + " is currently allocated and cannot be removed");

        if (!node.type().isHost() && !removingAsChild) {
            if (node.state() != State.ready)
                illegal(node + " can not be removed as it is not in the state " + State.ready);
        }
        else if (!node.type().isHost()) { // removing a child node, along with its parent
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
            if ( ! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
        else { // a host
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
            if (! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Throws if given node cannot be breakfixed.
     * Breakfix is allowed if the following is true:
     *  - Node is tenant host
     *  - Node is in zone without dynamic provisioning
     *  - Node is in parked or failed state
     */
    private void requireBreakfixable(Node node) {
        if (zone().getCloud().dynamicProvisioning()) {
            illegal("Can not breakfix in zone: " + zone());
        }
        if (node.type() != NodeType.host) {
            illegal(node + " can not be breakfixed as it is not a tenant host");
        }
        Set<State> legalStates = EnumSet.of(State.failed, State.parked);
        if (! legalStates.contains(node.state())) {
            // Fixed message: previously said "can not be removed", copy-pasted from requireRemovable
            illegal(node + " can not be breakfixed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(NodeFilter filter) {
        return performOn(StateFilter.from(State.active, filter),
                         (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                               lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(NodeFilter filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! 
filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! 
host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (dynamicProvisioning) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname())); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname)); } private Mutex lock(Node node) { return node.allocation().isPresent() ? 
lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
Yes, done.
/**
 * Returns the unallocated/application lock together with a fresh copy of the node
 * acquired under that lock, or empty if the node no longer exists.
 *
 * Between reading a (possibly stale) node and acquiring its lock, the node may be
 * (de)allocated, which changes which lock guards it. We therefore re-read the node
 * after locking and verify the owner is unchanged; otherwise we release and retry
 * with the fresh node, a bounded number of times.
 *
 * @param node the (possibly stale) node to lock and re-read
 * @return the node under its lock, or empty if it no longer exists
 * @throws IllegalStateException if the owner keeps changing and we exhaust all retries
 */
public Optional<NodeMutex> lockAndGet(Node node) {
    Node staleNode = node;

    final int maxRetries = 4; // named instead of a magic literal, and reported on give-up
    for (int i = 0; i < maxRetries; ++i) {
        Mutex lockToClose = lock(staleNode);
        try {
            // Prefer the state we last saw the node in; fall back to any state.
            Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state());
            if (freshNode.isEmpty()) {
                freshNode = getNode(staleNode.hostname());
                if (freshNode.isEmpty()) {
                    return Optional.empty();
                }
            }

            // The acquired lock is keyed on the node's owner (or the unallocated lock),
            // so it is only valid if the owner did not change while we were acquiring it.
            if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                               staleNode.allocation().map(Allocation::owner))) {
                NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                lockToClose = null; // ownership of the lock transfers to the returned NodeMutex
                return Optional.of(nodeMutex);
            }

            // Owner changed under us: retry with the fresh node so we take the right lock.
            staleNode = freshNode.get();
        } finally {
            if (lockToClose != null) lockToClose.close();
        }
    }

    throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                    "fetching an up to date node under lock: " + node.hostname());
}
Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state());
/**
 * Returns the unallocated/application lock together with a fresh copy of the node
 * acquired under that lock, or empty if the node no longer exists.
 *
 * NOTE(review): between reading {@code staleNode} and acquiring its lock, the node may be
 * (de)allocated, changing which lock guards it; hence the re-read-and-recheck loop below.
 *
 * @param node the (possibly stale) node to lock and re-read
 * @return the node under its lock, or empty if it no longer exists
 * @throws IllegalStateException if the owner keeps changing and all retries are exhausted
 */
public Optional<NodeMutex> lockAndGet(Node node) {
    Node staleNode = node;
    // Bounded number of lock/re-read attempts before giving up.
    final int maxRetries = 4;
    for (int i = 0; i < maxRetries; ++i) {
        Mutex lockToClose = lock(staleNode);
        try {
            // Prefer the state we last saw the node in; fall back to looking it up in any state.
            Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state());
            if (freshNode.isEmpty()) {
                freshNode = getNode(staleNode.hostname());
                if (freshNode.isEmpty()) {
                    return Optional.empty();
                }
            }
            // The lock taken above is keyed on the node's owner (or the unallocated lock);
            // it is only valid if the owner is unchanged after re-reading under the lock.
            if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                               staleNode.allocation().map(Allocation::owner))) {
                NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                lockToClose = null; // ownership of the lock transfers to the returned NodeMutex
                return Optional.of(nodeMutex);
            }
            // Owner changed while acquiring the lock: retry with the fresh node.
            staleNode = freshNode.get();
        } finally {
            if (lockToClose != null) lockToClose.close();
        }
    }
    throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                    "fetching an up to date node under lock: " + node.hostname());
}
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; } /** Returns the status of firmware 
checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. * * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... 
inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } public NodeList list(ApplicationId application, State ... inState) { return NodeList.copyOf(getNodes(application, inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } /** Returns a filterable list of all load balancers in this repository */ public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); } /** Returns a filterable list of load balancers belonging to given application */ public LoadBalancerList loadBalancers(ApplicationId application) { return loadBalancers((id) -> id.application().equals(application)); } private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) { return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values()); } public List<Node> getNodes(ApplicationId id, State ... 
inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns the ACL for the node (trusted nodes, networks and ports) */ private NodeAcl getNodeAcl(Node node, NodeList candidates) { Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname)); Set<Integer> trustedPorts = new LinkedHashSet<>(); Set<String> trustedNetworks = new LinkedHashSet<>(); trustedPorts.add(22); candidates.parentOf(node).ifPresent(trustedNodes::add); node.allocation().ifPresent(allocation -> { trustedNodes.addAll(candidates.owner(allocation.owner()).asList()); loadBalancers(allocation.owner()).asList().stream() .map(LoadBalancer::instance) .map(LoadBalancerInstance::networks) .forEach(trustedNetworks::addAll); }); switch (node.type()) { case tenant: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList()); node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList())); if (node.state() == State.ready) { trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList()); } break; case config: trustedNodes.addAll(candidates.asList()); trustedPorts.add(4443); break; case proxy: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedPorts.add(443); trustedPorts.add(4080); trustedPorts.add(4443); break; case controller: trustedPorts.add(4443); trustedPorts.add(443); trustedPorts.add(80); break; default: illegal("Don't know how to create ACL for " + node + " of type " + node.type()); } return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts); } /** * Creates a list of node ACLs which identify which nodes the given node should trust * * @param node Node for which to generate ACLs * @param children Return ACLs for the children of the given node (e.g. 
containers on a Docker host) * @return List of node ACLs */ public List<NodeAcl> getNodeAcls(Node node, boolean children) { NodeList candidates = list(); if (children) { return candidates.childrenOf(node).asList().stream() .map(childNode -> getNodeAcl(childNode, candidates)) .collect(Collectors.toUnmodifiableList()); } return List.of(getNodeAcl(node, candidates)); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if ( ! node.allocation().isPresent()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty()) illegal("Can not set host " + node + " ready. 
Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested()); } /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. 
*/ public void remove(ApplicationTransaction transaction) { NodeList applicationNodes = list(transaction.application()); NodeList activeNodes = applicationNodes.state(State.active); deactivate(activeNodes.asList(), transaction); db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system, Optional.of("Application is removed"), transaction.nested()); applications.remove(transaction); } /** Move nodes to the dirty state */ public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason)); } /** * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. * * @throws IllegalArgumentException if the node has hardware failure */ public Node setDirty(Node node, Agent agent, String reason) { return db.writeTo(State.dirty, node, agent, Optional.of(reason)); } public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! 
hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList()); } /** * Fails this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason)); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, node, agent, reason); } } /* * This method is used by the REST API to handle readying nodes for new allocations. 
For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. * * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. 
This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. 
* Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. 
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param filter the filter determining the set of nodes where the operation will be performed
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();

        // Group matching nodes by the lock that must be held while mutating them
        for (Node node : db.readNodes()) {
            if ( ! filter.matches(node)) continue;
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        // Perform the action while holding the proper lock, re-reading each node under the lock
        // since it may have changed (or disappeared) between the read above and lock acquisition
        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue; // node went away while we were waiting for the lock
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname());
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    /** Returns whether a tenant node may be allocated on the given host in this zone. */
    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
        if ( ! host.type().canRun(NodeType.tenant)) return false;
        if (host.status().wantToRetire()) return false;
        if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;

        // With dynamic provisioning, hosts which are not yet active may still receive allocations
        if (dynamicProvisioning)
            return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
        else
            return host.state() == State.active;
    }

    /** Returns the time keeper of this system */
    public Clock clock() { return clock; }

    /** Returns the zone of this system */
    public Zone zone() { return zone; }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname()));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname));
    }

    /** Returns the application lock for allocated nodes, the unallocated lock otherwise. */
    private Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private void illegal(String message) { throw new IllegalArgumentException(message); }

}
class NodeRepository extends AbstractComponent {

    private static final Logger log = Logger.getLogger(NodeRepository.class.getName());

    private final CuratorDatabaseClient db;
    private final Clock clock;
    private final Zone zone;
    private final NodeFlavors flavors;
    private final HostResourcesCalculator resourcesCalculator;
    private final NameResolver nameResolver;
    private final OsVersions osVersions;
    private final InfrastructureVersions infrastructureVersions;
    private final FirmwareChecks firmwareChecks;
    private final ContainerImages containerImages;
    private final JobControl jobControl;
    private final Applications applications;
    private final int spareCount;

    /**
     * Creates a node repository from a zookeeper provider.
     * This will use the system time to make time-sensitive decisions
     */
    @Inject
    public NodeRepository(NodeRepositoryConfig config,
                          NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Zone zone,
                          FlagSource flagSource) {
        this(flavors,
             provisionServiceProvider,
             curator,
             Clock.systemUTC(),
             zone,
             new DnsNameResolver(),
             DockerImage.fromString(config.containerImage())
                        .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())),
             flagSource,
             config.useCuratorClientCache(),
             // One spare in production zones without dynamic provisioning, otherwise none
             zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0,
             config.nodeCacheSize());
    }

    /**
     * Creates a node repository from a zookeeper provider and a clock instance
     * which will be used for time-sensitive decisions.
     */
    public NodeRepository(NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Clock clock,
                          Zone zone,
                          NameResolver nameResolver,
                          DockerImage containerImage,
                          FlagSource flagSource,
                          boolean useCuratorClientCache,
                          int spareCount,
                          long nodeCacheSize) {
        this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
        this.zone = zone;
        this.clock = clock;
        this.flavors = flavors;
        this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator();
        this.nameResolver = nameResolver;
        this.osVersions = new OsVersions(this);
        this.infrastructureVersions = new InfrastructureVersions(db);
        this.firmwareChecks = new FirmwareChecks(db, clock);
        this.containerImages = new ContainerImages(db, containerImage);
        this.jobControl = new JobControl(new JobControlFlags(db, flagSource));
        this.applications = new Applications(db);
        this.spareCount = spareCount;
        rewriteNodes(); // migrate any nodes stored in an older serialization format
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    private void rewriteNodes() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (State state : State.values()) {
            List<Node> nodes = db.readNodes(state);
            // TODO(review): no locking here — presumably safe because this runs during construction; confirm
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /** Returns the curator database client used by this */
    public CuratorDatabaseClient database() { return db; }

    /** @return The name resolver used to resolve hostname and ip addresses */
    public NameResolver nameResolver() { return nameResolver; }

    /** Returns the OS versions to use for nodes in this */
    public OsVersions osVersions() { return osVersions; }

    /** Returns the infrastructure versions to use for nodes in this */
    public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }

    /** Returns the status of firmware
checks for hosts managed by this. */
    public FirmwareChecks firmwareChecks() { return firmwareChecks; }

    /** Returns the docker images to use for nodes in this. */
    public ContainerImages containerImages() { return containerImages; }

    /** Returns the status of maintenance jobs managed by this. */
    public JobControl jobControl() { return jobControl; }

    /** Returns this node repo's view of the applications deployed to it */
    public Applications applications() { return applications; }

    public NodeFlavors flavors() { return flavors; }

    public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; }

    /** The number of nodes we should ensure has free capacity for node failures whenever possible */
    public int spareCount() { return spareCount; }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> getNode(String hostname, State ... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns all nodes in any of the given states.
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the node, or empty if it was not found in any of the given states
     */
    public List<Node> getNodes(State ... inState) {
        return new ArrayList<>(db.readNodes(inState));
    }

    /**
     * Finds and returns the nodes of the given type in any of the given states.
     *
     * @param type the node type to return
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the node, or empty if it was not found in any of the given states
     */
    public List<Node> getNodes(NodeType type, State ... inState) {
        return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
    }

    /** Returns a filterable list of nodes in this repository in any of the given states */
    public NodeList list(State ... inState) {
        return NodeList.copyOf(getNodes(inState));
    }

    /** Returns a filterable list of an application's nodes in any of the given states */
    public NodeList list(ApplicationId application, State ... inState) {
        return NodeList.copyOf(getNodes(application, inState));
    }

    /** Returns a filterable list of all nodes of an application */
    public NodeList list(ApplicationId application) {
        return NodeList.copyOf(getNodes(application));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(getNodes(), lock);
    }

    /** Returns a filterable list of all load balancers in this repository */
    public LoadBalancerList loadBalancers() {
        return loadBalancers((ignored) -> true);
    }

    /** Returns a filterable list of load balancers belonging to given application */
    public LoadBalancerList loadBalancers(ApplicationId application) {
        return loadBalancers((id) -> id.application().equals(application));
    }

    private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
        return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
    }

    public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); }

    public List<Node> getInactive() { return db.readNodes(State.inactive); }

    public List<Node> getFailed() { return db.readNodes(State.failed); }

    /**
     * Returns the ACL for the node (trusted nodes, networks and ports)
     */
    private NodeAcl getNodeAcl(Node node, NodeList candidates) {
        Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
        Set<Integer> trustedPorts = new LinkedHashSet<>();
        Set<String> trustedNetworks = new LinkedHashSet<>();

        // In all cases: trust SSH, our parent host, nodes of the same application,
        // and the networks of that application's load balancers
        trustedPorts.add(22);
        candidates.parentOf(node).ifPresent(trustedNodes::add);
        node.allocation().ifPresent(allocation -> {
            trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
            loadBalancers(allocation.owner()).asList().stream()
                                             .map(LoadBalancer::instance)
                                             .map(LoadBalancerInstance::networks)
                                             .forEach(trustedNetworks::addAll);
        });

        switch (node.type()) {
            case tenant:
                // Trust config and proxy nodes, and the parent hosts of the application's nodes
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
                node.allocation().ifPresent(allocation ->
                        trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList()));
                if (node.state() == State.ready) {
                    // Ready tenant nodes trust all other tenant nodes
                    trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
                }
                break;

            case config:
                // Config nodes trust everything, plus port 4443
                trustedNodes.addAll(candidates.asList());
                trustedPorts.add(4443);
                break;

            case proxy:
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedPorts.add(443);
                trustedPorts.add(4080);
                trustedPorts.add(4443);
                break;

            case controller:
                trustedPorts.add(4443);
                trustedPorts.add(443);
                trustedPorts.add(80);
                break;

            default:
                illegal("Don't know how to create ACL for " + node + " of type " + node.type());
        }
        return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
    }

    /**
     * Creates a list of node ACLs which identify which nodes the given node should trust
     *
     * @param node Node for which to generate ACLs
     * @param children Return ACLs for the children of the given node (e.g.
containers on a Docker host)
     * @return List of node ACLs
     */
    public List<NodeAcl> getNodeAcls(Node node, boolean children) {
        NodeList candidates = list();
        if (children) {
            return candidates.childrenOf(node).asList().stream()
                             .map(childNode -> getNodeAcl(childNode, candidates))
                             .collect(Collectors.toUnmodifiableList());
        }
        return List.of(getNodeAcl(node, candidates));
    }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(State.active);
        if (activeNodes.size() <= 5) return true; // Too few nodes to draw a conclusion; assume working
        NodeList downNodes = activeNodes.down();
        // Broken when more than 20% of active nodes are down
        return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
    }

    /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
    public List<Node> addDockerNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
                illegal("Cannot add " + node + ": This is not a docker node");
            if ( ! node.allocation().isPresent())
                illegal("Cannot add " + node + ": Docker containers needs to be allocated");
            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
*/
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd = new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Reject duplicates within the argument list itself
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = getNode(node.hostname());
                if (existing.isPresent()) {
                    if (existing.get().state() != State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Merge history, reports, fail count and firmware check from the deprovisioned incarnation
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    nodesToRemove.add(existing.get());
                }

                nodesToAdd.add(node);
            }
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent);
            db.removeNodes(nodesToRemove);
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != State.provisioned && node.state() != State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty())
                            illegal("Can not set host " + node + " ready. Its IP address pool is empty.");
                        // Clear any wantToRetire flag when recycling the node
                        return node.withWantToRetire(false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());

            return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    /** Sets a single node ready; a no-op if the node is already ready. */
    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));

        if (nodeToReady.state() == State.ready) return nodeToReady;
        return setReady(List.of(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream()
                    .map(node -> node.with(node.allocation().get().removable(true)))
                    .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
        return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested());
    }

    /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty.
*/
    public void remove(ApplicationTransaction transaction) {
        NodeList applicationNodes = list(transaction.application());
        NodeList activeNodes = applicationNodes.state(State.active);
        deactivate(activeNodes.asList(), transaction);
        db.writeTo(State.dirty,
                   applicationNodes.except(activeNodes.asSet()).asList(),
                   Agent.system,
                   Optional.of("Application is removed"),
                   transaction.nested());
        applications.remove(transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
    }

    /**
     * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     *
     * @throws IllegalArgumentException if the node has hardware failure
     */
    public Node setDirty(Node node, Agent agent, String reason) {
        return db.writeTo(State.dirty, node, agent, Optional.of(reason));
    }

    /** Sets the given node, and all its children if it is a host, dirty. */
    public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = getNode(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

        // Hosts are dirtied together with all their children (skipping those already dirty)
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                 Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                 Stream.of(nodeToDirty))
                        .filter(node -> node.state() != State.dirty)
                        .collect(Collectors.toList());

        // All-or-nothing: refuse if any of them is in a state that may not be dirtied
        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                .filter(node -> node.state() != State.provisioned)
                .filter(node -> node.state() != State.failed)
                .filter(node -> node.state() != State.parked)
                .filter(node -> node.state() != State.breakfixed)
                .map(Node::hostname)
                .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

        return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.failed, agent, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     *
     * @return List of all the failed nodes in their new state
     */
    public List<Node> failRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.active, agent, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
*/
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found"));

        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            List<Node> removed = removeChildren(node, false);
            removed.add(move(node, State.breakfixed, agent, Optional.of(reason)));
            return removed;
        }
    }

    /** Moves all children of hostname, then hostname itself, to the given state. */
    private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child, toState, agent, reason))
                                 .collect(Collectors.toList());

        moved.add(move(hostname, true, toState, agent, reason));
        return moved;
    }

    /** Moves the node with this hostname to the given state, optionally dropping its allocation. */
    private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));

        if (!keepAllocation && node.allocation().isPresent()) {
            node = node.withoutAllocation();
        }

        return move(node, toState, agent, reason);
    }

    /** Moves a node to the given state, under the appropriate lock. */
    private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
        if (toState == Node.State.active && node.allocation().isEmpty())
            illegal("Could not set " + node + " active. It has no allocation.");

        try (Mutex lock = lock(node)) {
            if (toState == State.active) {
                // Guard against two active nodes claiming the same cluster and index for this application
                for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            return db.writeTo(toState, node, agent, reason);
        }
    }

    /*
     * This method is used by the REST API to handle readying nodes for new allocations.
For tenant docker
 * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            // Tenant docker containers are removed outright instead of being recycled to ready
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == State.ready) return node; // Already available, nothing to do

        // Refuse to ready a node whose parent host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        return removeRecursively(node, false);
    }

    /**
     * Removes this node and (for hosts) all its children.
     *
     * @param force when true, skips the removability checks in {@link #requireRemovable}
     * @return the removed (or, for static hosts, deprovisioned) nodes
     */
    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);

            if (node.type().isHost()) {
                // Remove children first, then handle the host itself
                List<Node> removed = removeChildren(node, force);
                if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
                    db.removeNodes(List.of(node));
                else {
                    // Statically provisioned hosts are kept as deprovisioned (with IP config cleared)
                    // so that their history survives — see forget() for full removal
                    node = node.with(IP.Config.EMPTY);
                    move(node, State.deprovisioned, Agent.system, Optional.empty());
                }
                removed.add(node);
                return removed;
            }
            else {
                List<Node> removed = List.of(node);
                db.removeNodes(removed);
                return removed;
            }
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        db.removeNodes(List.of(node));
    }

    /** Removes (after verifying removability) all children of the given node. */
    private List<Node> removeChildren(Node node, boolean force) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node: node is unallocated
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *      If only removing the container node: node in state ready
     *      If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
     *
     * @param removingAsChild true when this node is being removed because its parent host is removed
     * @param force skips all checks when true
     */
    private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent())
            illegal(node + " is currently allocated and cannot be removed");

        if (!node.type().isHost() && !removingAsChild) {
            if (node.state() != State.ready)
                illegal(node + " can not be removed as it is not in the state " + State.ready);
        }
        else if (!node.type().isHost()) { // a child node removed together with its parent
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
            if ( ! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
        else { // a host
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
            if (! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Throws if given node cannot be breakfixed.
* Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. 
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param filter the filter determining the set of nodes where the operation will be performed
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();

        // Group matching nodes by the lock that must be held while mutating them
        for (Node node : db.readNodes()) {
            if ( ! filter.matches(node)) continue;
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        // Perform the action while holding the proper lock, re-reading each node under the lock
        // since it may have changed (or disappeared) between the read above and lock acquisition
        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue; // node went away while we were waiting for the lock
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname());
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    /** Returns whether a tenant node may be allocated on the given host in this zone. */
    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
        if ( ! host.type().canRun(NodeType.tenant)) return false;
        if (host.status().wantToRetire()) return false;
        if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;

        // With dynamic provisioning, hosts which are not yet active may still receive allocations
        if (dynamicProvisioning)
            return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
        else
            return host.state() == State.active;
    }

    /** Returns the time keeper of this system */
    public Clock clock() { return clock; }

    /** Returns the zone of this system */
    public Zone zone() { return zone; }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname()));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname));
    }

    /** Returns the application lock for allocated nodes, the unallocated lock otherwise. */
    private Mutex lock(Node node) { return node.allocation().isPresent() ?
lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
parseSelection it is. NodeSelection renamed to Selection.
private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroup = halveAt('/', element); List<NodeSelection> nodes = parseNodes(nodesAndGroup.getFirst()); List<NodeSelection> groups = parseNodes(nodesAndGroup.getSecond()); return new SearchPath(nodes, groups); }
List<NodeSelection> groups = parseNodes(nodesAndGroup.getSecond());
private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroups = halveAt('/', element); List<Selection> nodes = parseSelection(nodesAndGroups.getFirst()); List<Selection> groups = parseSelection(nodesAndGroups.getSecond()); return new SearchPath(nodes, groups); }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<NodeSelection> nodes; private final List<NodeSelection> groups; private static final Random random = new Random(); private SearchPath(List<NodeSelection> nodes, List<NodeSelection> group) { this.nodes = nodes; this.groups = group; } private List<Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (NodeSelection node : nodes) { wanted.addAll(node.matches(max)); } List<Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && 
groups.isEmpty(); } private Group selectRandomGroupWithSufficientCoverage(SearchCluster cluster, List<Integer> groupIds) { for (int numRetries = 0; numRetries < groupIds.size()*2; numRetries++) { int index = random.nextInt(groupIds.size()); int groupId = groupIds.get(index); Optional<Group> group = cluster.group(groupId); if (group.isPresent()) { if (group.get().hasSufficientCoverage()) { return group.get(); } } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (groupId + 1) + " groups"); } } return cluster.groups().values().iterator().next(); } private Group selectGroup(SearchCluster cluster) { if ( ! groups.isEmpty()) { List<Integer> potentialGroups = new ArrayList<>(); for (NodeSelection groupSelection : groups) { for (int group = groupSelection.from; group < groupSelection.to; group++) { potentialGroups.add(group); } } return selectRandomGroupWithSufficientCoverage(cluster, potentialGroups); } return selectRandomGroupWithSufficientCoverage(cluster, cluster.groups().keySet().asList()); } private static List<NodeSelection> parseNodes(String nodes) { List<NodeSelection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$|/$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<NodeSelection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); int start = Integer.parseInt(m.group(1)); int end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new 
NodeSelection(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<NodeSelection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = Integer.parseInt(numAndRest.getFirst()); into.add(new NodeSelection(nodeNum, nodeNum + 1)); return numAndRest.getSecond(); } private static Integer parseGroup(String group) { if (group.isEmpty()) { return null; } if ("/".equals(group) || "*".equals(group)) { return null; } return Integer.parseInt(group); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1)); } return new Pair<>(string, ""); } private static void selectionToString(StringBuilder sb, List<NodeSelection> nodes) { boolean first = true; for (NodeSelection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } } @Override public String toString() { StringBuilder sb = new StringBuilder(); selectionToString(sb, nodes); if ( ! groups.isEmpty()) { sb.append('/'); selectionToString(sb, groups); } return sb.toString(); } private static class NodeSelection { private final int from; private final int to; NodeSelection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = Math.min(to, max); return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<Selection> nodes; private final List<Selection> groups; private static final Random random = new Random(); private SearchPath(List<Selection> nodes, List<Selection> groups) { this.nodes = nodes; this.groups = groups; } private List<Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (Selection node : nodes) { wanted.addAll(node.matches(max)); } List<Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && groups.isEmpty(); } private 
Group selectRandomGroupWithSufficientCoverage(SearchCluster cluster, List<Integer> groupIds) { while ( ! groupIds.isEmpty()) { int index = random.nextInt(groupIds.size()); int groupId = groupIds.get(index); Optional<Group> group = cluster.group(groupId); if (group.isPresent()) { if (group.get().hasSufficientCoverage()) { return group.get(); } else { groupIds.remove(index); } } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (groupId + 1) + " groups"); } } return cluster.groups().values().iterator().next(); } private Group selectGroup(SearchCluster cluster) { if ( ! groups.isEmpty()) { List<Integer> potentialGroups = new ArrayList<>(); for (Selection groupSelection : groups) { for (int group = groupSelection.from; group < groupSelection.to; group++) { potentialGroups.add(group); } } return selectRandomGroupWithSufficientCoverage(cluster, potentialGroups); } return selectRandomGroupWithSufficientCoverage(cluster, new ArrayList<>(cluster.groups().keySet())); } private static List<Selection> parseSelection(String nodes) { List<Selection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$|/$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<Selection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); int start = Integer.parseInt(m.group(1)); int end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new Selection(start, end)); return ret; } else { throw new 
InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<Selection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = Integer.parseInt(numAndRest.getFirst()); into.add(new Selection(nodeNum, nodeNum + 1)); return numAndRest.getSecond(); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1)); } return new Pair<>(string, ""); } private static void selectionToString(StringBuilder sb, List<Selection> nodes) { boolean first = true; for (Selection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } } @Override public String toString() { StringBuilder sb = new StringBuilder(); selectionToString(sb, nodes); if ( ! groups.isEmpty()) { sb.append('/'); selectionToString(sb, groups); } return sb.toString(); } private static class Selection { private final int from; private final int to; Selection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = Math.min(to, max); return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
Removed List.copyOf().
public List<Node> nodes() { return List.copyOf(nodes.values().stream().map(NodeMutex::node).collect(Collectors.toList())); }
return List.copyOf(nodes.values().stream().map(NodeMutex::node).collect(Collectors.toList()));
public List<Node> nodes() { return nodes.values().stream().map(NodeMutex::node).collect(Collectors.toList()); }
class PatchedNodes implements Mutex { private final Map<String, NodeMutex> nodes = new HashMap<>(); private final String hostname; private boolean fetchedChildren; private PatchedNodes(NodeMutex nodeMutex) { this.hostname = nodeMutex.node().hostname(); nodes.put(hostname, nodeMutex); fetchedChildren = !nodeMutex.node().type().isHost(); } public NodeMutex nodeMutex() { return nodes.get(hostname); } public Node node() { return nodeMutex().node(); } public List<Node> children() { if (!fetchedChildren) { memoizedNodes.get() .childrenOf(hostname) .forEach(node -> nodeRepository.lockAndGet(node) .ifPresent(nodeMutex -> nodes.put(nodeMutex.node().hostname(), nodeMutex))); fetchedChildren = true; } return nodes.values().stream() .map(NodeMutex::node) .filter(node -> !node.type().isHost()) .collect(Collectors.toList()); } public void update(Node node) { NodeMutex currentNodeMutex = nodes.get(node.hostname()); if (currentNodeMutex == null) { throw new IllegalStateException("unable to update non-existing child: " + node.hostname()); } nodes.put(node.hostname(), currentNodeMutex.with(node)); } @Override public void close() { nodes.values().forEach(NodeMutex::close); } }
class PatchedNodes implements Mutex { private final Map<String, NodeMutex> nodes = new HashMap<>(); private final String hostname; private boolean fetchedChildren; private PatchedNodes(NodeMutex nodeMutex) { this.hostname = nodeMutex.node().hostname(); nodes.put(hostname, nodeMutex); fetchedChildren = !nodeMutex.node().type().isHost(); } public NodeMutex nodeMutex() { return nodes.get(hostname); } public Node node() { return nodeMutex().node(); } public List<Node> children() { if (!fetchedChildren) { memoizedNodes.get() .childrenOf(hostname) .forEach(node -> nodeRepository.lockAndGet(node) .ifPresent(nodeMutex -> nodes.put(nodeMutex.node().hostname(), nodeMutex))); fetchedChildren = true; } return nodes.values().stream() .map(NodeMutex::node) .filter(node -> !node.type().isHost()) .collect(Collectors.toList()); } public void update(Node node) { NodeMutex currentNodeMutex = nodes.get(node.hostname()); if (currentNodeMutex == null) { throw new IllegalStateException("unable to update non-existing child: " + node.hostname()); } nodes.put(node.hostname(), currentNodeMutex.with(node)); } @Override public void close() { nodes.values().forEach(NodeMutex::close); } }
Done
public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; for (int i = 0; i < 4; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = getNode(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up trying to fetch an up to date node under lock: " + node.hostname()); }
for (int i = 0; i < 4; ++i) {
public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = getNode(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); }
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; } /** Returns the status of firmware 
checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. * * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... 
inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } public NodeList list(ApplicationId application, State ... inState) { return NodeList.copyOf(getNodes(application, inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } /** Returns a filterable list of all load balancers in this repository */ public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); } /** Returns a filterable list of load balancers belonging to given application */ public LoadBalancerList loadBalancers(ApplicationId application) { return loadBalancers((id) -> id.application().equals(application)); } private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) { return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values()); } public List<Node> getNodes(ApplicationId id, State ... 
inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns the ACL for the node (trusted nodes, networks and ports) */ private NodeAcl getNodeAcl(Node node, NodeList candidates) { Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname)); Set<Integer> trustedPorts = new LinkedHashSet<>(); Set<String> trustedNetworks = new LinkedHashSet<>(); trustedPorts.add(22); candidates.parentOf(node).ifPresent(trustedNodes::add); node.allocation().ifPresent(allocation -> { trustedNodes.addAll(candidates.owner(allocation.owner()).asList()); loadBalancers(allocation.owner()).asList().stream() .map(LoadBalancer::instance) .map(LoadBalancerInstance::networks) .forEach(trustedNetworks::addAll); }); switch (node.type()) { case tenant: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList()); node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList())); if (node.state() == State.ready) { trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList()); } break; case config: trustedNodes.addAll(candidates.asList()); trustedPorts.add(4443); break; case proxy: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedPorts.add(443); trustedPorts.add(4080); trustedPorts.add(4443); break; case controller: trustedPorts.add(4443); trustedPorts.add(443); trustedPorts.add(80); break; default: illegal("Don't know how to create ACL for " + node + " of type " + node.type()); } return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts); } /** * Creates a list of node ACLs which identify which nodes the given node should trust * * @param node Node for which to generate ACLs * @param children Return ACLs for the children of the given node (e.g. 
containers on a Docker host)
 * @return List of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = list();
    if (children) {
        // One ACL per child of the given host
        return candidates.childrenOf(node).asList().stream()
                         .map(childNode -> getNodeAcl(childNode, candidates))
                         .collect(Collectors.toUnmodifiableList());
    }
    return List.of(getNodeAcl(node, candidates));
}

/**
 * Returns whether the zone managed by this node repository seems to be working.
 * If too many nodes are not responding, there is probably some zone-wide issue
 * and we should probably refrain from making changes to it.
 */
public boolean isWorking() {
    NodeList activeNodes = list(State.active);
    if (activeNodes.size() <= 5) return true; // Too few active nodes to draw a conclusion
    NodeList downNodes = activeNodes.down();
    // Working unless more than 20% of active nodes are down
    return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
}

/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(LockedNodeList nodes) {
    // Validate all nodes before adding any of them
    for (Node node : nodes) {
        if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
            illegal("Cannot add " + node + ": This is not a docker node");
        if ( ! node.allocation().isPresent())
            illegal("Cannot add " + node + ": Docker containers needs to be allocated");
        Optional<Node> existing = getNode(node.hostname());
        if (existing.isPresent())
            illegal("Cannot add " + node + ": A node with this name already exists (" +
                    existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                    node + ", " + node.history());
    }
    return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
}

/**
 * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 */
public List<Node> addNodes(List<Node> nodes, Agent agent) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesToAdd =  new ArrayList<>();
        List<Node> nodesToRemove = new ArrayList<>();
        for (int i = 0; i < nodes.size(); i++) {
            var node = nodes.get(i);

            // Check for duplicates in the argument list itself
            for (int j = 0; j < i; j++) {
                if (node.equals(nodes.get(j)))
                    illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
            }

            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent()) {
                if (existing.get().state() != State.deprovisioned)
                    illegal("Cannot add " + node + ": A node with this name already exists");
                // Carry history, reports, fail count and firmware check over from the deprovisioned node
                node = node.with(existing.get().history());
                node = node.with(existing.get().reports());
                node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                if (existing.get().status().firmwareVerifiedAt().isPresent())
                    node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                nodesToRemove.add(existing.get());
            }
            nodesToAdd.add(node);
        }
        List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent);
        db.removeNodes(nodesToRemove);
        return resultingNodes;
    }
}

/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesWithResetFields = nodes.stream()
                .map(node -> {
                    if (node.state() != State.provisioned && node.state() != State.dirty)
                        illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                    if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty())
                        illegal("Can not set host " + node + " ready. Its IP address pool is empty.");
                    // Clear any retirement request before readying
                    return node.withWantToRetire(false, false, Agent.system, clock.instant());
                })
                .collect(Collectors.toList());
        return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
    }
}

public Node setReady(String hostname, Agent agent, String reason) {
    Node nodeToReady = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));

    if (nodeToReady.state() == State.ready) return nodeToReady; // already ready: no-op
    return setReady(List.of(nodeToReady), agent, reason).get(0);
}

/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
}

/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
}

/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes =
                nodes.stream().map(node -> node.with(node.allocation().get().removable(true)))
                     .collect(Collectors.toList());
        write(removableNodes, lock);
    }
}

/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
    return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested());
}

/** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */
public void remove(ApplicationTransaction transaction) {
    NodeList applicationNodes = list(transaction.application());
    NodeList activeNodes = applicationNodes.state(State.active);
    deactivate(activeNodes.asList(), transaction);
    db.writeTo(State.dirty,
               applicationNodes.except(activeNodes.asSet()).asList(),
               Agent.system,
               Optional.of("Application is removed"),
               transaction.nested());
    applications.remove(transaction);
}

/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
    return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
}

/**
 * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 *
 * @throws IllegalArgumentException if the node has hardware failure
 */
public Node setDirty(Node node, Agent agent, String reason) {
    return db.writeTo(State.dirty, node, agent, Optional.of(reason));
}

public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = getNode(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
    // A host is dirtied together with all its children
    List<Node> nodesToDirty =
            (nodeToDirty.type().isHost() ?
                    Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                    Stream.of(nodeToDirty))
            .filter(node -> node.state() != State.dirty)
            .collect(Collectors.toList());
    // Refuse the whole operation if any involved node is in a state that cannot be dirtied
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
            .filter(node -> node.state() != State.provisioned)
            .filter(node -> node.state() != State.failed)
            .filter(node -> node.state() != State.parked)
            .filter(node -> node.state() != State.breakfixed)
            .map(Node::hostname)
            .collect(Collectors.toList());
    if ( ! hostnamesNotAllowedToDirty.isEmpty())
        illegal("Could not deallocate " + nodeToDirty + ": " +
                hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

    return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
}

/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return move(hostname, true, State.failed, agent, Optional.of(reason));
}

/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 *
 * @return List of all the failed nodes in their new state
 */
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
}

/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
}

/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
}

/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, true, State.active, agent, Optional.of(reason));
}

/**
 * Moves a host to breakfixed state, removing any children.
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, node, agent, reason); } } /* * This method is used by the REST API to handle readying nodes for new allocations. 
For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. * * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. 
This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. 
* Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. 
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    return db.writeTo(nodes, Agent.system, Optional.empty());
}

/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param filter the filter determining the set of nodes where the operation will be performed
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
    // Group matching nodes by the lock they require: unallocated nodes share one lock,
    // allocated nodes are locked per owning application
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : db.readNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }

    // Perform operation while holding the appropriate lock
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes) {
            // Re-read under the lock; the node may have changed or disappeared since the unlocked scan
            Optional<Node> currentNode = db.readNode(node.hostname());
            if (currentNode.isEmpty()) continue;
            resultingNodes.add(action.apply(currentNode.get(), lock));
        }
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue()) {
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
    }
    return resultingNodes;
}

/** Returns whether a tenant node may be allocated to the given host, using this zone's provisioning mode. */
public boolean canAllocateTenantNodeTo(Node host) {
    return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}

public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    if (host.status().wantToRetire()) return false;
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;

    if (dynamicProvisioning)
        return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
    else
        return host.state() == State.active;
}

/** Returns the time keeper of this system */
public Clock clock() { return clock; }

/** Returns the zone of this system */
public Zone zone() { return zone; }

/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }

/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); }

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(Node node) {
    return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname()));
}

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(String hostname) {
    return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname));
}

/** Returns the lock appropriate for the given node: its application's lock if allocated, the unallocated lock otherwise */
private Mutex lock(Node node) {
    return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}

private void illegal(String message) {
    throw new IllegalArgumentException(message);
}

}
class NodeRepository extends AbstractComponent {

    private static final Logger log = Logger.getLogger(NodeRepository.class.getName());

    private final CuratorDatabaseClient db;
    private final Clock clock;
    private final Zone zone;
    private final NodeFlavors flavors;
    private final HostResourcesCalculator resourcesCalculator;
    private final NameResolver nameResolver;
    private final OsVersions osVersions;
    private final InfrastructureVersions infrastructureVersions;
    private final FirmwareChecks firmwareChecks;
    private final ContainerImages containerImages;
    private final JobControl jobControl;
    private final Applications applications;
    private final int spareCount;

    /**
     * Creates a node repository from a zookeeper provider.
     * This will use the system time to make time-sensitive decisions
     */
    @Inject
    public NodeRepository(NodeRepositoryConfig config,
                          NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Zone zone,
                          FlagSource flagSource) {
        this(flavors,
             provisionServiceProvider,
             curator,
             Clock.systemUTC(),
             zone,
             new DnsNameResolver(),
             DockerImage.fromString(config.containerImage())
                        .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())),
             flagSource,
             config.useCuratorClientCache(),
             // One spare in production zones without dynamic provisioning, otherwise none
             zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0,
             config.nodeCacheSize());
    }

    /**
     * Creates a node repository from a zookeeper provider and a clock instance
     * which will be used for time-sensitive decisions.
     */
    public NodeRepository(NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Clock clock,
                          Zone zone,
                          NameResolver nameResolver,
                          DockerImage containerImage,
                          FlagSource flagSource,
                          boolean useCuratorClientCache,
                          int spareCount,
                          long nodeCacheSize) {
        this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
        this.zone = zone;
        this.clock = clock;
        this.flavors = flavors;
        this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator();
        this.nameResolver = nameResolver;
        this.osVersions = new OsVersions(this);
        this.infrastructureVersions = new InfrastructureVersions(db);
        this.firmwareChecks = new FirmwareChecks(db, clock);
        this.containerImages = new ContainerImages(db, containerImage);
        this.jobControl = new JobControl(new JobControlFlags(db, flagSource));
        this.applications = new Applications(db);
        this.spareCount = spareCount;
        rewriteNodes();
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    private void rewriteNodes() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (State state : State.values()) {
            List<Node> nodes = db.readNodes(state);
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /** Returns the curator database client used by this */
    public CuratorDatabaseClient database() { return db; }

    /** @return The name resolver used to resolve hostname and ip addresses */
    public NameResolver nameResolver() { return nameResolver; }

    /** Returns the OS versions to use for nodes in this */
    public OsVersions osVersions() { return osVersions; }

    /** Returns the infrastructure versions to use for nodes in this */
    public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }

    /** Returns the status of firmware checks for hosts managed by this. */
    public FirmwareChecks firmwareChecks() { return firmwareChecks; }

    /** Returns the docker images to use for nodes in this. */
    public ContainerImages containerImages() { return containerImages; }

    /** Returns the status of maintenance jobs managed by this. */
    public JobControl jobControl() { return jobControl; }

    /** Returns this node repo's view of the applications deployed to it */
    public Applications applications() { return applications; }

    public NodeFlavors flavors() { return flavors; }

    public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; }

    /** The number of nodes we should ensure has free capacity for node failures whenever possible */
    public int spareCount() { return spareCount; }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> getNode(String hostname, State ... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns all nodes in any of the given states.
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the node, or empty if it was not found in any of the given states
     */
    public List<Node> getNodes(State ... inState) {
        return new ArrayList<>(db.readNodes(inState));
    }

    /**
     * Finds and returns the nodes of the given type in any of the given states.
     *
     * @param type the node type to return
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the node, or empty if it was not found in any of the given states
     */
    public List<Node> getNodes(NodeType type, State ... inState) {
        return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
    }

    /** Returns a filterable list of nodes in this repository in any of the given states */
    public NodeList list(State ... inState) {
        return NodeList.copyOf(getNodes(inState));
    }

    public NodeList list(ApplicationId application, State ... inState) {
        return NodeList.copyOf(getNodes(application, inState));
    }

    /** Returns a filterable list of all nodes of an application */
    public NodeList list(ApplicationId application) {
        return NodeList.copyOf(getNodes(application));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(getNodes(), lock);
    }

    /** Returns a filterable list of all load balancers in this repository */
    public LoadBalancerList loadBalancers() {
        return loadBalancers((ignored) -> true);
    }

    /** Returns a filterable list of load balancers belonging to given application */
    public LoadBalancerList loadBalancers(ApplicationId application) {
        return loadBalancers((id) -> id.application().equals(application));
    }

    private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
        return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
    }

    public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); }
    public List<Node> getInactive() { return db.readNodes(State.inactive); }
    public List<Node> getFailed() { return db.readNodes(State.failed); }

    /**
     * Returns the ACL for the node (trusted nodes, networks and ports)
     */
    private NodeAcl getNodeAcl(Node node, NodeList candidates) {
        Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
        Set<Integer> trustedPorts = new LinkedHashSet<>();
        Set<String> trustedNetworks = new LinkedHashSet<>();

        // SSH is trusted for all node types
        trustedPorts.add(22);

        // Trust the node's parent host, nodes in the same application, and the application's load balancer networks
        candidates.parentOf(node).ifPresent(trustedNodes::add);
        node.allocation().ifPresent(allocation -> {
            trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
            loadBalancers(allocation.owner()).asList().stream()
                                             .map(LoadBalancer::instance)
                                             .map(LoadBalancerInstance::networks)
                                             .forEach(trustedNetworks::addAll);
        });

        switch (node.type()) {
            case tenant:
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
                node.allocation().ifPresent(allocation ->
                        trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList()));
                if (node.state() == State.ready) {
                    // Ready tenant nodes trust all other tenant nodes
                    trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
                }
                break;

            case config:
                trustedNodes.addAll(candidates.asList());
                trustedPorts.add(4443);
                break;

            case proxy:
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedPorts.add(443);
                trustedPorts.add(4080);
                trustedPorts.add(4443);
                break;

            case controller:
                trustedPorts.add(4443);
                trustedPorts.add(443);
                trustedPorts.add(80);
                break;

            default:
                illegal("Don't know how to create ACL for " + node + " of type " + node.type());
        }

        return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
    }

    /**
     * Creates a list of node ACLs which identify which nodes the given node should trust
     *
     * @param node Node for which to generate ACLs
     * @param children Return ACLs for the children of the given node (e.g.
containers on a Docker host)
 * @return List of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = list();
    if (children) {
        // One ACL per child of the given host
        return candidates.childrenOf(node).asList().stream()
                         .map(childNode -> getNodeAcl(childNode, candidates))
                         .collect(Collectors.toUnmodifiableList());
    }
    return List.of(getNodeAcl(node, candidates));
}

/**
 * Returns whether the zone managed by this node repository seems to be working.
 * If too many nodes are not responding, there is probably some zone-wide issue
 * and we should probably refrain from making changes to it.
 */
public boolean isWorking() {
    NodeList activeNodes = list(State.active);
    if (activeNodes.size() <= 5) return true; // Too few active nodes to draw a conclusion
    NodeList downNodes = activeNodes.down();
    // Working unless more than 20% of active nodes are down
    return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
}

/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(LockedNodeList nodes) {
    // Validate all nodes before adding any of them
    for (Node node : nodes) {
        if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
            illegal("Cannot add " + node + ": This is not a docker node");
        if ( ! node.allocation().isPresent())
            illegal("Cannot add " + node + ": Docker containers needs to be allocated");
        Optional<Node> existing = getNode(node.hostname());
        if (existing.isPresent())
            illegal("Cannot add " + node + ": A node with this name already exists (" +
                    existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                    node + ", " + node.history());
    }
    return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
}

/**
 * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
 * If any of the nodes already exists in the deprovisioned state, the new node will be merged
 * with the history of that node.
 */
public List<Node> addNodes(List<Node> nodes, Agent agent) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesToAdd =  new ArrayList<>();
        List<Node> nodesToRemove = new ArrayList<>();
        for (int i = 0; i < nodes.size(); i++) {
            var node = nodes.get(i);

            // Check for duplicates in the argument list itself
            for (int j = 0; j < i; j++) {
                if (node.equals(nodes.get(j)))
                    illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
            }

            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent()) {
                if (existing.get().state() != State.deprovisioned)
                    illegal("Cannot add " + node + ": A node with this name already exists");
                // Carry history, reports, fail count and firmware check over from the deprovisioned node
                node = node.with(existing.get().history());
                node = node.with(existing.get().reports());
                node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                if (existing.get().status().firmwareVerifiedAt().isPresent())
                    node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                nodesToRemove.add(existing.get());
            }
            nodesToAdd.add(node);
        }
        List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent);
        db.removeNodes(nodesToRemove);
        return resultingNodes;
    }
}

/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
    try (Mutex lock = lockUnallocated()) {
        List<Node> nodesWithResetFields = nodes.stream()
                .map(node -> {
                    if (node.state() != State.provisioned && node.state() != State.dirty)
                        illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                    if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty())
                        illegal("Can not set host " + node + " ready. Its IP address pool is empty.");
                    // Clear any retirement request before readying
                    return node.withWantToRetire(false, false, Agent.system, clock.instant());
                })
                .collect(Collectors.toList());
        return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
    }
}

public Node setReady(String hostname, Agent agent, String reason) {
    Node nodeToReady = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));

    if (nodeToReady.state() == State.ready) return nodeToReady; // already ready: no-op
    return setReady(List.of(nodeToReady), agent, reason).get(0);
}

/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
}

/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
}

/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes =
                nodes.stream().map(node -> node.with(node.allocation().get().removable(true)))
                     .collect(Collectors.toList());
        write(removableNodes, lock);
    }
}

/**
 * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
 * transaction commits.
 */
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
    return db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested());
}

/** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */
public void remove(ApplicationTransaction transaction) {
    NodeList applicationNodes = list(transaction.application());
    NodeList activeNodes = applicationNodes.state(State.active);
    deactivate(activeNodes.asList(), transaction);
    db.writeTo(State.dirty,
               applicationNodes.except(activeNodes.asSet()).asList(),
               Agent.system,
               Optional.of("Application is removed"),
               transaction.nested());
    applications.remove(transaction);
}

/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
    return performOn(NodeListFilter.from(nodes), (node, lock) -> setDirty(node, agent, reason));
}

/**
 * Set a node dirty, allowed if it is in the provisioned, inactive, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 *
 * @throws IllegalArgumentException if the node has hardware failure
 */
public Node setDirty(Node node, Agent agent, String reason) {
    return db.writeTo(State.dirty, node, agent, Optional.of(reason));
}

public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
    Node nodeToDirty = getNode(hostname).orElseThrow(() ->
            new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
    // A host is dirtied together with all its children
    List<Node> nodesToDirty =
            (nodeToDirty.type().isHost() ?
                    Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                    Stream.of(nodeToDirty))
            .filter(node -> node.state() != State.dirty)
            .collect(Collectors.toList());
    // Refuse the whole operation if any involved node is in a state that cannot be dirtied
    List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
            .filter(node -> node.state() != State.provisioned)
            .filter(node -> node.state() != State.failed)
            .filter(node -> node.state() != State.parked)
            .filter(node -> node.state() != State.breakfixed)
            .map(Node::hostname)
            .collect(Collectors.toList());
    if ( ! hostnamesNotAllowedToDirty.isEmpty())
        illegal("Could not deallocate " + nodeToDirty + ": " +
                hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

    return nodesToDirty.stream().map(node -> setDirty(node, agent, reason)).collect(Collectors.toList());
}

/**
 * Fails this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return move(hostname, true, State.failed, agent, Optional.of(reason));
}

/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 *
 * @return List of all the failed nodes in their new state
 */
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
}

/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
    return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason));
}

/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
}

/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent, String reason) {
    return move(hostname, true, State.active, agent, Optional.of(reason));
}

/**
 * Moves a host to breakfixed state, removing any children.
 */
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found"));

    try (Mutex lock = lockUnallocated()) {
        requireBreakfixable(node);
        List<Node> removed = removeChildren(node, false);
        removed.add(move(node, State.breakfixed, agent, Optional.of(reason)));
        return removed;
    }
}

/** Moves all children of hostname, then hostname itself, to the given state. */
private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = list().childrenOf(hostname).asList().stream()
                             .map(child -> move(child, toState, agent, reason))
                             .collect(Collectors.toList());

    moved.add(move(hostname, true, toState, agent, reason));
    return moved;
}

private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));

    if (!keepAllocation && node.allocation().isPresent()) {
        node = node.withoutAllocation();
    }

    return move(node, toState, agent, reason);
}

private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && node.allocation().isEmpty())
        illegal("Could not set " + node + " active. It has no allocation.");

    try (Mutex lock = lock(node)) {
        if (toState == State.active) {
            // Guard against activating two nodes with the same cluster and index for the same application
            for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}

/*
 * This method is used by the REST API to handle readying nodes for new allocations.
For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. * * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. 
This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. 
* Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. * * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. 
* This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! 
filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! 
host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (dynamicProvisioning) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname())); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname)); } private Mutex lock(Node node) { return node.allocation().isPresent() ? 
lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
Timestamp field names should include the unit (millis), e.g. `lastDeploymentToDevMillis`, so consumers know the values are epoch milliseconds.
/**
 * Adds tenant metadata to the given Slime cursor: the start time of the most recent
 * dev-environment job run across all instances of the tenant's applications, and the
 * build time of the most recently submitted application version.
 *
 * Timestamp field names include the unit (Millis) so consumers know the values are
 * epoch milliseconds.
 *
 * @param tenant the tenant whose metadata is serialized
 * @param object the Slime cursor the metadata fields are written to
 */
private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
    List<com.yahoo.vespa.hosted.controller.Application> applications =
            controller.applications().asList(tenant.name());
    // Latest start of any dev job run, across all instances of all the tenant's applications.
    Optional<Instant> lastDev = applications.stream()
            .flatMap(application -> application.instances().values().stream())
            .flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
                                           .filter(jobType -> jobType.environment() == Environment.dev)
                                           .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
            .map(Run::start)
            .max(Comparator.naturalOrder());
    // Latest build time of any submitted application version, if one exists.
    Optional<Instant> lastSubmission = applications.stream()
            .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
            .max(Comparator.naturalOrder());
    // Field names carry the "Millis" suffix: values are epoch milliseconds.
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
}
lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDev", instant.toEpochMilli()));
/**
 * Adds tenant metadata to the given Slime cursor: the start time of the most recent
 * dev-environment job run across all instances of the tenant's applications, and the
 * build time of the most recently submitted application version.
 * Field names carry the "Millis" suffix because the values are epoch milliseconds.
 */
private void tenantMetaDataToSlime(Tenant tenant, Cursor object) {
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    // Latest start of any dev job run, across all instances of all the tenant's applications.
    Optional<Instant> lastDev = applications.stream()
            .flatMap(application -> application.instances().values().stream())
            .flatMap(instance -> controller.jobController().jobs(instance.id()).stream()
                    .filter(jobType -> jobType.environment() == Environment.dev)
                    .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
            .map(Run::start)
            .max(Comparator.naturalOrder());
    // Latest build time of any submitted application version, if one exists.
    Optional<Instant> lastSubmission = applications.stream()
            .flatMap(app -> app.latestVersion().flatMap(ApplicationVersion::buildTime).stream())
            .max(Comparator.naturalOrder());
    // Only written when present; absent values simply omit the field.
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant -> object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
}
class ApplicationApiHandler extends LoggingRequestHandler { private static final String OPTIONAL_PREFIX = "/api"; private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { Path path = new Path(request.getUri(), OPTIONAL_PREFIX); switch (request.getMethod()) { case GET: return handleGET(path, request); case PUT: return handlePUT(path, request); case POST: return handlePOST(path, request); case PATCH: return handlePATCH(path, request); case DELETE: return handleDELETE(path, request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { switch (e.getErrorCode()) { case NOT_FOUND: return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT: return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR: return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), 
Exceptions.toMessageString(e)); default: return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e)); } } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), 
runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return 
clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), 
path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return 
addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy route order: environment/region before instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PATCH requests to the handler for the matched path, or 404. */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        // NOTE(review): the instance segment is ignored here — the patch is applied to the whole application.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes DELETE requests to the handler for the matched path, or 404. */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
        // DELETE of a pause resumes the job.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        // Legacy route order: environment/region before instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Responds to OPTIONS with the set of supported methods. */
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Lists all tenants, each rendered in full detail. */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            toSlime(tenantArray.addObject(), tenant, request);
        return new
SlimeJsonResponse(slime); }

    /** Lists tenants: full (recursive) detail when requested, otherwise a plain resource listing. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request)
               ? recursiveRoot(request)
               : new ResourceResponse(request, "tenant");
    }

    /** Lists all tenants in short form. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Renders the named tenant, or 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Renders the given tenant. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the tenant's info; only cloud tenants carry info. */
    private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Renders tenant info as JSON; an empty info yields an empty object. */
    private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
        Slime slime = new Slime();
        Cursor infoCursor = slime.setObject();
        if (!info.isEmpty()) {
            infoCursor.setString("name", info.name());
            infoCursor.setString("email", info.email());
            infoCursor.setString("website", info.website());
            infoCursor.setString("invoiceEmail", info.invoiceEmail());
            infoCursor.setString("contactName", info.contactName());
            infoCursor.setString("contactEmail", info.contactEmail());
            toSlime(info.address(), infoCursor);
            toSlime(info.billingContact(), infoCursor);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Renders the address under an "address" object; omitted entirely when empty. */
    private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
        if (address.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("address");
        addressCursor.setString("addressLines", address.addressLines());
        addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip());
        addressCursor.setString("city", address.city());
        addressCursor.setString("stateRegionProvince", address.stateRegionProvince());
        addressCursor.setString("country", address.country());
    }

    /** Renders the billing contact under a "billingContact" object; omitted entirely when empty. */
    private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
        if (billingContact.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("billingContact");
        addressCursor.setString("name", billingContact.name());
        addressCursor.setString("email", billingContact.email());
        addressCursor.setString("phone", billingContact.phone());
        toSlime(billingContact.address(), addressCursor);
    }

    /** Updates tenant info from the request body; only cloud tenants support this. */
    private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Returns the field's string value, or the given default when the field is missing. */
    private String getString(Inspector field, String defaultVale) { // NOTE(review): "defaultVale" is a typo for "defaultValue"
        return field.valid() ?
field.asString() : defaultVale; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); TenantInfo mergedInfo = TenantInfo.EMPTY .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.email())) .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail())) .withContactName(getString(insp.field("contactName"), oldInfo.contactName())) .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactName())) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact())); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) { if (!insp.valid()) return oldAddress; return TenantInfoAddress.EMPTY .withCountry(getString(insp.field("country"), oldAddress.country())) .withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince())) .withCity(getString(insp.field("city"), oldAddress.city())) .withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip())) .withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines())); } private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) { if (!insp.valid()) return oldContact; return TenantInfoBillingContact.EMPTY .withName(getString(insp.field("name"), oldContact.name())) .withEmail(getString(insp.field("email"), oldContact.email())) 
.withPhone(getString(insp.field("phone"), oldContact.phone()))
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    }

    /**
     * Lists applications under a tenant, optionally filtered to a single application
     * name. Each entry carries its URL and its instances (production instances only
     * when so requested).
     */
    private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        if (controller.tenants().get(tenantName).isEmpty())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
        Slime slime = new Slime();
        Cursor applicationArray = slime.setArray();
        for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
            if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
                Cursor applicationObject = applicationArray.addObject();
                applicationObject.setString("tenant", application.id().tenant().value());
                applicationObject.setString("application", application.id().application().value());
                applicationObject.setString("url", withPath("/application/v4" +
                                                            "/tenant/" + application.id().tenant().value() +
                                                            "/application/" + application.id().application().value(),
                                                            request.getUri()).toString());
                Cursor instanceArray = applicationObject.setArray("instances");
                for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                                  : application.instances().keySet()) {
                    Cursor instanceObject = instanceArray.addObject();
                    instanceObject.setString("instance", instance.value());
                    instanceObject.setString("url", withPath("/application/v4" +
                                                             "/tenant/" + application.id().tenant().value() +
                                                             "/application/" + application.id().application().value() +
                                                             "/instance/" + instance.value(),
                                                             request.getUri()).toString());
                }
            }
        }
        return new SlimeJsonResponse(slime);
    }

    /** Serves the application package last deployed to a manually deployed (dev/perf) zone, as a zip. */
    private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
        if ( ! type.environment().isManuallyDeployed())
            throw new IllegalArgumentException("Only manually deployed zones have dev packages");
        ZoneId zone = type.zone(controller.system());
        byte[] applicationPackage = controller.applications().applicationStore().getDev(id, zone);
        return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
    }

    /**
     * Serves a submitted application package as a zip: the build given by the "build"
     * query parameter, or the latest submitted build when none is given.
     */
    private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
        var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
        var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value()); // NOTE(review): appears unused in this method — candidate for removal
        long buildNumber;
        var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
            try {
                return Long.parseLong(build);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("Invalid build number", e);
            }
        });
        if (requestedBuild.isEmpty()) { // no explicit build — fall back to the latest submitted version
            var application = controller.applications().requireApplication(tenantAndApplication);
            var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
            if (latestBuild.isEmpty()) {
                throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
            }
            buildNumber = latestBuild.getAsLong();
        } else {
            buildNumber = requestedBuild.get();
        }
        var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
        var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
        if (applicationPackage.isEmpty()) {
            throw new NotExistsException("No application package found for '" + tenantAndApplication + "' with build number " + buildNumber);
        }
        return new ZipResponse(filename, applicationPackage.get());
    }

    /** Renders a single application. */
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getApplication(tenantName,
applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the compile version for the given application. */
    private HttpResponse compileVersion(String tenantName, String applicationName) {
        Slime slime = new Slime();
        slime.setObject().setString("compileVersion",
                                    compileVersion(TenantAndApplicationId.from(tenantName, applicationName)).toFullString());
        return new SlimeJsonResponse(slime);
    }

    /** Renders a single instance, including the deployment status of its application. */
    private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(),
                getInstance(tenantName, applicationName, instanceName),
                controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)),
                request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * Registers the PEM public key in the request body as a developer key for the
     * authenticated user, and returns the resulting key set. Cloud tenants only.
     */
    private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        Principal user = request.getJDiscRequest().getUserPrincipal();
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withDeveloperKey(developerKey, user);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Removes the PEM public key in the request body from the tenant's developer keys. Cloud tenants only. */
    private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Principal user = ((CloudTenant) controller.tenants().require(TenantName.from(tenantName))).developerKeys().get(developerKey); // NOTE(review): appears unused in this method — candidate for removal
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withoutDeveloperKey(developerKey);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Renders the key → user map as an array of {key, user} objects. */
    private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
        keys.forEach((key, principal) -> {
            Cursor keyObject = keysArray.addObject();
            keyObject.setString("key", KeyUtils.toPem(key));
            keyObject.setString("user", principal.getName());
        });
    }

    /** Adds the PEM public key in the request body as a deploy key, and returns the resulting key set. */
    private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /** Removes the PEM public key in the request body from the application's deploy keys. */
    private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withoutDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }
/**
     * Applies partial updates from the request body: "majorVersion" (0 clears it)
     * and "pemDeployKey" (added to the application's deploy keys).
     */
    private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            Inspector majorVersionField = requestObject.field("majorVersion");
            if (majorVersionField.valid()) {
                Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); // 0 means "unset"
                application = application.withMajorVersion(majorVersion);
                messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
            }
            Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
            if (pemDeployKeyField.valid()) {
                String pemDeployKey = pemDeployKeyField.asString();
                PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
                application = application.withDeployKey(deployKey);
                messageBuilder.add("Added deploy key " + pemDeployKey);
            }
            controller.applications().store(application);
        });
        return new MessageResponse(messageBuilder.toString());
    }

    /** Returns the application with the given tenant and name, or throws NotExistsException. */
    private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
        return controller.applications().getApplication(applicationId)
                         .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /** Returns the instance with the given ids, or throws NotExistsException. */
    private Instance getInstance(String tenantName, String applicationName, String instanceName) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        return controller.applications().getInstance(applicationId)
                         .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /** Lists the nodes of a deployment, as reported by the config server's node repository. */
    private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
        Slime slime = new Slime();
        Cursor nodesArray = slime.setObject().setArray("nodes");
        for (Node node : nodes) {
            Cursor nodeObject = nodesArray.addObject();
            nodeObject.setString("hostname", node.hostname().value());
            nodeObject.setString("state", valueOf(node.state()));
            node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
            nodeObject.setString("orchestration", valueOf(node.serviceState()));
            nodeObject.setString("version", node.currentVersion().toString());
            nodeObject.setString("flavor", node.flavor());
            toSlime(node.resources(), nodeObject);
            nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
            nodeObject.setString("clusterId", node.clusterId());
            nodeObject.setString("clusterType", valueOf(node.clusterType()));
        }
        return new SlimeJsonResponse(slime);
    }

    /** Renders the clusters of a deployment, with min/max/current/target/suggested resources and scaling events. */
    private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
        Slime slime = new Slime();
        Cursor clustersObject = slime.setObject().setObject("clusters");
        for (Cluster cluster : application.clusters().values()) {
            Cursor clusterObject = clustersObject.setObject(cluster.id().value());
            toSlime(cluster.min(), clusterObject.setObject("min"));
            toSlime(cluster.max(), clusterObject.setObject("max"));
            toSlime(cluster.current(), clusterObject.setObject("current"));
            // Only render the target when it differs numerically from the current resources.
            if (cluster.target().isPresent() && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
                toSlime(cluster.target().get(), clusterObject.setObject("target"));
            cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
            scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
            clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Serialized name of a node state. */
    private static String valueOf(Node.State state) {
        switch (state) {
            case failed: return "failed";
            case parked: return "parked";
            case dirty: return "dirty";
            case ready: return "ready";
            case active: return "active";
            case inactive: return "inactive";
            case reserved: return "reserved";
            case provisioned: return "provisioned";
            default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
        }
    }

    /** Serialized name of a node's orchestration state. */
    private static String valueOf(Node.ServiceState state) {
        switch (state) {
            case expectedUp: return "expectedUp";
            case allowedDown: return "allowedDown";
            case unorchestrated: return "unorchestrated";
            default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
        }
    }

    /** Serialized name of a node cluster type. */
    private static String valueOf(Node.ClusterType type) {
        switch (type) {
            case admin: return "admin";
            case content: return "content";
            case container: return "container";
            case combined: return "combined";
            default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
        }
    }

    /** Serialized name of a disk speed. */
    private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
        switch (diskSpeed) {
            case fast : return "fast";
            case slow : return "slow";
            case any : return "any";
            default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
        }
    }

    /** Serialized name of a storage type. */
    private static String valueOf(NodeResources.StorageType storageType) {
        switch (storageType) {
            case remote : return "remote";
            case local : return "local";
            case any : return "any";
            default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
        }
    }
/** Streams the log of the given deployment directly from the config server. */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
    // Stream the body straight through rather than buffering it all in memory.
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            logStream.transferTo(outputStream);
        }
    };
}

/** Returns proton metrics for the given deployment as a JSON response. */
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    DeploymentId deployment = new DeploymentId(application, zone);
    List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
    return buildResponseFromProtonMetrics(protonMetrics);
}

/** Wraps the given proton metrics in a { "metrics": [...] } JSON response; 500 with an empty body on failure. */
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
    try {
        var jsonObject = new JSONObject();
        var jsonArray = new JSONArray();
        for (ProtonMetrics metrics : protonMetrics) {
            jsonArray.put(metrics.toJson());
        }
        jsonObject.put("metrics", jsonArray);
        return new JsonResponse(200, jsonObject.toString());
    } catch (JSONException e) {
        // NOTE(review): the caught exception is discarded — consider logging it (message/stack) for diagnosability.
        log.severe("Unable to build JsonResponse with Proton data");
        return new JsonResponse(500, "");
    }
}

/**
 * Triggers the given job for the given instance: a plain re-trigger when "reTrigger" is set in the request body,
 * otherwise a forced trigger, optionally skipping tests when "skipTests" is set.
 */
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    boolean requireTests = ! requestObject.field("skipTests").asBool();
    boolean reTrigger = requestObject.field("reTrigger").asBool();
    String triggered = reTrigger
                       ? controller.applications().deploymentTrigger()
                                   .reTrigger(id, type).type().jobName()
                       : controller.applications().deploymentTrigger()
                                   .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests)
                                   .stream().map(job -> job.type().jobName()).collect(joining(", "));
    return new MessageResponse(triggered.isEmpty()
                               ? "Job " + type.jobName() + " for " + id + " not triggered"
                               : "Triggered " + triggered + " for " + id);
}

/** Pauses the given job for the maximum allowed pause period. */
private HttpResponse pause(ApplicationId id, JobType type) {
    Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
    controller.applications().deploymentTrigger().pauseJob(id, type, until);
    return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}

/** Resumes a previously paused job. */
private HttpResponse resume(ApplicationId id, JobType type) {
    controller.applications().deploymentTrigger().resumeJob(id, type);
    return new MessageResponse(type.jobName() + " for " + id + " resumed");
}

/** Serializes an application overview for the application list/detail response (continues past this chunk). */
private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) {
    object.setString("tenant", application.id().tenant().value());
    object.setString("application", application.id().application().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + application.id().tenant().value() +
                                             "/application/" + application.id().application().value() +
                                             "/job/",
                                             request.getUri()).toString());
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    // The "deploying"/"outstandingChange" summary is taken from the first instance only.
    application.instances().values().stream().findFirst().ifPresent(instance -> {
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( !
status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
    });
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    Cursor instancesArray = object.setArray("instances");
    for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                  : application.instances().values())
        toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

/** Serializes one instance of an application, with its change blockers and deployments. */
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());
    if (deploymentSpec.instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus appears unused below — confirm whether the call has intended side effects
        // before removing it.
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(deploymentSpec.requireInstance(instance.name()))
                                              .sortedJobs(status.instanceJobs(instance.name()).values());
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    globalEndpointsToSlime(object, instance);
    // Deployments in deployment-spec order when a spec exists; otherwise the plain map-value order.
    List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                 .map(spec -> new DeploymentSteps(spec, controller::system))
                                                 .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                                 .orElse(List.copyOf(instance.deployments().values()));
    Cursor deploymentsArray = object.setArray("deployments");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = deploymentsArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            // Shallow form: only identity and a URL to the full deployment resource.
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/instance/" + instance.name().value() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
}

/** Adds the instance's non-legacy, rotation-backed endpoint URLs, and its first rotation id if any. */
private void globalEndpointsToSlime(Cursor object, Instance instance) {
    var globalEndpointUrls = new LinkedHashSet<String>();
    controller.routing().endpointsOf(instance.id())
              .requiresRotation()
              .not().legacy()
              .asList().stream()
              .map(Endpoint::url)
              .map(URI::toString)
              .forEach(globalEndpointUrls::add);
    var globalRotationsArray = object.setArray("globalRotations");
    globalEndpointUrls.forEach(globalRotationsArray::addString);
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
}

/** Serializes a single instance in detail (continues past this chunk). */
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
    com.yahoo.vespa.hosted.controller.Application application = status.application();
    object.setString("tenant", instance.id().tenant().value());
    object.setString("application", instance.id().application().value());
    object.setString("instance", instance.id().instance().value());
    object.setString("deployments", withPath("/application/v4" +
                                             "/tenant/" + instance.id().tenant().value() +
                                             "/application/" + instance.id().application().value() +
                                             "/instance/" + instance.id().instance().value() + "/job/",
                                             request.getUri()).toString());
    application.latestVersion().ifPresent(version -> {
        sourceRevisionToSlime(version.source(),
object.setObject("source"));
        version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        version.commit().ifPresent(commit -> object.setString("commit", commit));
    });
    application.projectId().ifPresent(id -> object.setLong("projectId", id));
    if (application.deploymentSpec().instance(instance.name()).isPresent()) {
        // NOTE(review): jobStatus appears unused below — confirm whether the call has intended side effects
        // before removing it.
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(application.deploymentSpec().requireInstance(instance.name()))
                                              .sortedJobs(status.instanceJobs(instance.name()).values());
        if ( ! instance.change().isEmpty())
            toSlime(object.setObject("deploying"), instance.change());
        if ( ! status.outstandingChange(instance.name()).isEmpty())
            toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()));
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        }));
    }
    application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
    globalEndpointsToSlime(object, instance);
    List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                                              .map(spec -> new DeploymentSteps(spec, controller::system))
                                              .map(steps -> steps.sortedDeployments(instance.deployments().values()))
                                              .orElse(List.copyOf(instance.deployments().values()));
    Cursor instancesArray = object.setArray("instances");
    for (Deployment deployment : deployments) {
        Cursor deploymentObject = instancesArray.addObject();
        if (deployment.zone().environment() == Environment.prod) {
            if (instance.rotations().size() == 1) {
                toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment),
                        deploymentObject);
            }
            if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            }
        }
        if (recurseOverDeployments(request))
            toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        else {
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", instance.id().instance().value());
            deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                       "/environment/" + deployment.zone().environment().value() +
                                                       "/region/" + deployment.zone().region().value(),
                                                       request.getUri()).toString());
        }
    }
    // Also list production deployment zones from the job steps which have no deployment yet.
    status.jobSteps().keySet().stream()
          .filter(job -> job.application().instance().equals(instance.name()))
          .filter(job -> job.type().isProduction() && job.type().isDeployment())
          .map(job -> job.type().zone(controller.system()))
          .filter(zone -> ! instance.deployments().containsKey(zone))
          .forEach(zone -> {
              Cursor deploymentObject = instancesArray.addObject();
              deploymentObject.setString("environment", zone.environment().value());
              deploymentObject.setString("region", zone.region().value());
          });
    // Both the singular "pemDeployKey" (first key) and the full "pemDeployKeys" array are emitted.
    application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
    application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
    Cursor metricsObject = object.setObject("metrics");
    metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
    metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
    Cursor activity = object.setObject("activity");
    application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
    application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
    application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}

/** Returns the details of a single deployment; throws NotExistsException when the instance or deployment is absent. */
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().getInstance(id)
                                  .orElseThrow(() -> new NotExistsException(id + " not found"));
    DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId());
    if (deployment == null)
        throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
    Slime slime = new Slime();
    toSlime(slime.setObject(), deploymentId, deployment, request);
    return new SlimeJsonResponse(slime);
}

/** Serializes a pending change: its platform version and/or its known application version. */
private void toSlime(Cursor object, Change change) {
    change.platform().ifPresent(version -> object.setString("version", version.toString()));
    change.application()
          .filter(version -> !version.isUnknown())
          .ifPresent(version -> toSlime(version, object.setObject("revision")));
}

/** Serializes a single endpoint. */
private void toSlime(Endpoint endpoint, Cursor object) {
    object.setString("cluster", endpoint.cluster().value());
    object.setBool("tls", endpoint.tls());
    object.setString("url", endpoint.url().toString());
    object.setString("scope", endpointScopeString(endpoint.scope()));
    object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
}

/** Serializes the full detail view of one deployment (continues past this chunk). */
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
    response.setString("tenant", deploymentId.applicationId().tenant().value());
    response.setString("application", deploymentId.applicationId().application().value());
    response.setString("instance", deploymentId.applicationId().instance().value());
    response.setString("environment", deploymentId.zoneId().environment().value());
    response.setString("region", deploymentId.zoneId().region().value());
    var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
    // Non-legacy zone endpoints, then non-legacy global endpoints targeting this zone.
    var endpointArray = response.setArray("endpoints");
    EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
                                           .scope(Endpoint.Scope.zone)
                                           .not().legacy();
    for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
                                             .not().legacy()
                                             .targets(deploymentId.zoneId());
    for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
        toSlime(endpoint, endpointArray.addObject());
    }
    response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString());
    response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
    response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
    response.setString("version", deployment.version().toFullString());
    response.setString("revision", deployment.applicationVersion().id());
    response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
    controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
              .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
    DeploymentStatus status = controller.jobController().deploymentStatus(application);
    application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
    sourceRevisionToSlime(deployment.applicationVersion().source(), response);
    var instance = application.instances().get(deploymentId.applicationId().instance());
    if (instance != null) {
        if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod)
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, response);
        JobType.from(controller.system(), deployment.zone())
               .map(type -> new JobId(instance.id(), type))
               .map(status.jobSteps()::get)
               .ifPresent(stepStatus -> {
                   JobControllerApiHandlerHelper.applicationVersionToSlime(
                           response.setObject("applicationVersion"), deployment.applicationVersion());
                   // "complete" when nothing is left to run; "pending" while not yet ready; otherwise "running".
                   if (!status.jobsToRun().containsKey(stepStatus.job().get()))
                       response.setString("status", "complete");
                   else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true))
                       response.setString("status", "pending");
                   else
                       response.setString("status", "running");
               });
    }
    Cursor activity = response.setObject("activity");
    deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
    deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
    deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
    deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
    DeploymentMetrics metrics = deployment.metrics();
    Cursor metricsObject = response.setObject("metrics");
    metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
    metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
    metricsObject.setDouble("documentCount", metrics.documentCount());
    metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
    metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
    metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}

/** Serializes an application version; no-op for unknown versions (continues past this chunk). */
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
    if ( !
applicationVersion.isUnknown()) {
        object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
        object.setString("hash", applicationVersion.id());
        sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
        applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
        applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit));
    }
}

/** Serializes a source revision; no-op when the revision is absent. */
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
    if (revision.isEmpty()) return;
    object.setString("gitRepository", revision.get().repository());
    object.setString("gitBranch", revision.get().branch());
    object.setString("gitCommit", revision.get().commit());
}

/** Serializes a rotation state under a "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", rotationStateString(state));
}

/** Serializes the endpoint status of each assigned rotation for the given deployment. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var array = object.setArray("endpointStatus");
    for (var rotation : rotations) {
        var statusObject = array.addObject();
        var targets = status.of(rotation.rotationId());
        statusObject.setString("endpointId", rotation.endpointId().id());
        statusObject.setString("rotationId", rotation.rotationId().asString());
        statusObject.setString("clusterId", rotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}

/** Returns the monitoring system URI for the given deployment. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}

/**
 * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
 *
 * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
 * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
 */
private Version compileVersion(TenantAndApplicationId id) {
    Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
    VersionStatus versionStatus = controller.readVersionStatus();
    return versionStatus.versions().stream()
                        .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
                        .filter(VespaVersion::isReleased)
                        .map(VespaVersion::versionNumber)
                        .filter(version -> ! version.isAfter(oldestPlatform))
                        .max(Comparator.naturalOrder())
                        // Fall back to released artifacts not known to the version status.
                        .orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
                                                   .filter(version -> ! version.isAfter(oldestPlatform))
                                                   .filter(version -> ! versionStatus.versions().stream()
                                                                                     .map(VespaVersion::versionNumber)
                                                                                     .collect(Collectors.toSet()).contains(version))
                                                   .max(Comparator.naturalOrder())
                                                   .orElseThrow(() -> new IllegalStateException("No available releases of " + controller.mavenRepository().artifactId())));
}

/** Sets both the rotation-backed and the policy-backed global routing status of the given deployment. */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    var deploymentId = new DeploymentId(instance.id(), zone);
    setGlobalRotationStatus(deploymentId, inService, request);
    setGlobalEndpointStatus(deploymentId, inService, request);
    return new MessageResponse(String.format("Successfully set %s in %s %s service",
                                             instance.id().toShortString(), zone, inService ? "in" : "out of"));
}

/** Set the global endpoint status for given deployment.
This only applies to global endpoints backed by a cloud service */
private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    var status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out;
    controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent);
}

/** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */
private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) {
    var requestData = toSlime(request.getData()).get();
    var reason = mandatory("reason", requestData).asString();
    var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant;
    long timestamp = controller.clock().instant().getEpochSecond();
    var status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    var endpointStatus = new EndpointStatus(status, reason, agent.name(), timestamp);
    controller.routing().setGlobalRotationStatus(deployment, endpointStatus);
}

/** Returns the current global rotation override status per endpoint for the given deployment. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    // The array alternates: upstream id string, then the status object for that endpoint.
    controller.routing().globalRotationStatus(deploymentId)
              .forEach((endpoint, status) -> {
                  array.addString(endpoint.upstreamIdOf(deploymentId));
                  Cursor statusObject = array.addObject();
                  statusObject.setString("status", status.getStatus().name());
                  statusObject.setString("reason", status.getReason() == null ? "" : status.getReason());
                  statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent());
                  statusObject.setLong("timestamp", status.getEpoch());
              });
    return new SlimeJsonResponse(slime);
}

/** Returns the rotation status ("bcpStatus") of the given deployment for the selected endpoint. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    toSlime(instance.rotationStatus().of(rotation, deployment), response);
    return new SlimeJsonResponse(slime);
}

/** Returns metering data for the application: current rate, this/last month totals, and per-instance history. */
private HttpResponse metering(String tenant, String application, HttpRequest request) {
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    MeteringData meteringData = controller.serviceRegistry()
                                          .meteringService()
                                          .getMeteringData(TenantName.from(tenant), ApplicationName.from(application));
    ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot();
    Cursor currentRate = root.setObject("currentrate");
    currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
    currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
    currentRate.setDouble("disk", currentSnapshot.getDiskGb());
    ResourceAllocation thisMonth = meteringData.getThisMonth();
    Cursor thismonth = root.setObject("thismonth");
    thismonth.setDouble("cpu", thisMonth.getCpuCores());
    thismonth.setDouble("mem", thisMonth.getMemoryGb());
    thismonth.setDouble("disk", thisMonth.getDiskGb());
    ResourceAllocation lastMonth = meteringData.getLastMonth();
    Cursor lastmonth = root.setObject("lastmonth");
    lastmonth.setDouble("cpu", lastMonth.getCpuCores());
    lastmonth.setDouble("mem", lastMonth.getMemoryGb());
    lastmonth.setDouble("disk", lastMonth.getDiskGb());
    // Per-instance time series, one parallel array each for cpu, mem and disk.
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuApp = detailsCpu.setObject(instanceName);
        Cursor detailsMemApp = detailsMem.setObject(instanceName);
        Cursor detailsDiskApp = detailsDisk.setObject(instanceName);
        Cursor detailsCpuData = detailsCpuApp.setArray("data");
        Cursor detailsMemData = detailsMemApp.setArray("data");
        Cursor detailsDiskData = detailsDiskApp.setArray("data");
        resources.forEach(resourceSnapshot -> {
            Cursor cpu = detailsCpuData.addObject();
            cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            cpu.setDouble("value", resourceSnapshot.getCpuCores());
            Cursor mem = detailsMemData.addObject();
            mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            mem.setDouble("value", resourceSnapshot.getMemoryGb());
            Cursor disk = detailsDiskData.addObject();
            disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            disk.setDouble("value", resourceSnapshot.getDiskGb());
        });
    });
    return new SlimeJsonResponse(slime);
}

/** Returns the instance's currently deploying change, if any (continues past this chunk). */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( !
instance.change().isEmpty()) {
        instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
        instance.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", instance.change().isPinned());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    boolean suspended = controller.applications().isSuspended(deploymentId);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", suspended);
    return new SlimeJsonResponse(slime);
}

/** Returns the service view of the given deployment. */
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                        new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                        controller.zoneRegistry().getConfigServerApiUris(zone),
                                                        request.getUri());
    response.setResponse(applicationView);
    return response;
}

/** Proxies a service API request; cluster controller status pages are returned as HTML, everything else as JSON. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    if ("container-clustercontroller".equals((serviceName)) && restPath.contains("/status/")) {
        String[] parts = restPath.split("/status/");
        String result = controller.serviceRegistry().configServer().getClusterControllerStatus(deploymentId, parts[0], parts[1]);
        return new HtmlResponse(result);
    }
    Map<?,?> result = controller.serviceRegistry().configServer().getServiceApiResponse(deploymentId, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(deploymentId.zoneId(),
                                                        deploymentId.applicationId(),
                                                        controller.zoneRegistry().getConfigServerApiUris(deploymentId.zoneId()),
                                                        request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}

/** Returns content of the deployed application package at the given path. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, "/" + restPath, request.getUri());
}

/** Updates an existing tenant from the request body and returns its new state. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName);
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}

/** Creates a tenant from the request body and returns its state. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}

/** Creates a new application under the given tenant. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    // NOTE(review): the returned application is unused — the call is kept for its creation side effect.
    com.yahoo.vespa.hosted.controller.Application application = controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/** Creates a new instance of the given application, creating the application first if it does not exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    controller.applications().createInstance(applicationId.instance(instanceName));
    Slime slime = new Slime();
    toSlime(applicationId.instance(instanceName), slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version in the request means "deploy the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Trigger deployment to the last known application package for the given application.
*/ private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) { controller.auditLogger().log(request); ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = Change.of(application.get().latestVersion().get()); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); } /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. 
*/ private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .collect(toUnmodifiableList()); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! type.isBlank()) .collect(toUnmodifiableList()); controller.applications().reindex(id, zone, clusterNames, documentTypes); return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)))); } /** Gets reindexing status of an application in a zone. 
*/
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    setStatus(root.setObject("status"), reindexing.common());
    // One entry per cluster, sorted by cluster name, each with pending and ready document types.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  cluster.getValue().common().ifPresent(common -> setStatus(clusterObject.setObject("status"), common));
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}

/** Serializes a reindexing status: each timestamp/state/message/progress field is set only when present. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> 
statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
}

/** Maps a reindexing state to its wire name; returns null for unrecognized states. */
private static String toString(ApplicationReindexing.State state) {
    switch (state) {
        case PENDING: return "pending";
        case RUNNING: return "running";
        case FAILED: return "failed";
        case SUCCESSFUL: return "successful";
        default: return null;
    }
}

/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}

/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}

/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    // Optional query parameters narrow the restart to a host, cluster type, and/or cluster id.
    RestartFilter restartFilter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::from))
.withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deploymentId, restartFilter);
    return new MessageResponse("Requested restart of " + deploymentId);
}

/** Deploys the uploaded application package directly to the zone of the given job type. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("applicationZip"))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // Optional "vespaVersion" in the "deployOptions" form part selects the platform version.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);
    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id(); // run was just created above
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}

/** Deploys an application package, or a previously known version of it, to the given zone. */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    /*
     * Special handling of the proxy application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
    if (isZoneApplication) {
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            throw new RuntimeException("Version not supported for system applications");
        }
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                                          .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }
    /*
     * Normal applications from here
     */
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);
    // NOTE(review): 'application' appears unused below — TODO confirm before removing.
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");
        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision), buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId, applicationVersion.get()));
    }
    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
    // Redeploy the currently deployed package when nothing else is specified.
    if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
        Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
                                                    .map(Instance::deployments)
                                                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if(deployment.isEmpty())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId, applicationVersion.get()));
    }
    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());
    applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
                                                                                                              Optional.of(applicationId.instance()),
                                                                                                              Optional.of(zone),
                                                                                                              aPackage,
                                                                                                              Optional.of(requireUserPrincipal(request))));
    ActivateResult result = controller.applications().deploy(applicationId, zone, applicationPackage, applicationVersion, deployOptionsJsonClass);
    return new SlimeJsonResponse(toSlime(result));
}

/** Deletes a tenant, authorized by the credentials in the request body. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    controller.tenants().delete(tenant.get().name(),
                                accessControlRequests.credentials(tenant.get().name(),
                                                                  toSlime(request.getData()).get(),
                                                                  request.getJDiscRequest()));
    return tenant(tenant.get(), request);
}

/** Deletes an application and all its instances. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}

/** Deletes an instance; also deletes the application when this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    if 
(controller.applications().requireApplication(id).instances().isEmpty()) {
        // Last instance gone: remove the application itself as well.
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}

/** Deactivates (removes) the deployment of an instance in a zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}

/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(defaultInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(defaultInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    var testedZone = type.zone(controller.system());
    // Add the zone under test unless it is already covered by the production deployments above.
    if ( ! 
type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}

/** Parses a source revision from JSON; all of "repository", "branch" and "commit" are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    if (!object.field("repository").valid() ||
        !object.field("branch").valid() ||
        !object.field("commit").valid()) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(object.field("repository").asString(),
                              object.field("branch").asString(),
                              object.field("commit").asString());
}

/** Returns the tenant with the given name, or throws NotExistsException (404). */
private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().get(tenantName)
                     .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}

/** Serializes a tenant, including type-specific metadata and its applications. */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", 
creator.getName()));
            // Registered developer keys, one {key, user} object per key.
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            // Quota for the tenant, and the total usage across its applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                                        .map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
                                        .reduce(QuotaUsage.none, QuotaUsage::add);
            toSlime(tenantQuota, usedQuota, object.setObject("quota"));
            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    Cursor applicationArray = object.setArray("applications");
    for (com.yahoo.vespa.hosted.controller.Application application : applications) {
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), instance, status, request);
            else
                toSlime(instance.id(), applicationArray.addObject(), request);
    }
}

/** Serializes a quota and its usage; a missing budget is emitted as JSON null. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    quota.budget().ifPresentOrElse(
            budget -> object.setDouble("budget", budget.doubleValue()),
            () -> object.setNix("budget")
    );
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(maxClusterSize -> object.setLong("clusterSize", maxClusterSize));
}

/** Serializes cluster resources: node/group counts and per-node resources. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    // Cost is only exposed in non-public systems.
    if ( ! 
controller.zoneRegistry().system().isPublic())
        object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / 3.0) / 100.0);
}

/** Serializes a list of scaling events, each with from/to resources and a timestamp. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent scalingEvent : scalingEvents) {
        Cursor scalingEventObject = scalingEventsArray.addObject();
        toSlime(scalingEvent.from(), scalingEventObject.setObject("from"));
        toSlime(scalingEvent.to(), scalingEventObject.setObject("to"));
        scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli());
    }
}

/** Serializes per-node resources. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}

/** Serializes the compact tenant representation used in the tenant list. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
    tenantMetaDataToSlime(tenant, metaData);
}

/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), 
uri.getHost(), uri.getPort(), newPath, newQuery, null);
    } catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e);
    }
}

/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    return withPathAndQuery(newPath, null, uri);
}

/** Returns the application/v4 path for the given deployment. */
private String toPath(DeploymentId id) {
    return path("/application", "v4",
                "tenant", id.applicationId().tenant(),
                "application", id.applicationId().application(),
                "instance", id.applicationId().instance(),
                "environment", id.zoneId().environment(),
                "region", id.zoneId().region());
}

/** Parses the given string as a long, returning the default when null, or 400 when unparseable. */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}

/** Serializes a job run: id, target versions, reason and end (or start) timestamp. */
private void toSlime(Run run, Cursor object) {
    object.setLong("id", run.id().number());
    object.setString("version", run.versions().targetPlatform().toFullString());
    if ( ! run.versions().targetApplication().isUnknown())
        toSlime(run.versions().targetApplication(), object.setObject("revision"));
    object.setString("reason", "unknown reason");
    object.setLong("at", run.end().orElse(run.start()).toEpochMilli());
}

/** Reads up to 1 MB of JSON from the given stream into a Slime tree. */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
        return SlimeUtils.jsonToSlime(jsonBytes);
    } catch (IOException e) {
        // Preserve the cause: the original threw a bare RuntimeException, hiding what went wrong.
        throw new RuntimeException(e);
    }
}

/** Returns the authenticated user principal of the request, or 500 when absent. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null) throw new InternalServerErrorException("Expected a user principal");
    return principal;
}

/** Returns the given field, or 400 when it is missing. */
private Inspector mandatory(String key, Inspector object) {
    if ( ! 
object.field(key).valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return object.field(key);
}

/** Returns the given field's string value when present and non-empty. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}

/** Joins the given elements with '/'. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}

/** Serializes an application id with a self-url. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("url", withPath("/application/v4" +
                                     "/tenant/" + id.tenant().value() +
                                     "/application/" + id.application().value(),
                                     request.getUri()).toString());
}

/** Serializes an instance id with a self-url. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    object.setString("url", withPath("/application/v4" +
                                     "/tenant/" + id.tenant().value() +
                                     "/application/" + id.application().value() +
                                     "/instance/" + id.instance().value(),
                                     request.getUri()).toString());
}

/** Serializes the result of a deployment activation: revision, package size, prepare log and config change actions. */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}

/** Serializes a list of service infos. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo serviceInfo : serviceInfoList) {
        Cursor serviceInfoObject = array.addObject();
        serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
        serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
        serviceInfoObject.setString("configId", serviceInfo.configId);
        serviceInfoObject.setString("hostName", serviceInfo.hostName);
    }
}

/** Adds each string to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    for (String string : strings)
        array.addString(string);
}

/** Reads the whole stream as a single string; returns null when the stream is empty. */
private String readToString(InputStream stream) {
    Scanner scanner = new Scanner(stream).useDelimiter("\\A"); // \A: match the entire input as one token
    if ( ! 
scanner.hasNext()) return null;
    return scanner.next();
}

/** "recursive=tenant" (or deeper) expands tenants in list responses. */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}

/** "recursive=application" (or deeper) expands applications in list responses. */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}

/** "recursive=all|true|deployment" expands deployments in list responses. */
private static boolean recurseOverDeployments(HttpRequest request) {
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}

/** "production=true" restricts list responses to production instances. */
private static boolean showOnlyProductionInstances(HttpRequest request) {
    return "true".equals(request.getProperty("production"));
}

/** Maps a tenant type to its wire name. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
    }
}

/** Extracts the instance id from the request path. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}

/** Extracts the job type from the request path. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}

/** Extracts the run id from the request path. */
private static RunId runIdFromPath(Path path) {
    long number = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}

/** Submits a new application package revision for the continuous deployment pipeline. */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = Math.max(1, submitOptions.field("projectId").asLong());
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision requires all three of repository, branch and commit.
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? 
Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     Optional.empty(),
                                                                     Optional.empty(),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        sourceUrl,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}

/** Removes all production deployments by submitting a deployment-removal package. */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
                                                 Optional.empty(), Optional.empty(), Optional.empty(), 1,
                                                 ApplicationPackage.deploymentRemoval(), new byte[0]);
    return new MessageResponse("All deployments removed");
}

/** Parses and validates the zone from path parameters; the synthetic prod "controller" zone is always allowed. */
private ZoneId requireZone(String environment, String region) {
    ZoneId zone = ZoneId.from(environment, region);
    // The "controller" zone is not known to the zone registry, but is still a valid zone reference.
    if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
        return zone;
    }
    if (!controller.zoneRegistry().hasZone(zone)) {
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    }
    return zone;
}

/** Parses the multipart request body, verifying it against the X-Content-Hash header when present. */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new 
MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    // Verify the SHA-256 of the body against the base64-encoded hash supplied by the client.
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}

/** Resolves the rotation of an instance, by the given endpoint id when present, or its single rotation otherwise. */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                       .filter(r -> r.endpointId().id().equals(endpointId.get()))
                       .map(AssignedRotation::rotationId)
                       .findFirst()
                       .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                 " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}

/** Maps a rotation state to its wire name. */
private static String rotationStateString(RotationState state) {
    switch (state) {
        case in: return "IN";
        case out: return "OUT";
    }
    return "UNKNOWN";
}

/** Maps an endpoint scope to its wire name. */
private static String endpointScopeString(Endpoint.Scope scope) {
    switch (scope) {
        case region: return "region";
        case global: return "global";
        case zone: return "zone";
    }
    throw new IllegalArgumentException("Unknown endpoint scope " + scope);
}

/** Maps a routing method to its wire name. */
private static String routingMethodString(RoutingMethod method) {
    switch (method) {
        case exclusive: return "exclusive";
        case shared: return "shared";
        case sharedLayer4: return "sharedLayer4";
    }
    throw new IllegalArgumentException("Unknown routing method " + method);
}

/** Returns the request context attribute with the given name, cast to the given type, or 400 when absent. */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
                   .filter(cls::isInstance)
                   .map(cls::cast)
                   .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}

/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    return securityContext.roles().stream()
                          .map(Role::definition)
                          .anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}

}
/**
 * Handler for the /application/v4 REST API: dispatches HTTP requests on tenants,
 * applications, instances and deployments to the per-method route tables below,
 * and maps domain exceptions onto HTTP error responses.
 */
class ApplicationApiHandler extends LoggingRequestHandler {

    // Paths may optionally be prefixed with this; it is stripped before matching.
    private static final String OPTIONAL_PREFIX = "/api";

    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;

    @Inject
    public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx);
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }

    /** Deployment-related requests can be slow, so allow a generous timeout. */
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }

    /** Dispatches on HTTP method and translates thrown exceptions to error responses. */
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
            switch (request.getMethod()) {
                case GET: return handleGET(path, request);
                case PUT: return handlePUT(path, request);
                case POST: return handlePOST(path, request);
                case PATCH: return handlePATCH(path, request);
                case DELETE: return handleDELETE(path, request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotAuthorizedException e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            // Map config server error codes onto matching HTTP status codes;
            // anything unrecognized becomes a 400.
            switch (e.getErrorCode()) {
                case NOT_FOUND:
                    return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT:
                    return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR:
                    return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(),
                                             Exceptions.toMessageString(e));
                default:
                    return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
            }
        }
        catch (RuntimeException e) {
            // Unexpected errors are logged with stack trace and returned as 500.
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    /** GET route table: read-only views of tenants, applications, instances and deployments. */
    private HttpResponse handleGET(Path path, HttpRequest request) {
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
        // Application-level routes implicitly target the "default" instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if
(path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
        // Instance-level resources.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        // Deployment (zone) level resources, instance-before-environment path order.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        // Deployment level resources, environment-before-instance (legacy) path order.
        // A second, identical check of this route, returning the same deployment(...) call,
        // was removed here as unreachable dead code.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** PUT route table: tenant updates and global rotation override (set IN). */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** POST route table: creation, submission, deployment and job-control actions. */
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return
addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        // Application-level routes implicitly target the "default" instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        // Instance-level routes.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        // Deployment (zone) level routes, instance-before-environment path order.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Deployment level routes, environment-before-instance (legacy) path order.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** PATCH route table: partial application updates; instance paths patch the application. */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** DELETE route table: removal of tenants, applications, instances, keys and deployments. */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        // "deploying" routes cancel pending change; {choice} selects what to cancel ("all" by default).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
        // DELETE on a pause resumes the paused job.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        // Environment-before-instance (legacy) path order.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Advertises the supported HTTP methods. */
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Lists all tenants, fully serialized, under the API root. */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            toSlime(tenantArray.addObject(), tenant, request);
        return new
SlimeJsonResponse(slime); }

    /** API root: either the full recursive tenant listing, or just a link to "tenant". */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request) ? recursiveRoot(request) : new ResourceResponse(request, "tenant");
    }

    /** Lists all tenants in compact (list-entry) form. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Serializes the named tenant, or 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Serializes the given tenant. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns tenant info; only cloud tenants carry info, others get 404. */
    private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Serializes tenant info; empty info yields an empty JSON object. */
    private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
        Slime slime = new Slime();
        Cursor infoCursor = slime.setObject();
        if (!info.isEmpty()) {
            infoCursor.setString("name", info.name());
            infoCursor.setString("email", info.email());
            infoCursor.setString("website", info.website());
            infoCursor.setString("invoiceEmail", info.invoiceEmail());
            infoCursor.setString("contactName", info.contactName());
            infoCursor.setString("contactEmail", info.contactEmail());
            toSlime(info.address(), infoCursor);
            toSlime(info.billingContact(), infoCursor);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Serializes an address under "address"; empty addresses are omitted entirely. */
    private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
        if (address.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("address");
        addressCursor.setString("addressLines", address.addressLines());
        addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip());
        addressCursor.setString("city", address.city());
        addressCursor.setString("stateRegionProvince", address.stateRegionProvince());
        addressCursor.setString("country", address.country());
    }

    /** Serializes a billing contact under "billingContact"; empty contacts are omitted. */
    private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
        if (billingContact.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("billingContact");
        addressCursor.setString("name", billingContact.name());
        addressCursor.setString("email", billingContact.email());
        addressCursor.setString("phone", billingContact.phone());
        toSlime(billingContact.address(), addressCursor);
    }

    /** Updates tenant info from the request body; only cloud tenants support this. */
    private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /**
     * Returns the field's string value, or the given default if the field is absent.
     * NOTE(review): the parameter name 'defaultVale' looks like a typo for 'defaultValue'.
     */
    private String getString(Inspector field, String defaultVale) {
        return field.valid() ?
field.asString() : defaultVale; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); TenantInfo mergedInfo = TenantInfo.EMPTY .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.email())) .withInvoiceEmail(getString(insp.field("invoiceEmail"), oldInfo.invoiceEmail())) .withContactName(getString(insp.field("contactName"), oldInfo.contactName())) .withContactEmail(getString(insp.field("contactEmail"), oldInfo.contactName())) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBillingContact(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact())); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantInfoAddress updateTenantInfoAddress(Inspector insp, TenantInfoAddress oldAddress) { if (!insp.valid()) return oldAddress; return TenantInfoAddress.EMPTY .withCountry(getString(insp.field("country"), oldAddress.country())) .withStateRegionProvince(getString(insp.field("stateRegionProvince"), oldAddress.stateRegionProvince())) .withCity(getString(insp.field("city"), oldAddress.city())) .withPostalCodeOrZip(getString(insp.field("postalCodeOrZip"), oldAddress.postalCodeOrZip())) .withAddressLines(getString(insp.field("addressLines"), oldAddress.addressLines())); } private TenantInfoBillingContact updateTenantInfoBillingContact(Inspector insp, TenantInfoBillingContact oldContact) { if (!insp.valid()) return oldContact; return TenantInfoBillingContact.EMPTY .withName(getString(insp.field("name"), oldContact.name())) .withEmail(getString(insp.field("email"), oldContact.email())) 
.withPhone(getString(insp.field("phone"), oldContact.phone()))
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    }

    /**
     * Lists the applications of a tenant, optionally filtered to a single application name,
     * each with its instances (only production instances if so requested).
     */
    private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        if (controller.tenants().get(tenantName).isEmpty())
            return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
        Slime slime = new Slime();
        Cursor applicationArray = slime.setArray();
        for (com.yahoo.vespa.hosted.controller.Application application : controller.applications().asList(tenant)) {
            // Empty filter means list all applications of the tenant.
            if (applicationName.map(application.id().application().value()::equals).orElse(true)) {
                Cursor applicationObject = applicationArray.addObject();
                applicationObject.setString("tenant", application.id().tenant().value());
                applicationObject.setString("application", application.id().application().value());
                applicationObject.setString("url", withPath("/application/v4" +
                                                            "/tenant/" + application.id().tenant().value() +
                                                            "/application/" + application.id().application().value(),
                                                            request.getUri()).toString());
                Cursor instanceArray = applicationObject.setArray("instances");
                for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                                  : application.instances().keySet()) {
                    Cursor instanceObject = instanceArray.addObject();
                    instanceObject.setString("instance", instance.value());
                    instanceObject.setString("url", withPath("/application/v4" +
                                                             "/tenant/" + application.id().tenant().value() +
                                                             "/application/" + application.id().application().value() +
                                                             "/instance/" + instance.value(),
                                                             request.getUri()).toString());
                }
            }
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the application package last deployed to a manually deployed (dev/perf) zone, as a zip. */
    private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
        if ( ! type.environment().isManuallyDeployed())
            throw new IllegalArgumentException("Only manually deployed zones have dev packages");
        ZoneId zone = type.zone(controller.system());
        byte[] applicationPackage = controller.applications().applicationStore().getDev(id, zone);
        return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
    }

    /**
     * Returns a submitted application package as a zip: the build given by the "build"
     * query parameter, or the latest submitted build when none is given.
     */
    private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
        var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
        var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());
        long buildNumber;
        var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
            try {
                return Long.parseLong(build);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("Invalid build number", e);
            }
        });
        if (requestedBuild.isEmpty()) {
            // No explicit build requested: fall back to the latest submitted version.
            var application = controller.applications().requireApplication(tenantAndApplication);
            var latestBuild = application.latestVersion().map(ApplicationVersion::buildNumber).orElse(OptionalLong.empty());
            if (latestBuild.isEmpty()) {
                throw new NotExistsException("No application package has been submitted for '" + tenantAndApplication + "'");
            }
            buildNumber = latestBuild.getAsLong();
        } else {
            buildNumber = requestedBuild.get();
        }
        var applicationPackage = controller.applications().applicationStore().find(tenantAndApplication.tenant(), tenantAndApplication.application(), buildNumber);
        var filename = tenantAndApplication + "-build" + buildNumber + ".zip";
        if (applicationPackage.isEmpty()) {
            throw new NotExistsException("No application package found for '" + tenantAndApplication + "' with build number " + buildNumber);
        }
        return new ZipResponse(filename, applicationPackage.get());
    }

    /** Serializes a single application. */
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the Vespa version an application should compile against. */
    private HttpResponse compileVersion(String tenantName, String applicationName) {
        Slime slime = new Slime();
        slime.setObject().setString("compileVersion", compileVersion(TenantAndApplicationId.from(tenantName, applicationName)).toFullString());
        return new SlimeJsonResponse(slime);
    }

    /** Serializes a single instance together with its deployment status. */
    private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
                controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * Registers the PEM-encoded public key in the request body as a developer key of the
     * requesting user, and returns the tenant's resulting key set. Cloud tenants only.
     */
    private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        Principal user = request.getJDiscRequest().getUserPrincipal();
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withDeveloperKey(developerKey, user);
            // Serialize the updated key set while still holding the lock.
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /**
     * Removes the PEM-encoded public key in the request body from the tenant's developer
     * keys, and returns the resulting key set. Cloud tenants only.
     */
    private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        // Looked up for symmetry with addDeveloperKey; not otherwise used here.
        Principal user = ((CloudTenant) controller.tenants().require(TenantName.from(tenantName))).developerKeys().get(developerKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withoutDeveloperKey(developerKey);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Serializes a developer key set as an array of { key, user } objects. */
    private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
        keys.forEach((key, principal) -> {
            Cursor keyObject = keysArray.addObject();
            keyObject.setString("key", KeyUtils.toPem(key));
            keyObject.setString("user", principal.getName());
        });
    }

    /** Adds the PEM-encoded public key in the request body as a deploy key of the application. */
    private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /** Removes the PEM-encoded public key in the request body from the application's deploy keys. */
    private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withoutDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }
/**
 * Handles PATCH of application-level settings. Currently supports updating
 * "majorVersion" and "pemDeployKey" from the request body, applied under the
 * application lock, and returns a message listing the changes that were made.
 */
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    // Joined summary of applied changes; falls back to this text when no field matched.
    StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        Inspector majorVersionField = requestObject.field("majorVersion");
        if (majorVersionField.valid()) {
            // A value of 0 clears the pinned major version (stored as null).
            Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
            application = application.withMajorVersion(majorVersion);
            messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
        }
        Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
        if (pemDeployKeyField.valid()) {
            String pemDeployKey = pemDeployKeyField.asString();
            PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
            application = application.withDeployKey(deployKey);
            messageBuilder.add("Added deploy key " + pemDeployKey);
        }
        controller.applications().store(application);
    });
    return new MessageResponse(messageBuilder.toString());
}

/** Returns the application with the given tenant and application name, or throws NotExistsException. */
private com.yahoo.vespa.hosted.controller.Application getApplication(String tenantName, String applicationName) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    return controller.applications().getApplication(applicationId)
                     .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/** Returns the instance with the given tenant, application and instance name, or throws NotExistsException. */
private Instance getInstance(String tenantName, String applicationName, String instanceName) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    return controller.applications().getInstance(applicationId)
                     .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}

/**
 * Lists the nodes allocated to the given instance in the given zone, as a JSON
 * "nodes" array with per-node state, orchestration status, version, flavor,
 * resources and cluster membership.
 */
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);

    Slime slime = new Slime();
    Cursor nodesArray = slime.setObject().setArray("nodes");
    for (Node node : nodes) {
        Cursor nodeObject = nodesArray.addObject();
        nodeObject.setString("hostname", node.hostname().value());
        nodeObject.setString("state", valueOf(node.state()));
        // "reservedTo" is only present for nodes reserved to a tenant.
        node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
        nodeObject.setString("orchestration", valueOf(node.serviceState()));
        nodeObject.setString("version", node.currentVersion().toString());
        nodeObject.setString("flavor", node.flavor());
        toSlime(node.resources(), nodeObject);
        nodeObject.setBool("fastDisk", node.resources().diskSpeed() == NodeResources.DiskSpeed.fast);
        nodeObject.setString("clusterId", node.clusterId());
        nodeObject.setString("clusterType", valueOf(node.clusterType()));
    }
    return new SlimeJsonResponse(slime);
}

/**
 * Renders the clusters of the given deployment as JSON, including min/max/current
 * resources, the autoscaling target (only when it differs from current), any
 * suggested resources, scaling events and the autoscaling status.
 */
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

    Slime slime = new Slime();
    Cursor clustersObject = slime.setObject().setObject("clusters");
    for (Cluster cluster : application.clusters().values()) {
        Cursor clusterObject = clustersObject.setObject(cluster.id().value());
        toSlime(cluster.min(), clusterObject.setObject("min"));
        toSlime(cluster.max(), clusterObject.setObject("max"));
        toSlime(cluster.current(), clusterObject.setObject("current"));
        // Only emit "target" when it actually differs from the current resources.
        if (cluster.target().isPresent() && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
            toSlime(cluster.target().get(), clusterObject.setObject("target"));
        cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
        scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
        clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
    }
    return new SlimeJsonResponse(slime);
}

/** Maps a node state to its API string; throws IllegalArgumentException for unmapped states. */
private static String valueOf(Node.State state) {
    switch (state) {
        case failed: return "failed";
        case parked: return "parked";
        case dirty: return "dirty";
        case ready: return "ready";
        case active: return "active";
        case inactive: return "inactive";
        case reserved: return "reserved";
        case provisioned: return "provisioned";
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}

/** Maps an orchestration (service) state to its API string; throws IllegalArgumentException for unmapped states. */
private static String valueOf(Node.ServiceState state) {
    switch (state) {
        case expectedUp: return "expectedUp";
        case allowedDown: return "allowedDown";
        case unorchestrated: return "unorchestrated";
        default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    }
}

/** Maps a cluster type to its API string; throws IllegalArgumentException for unmapped types. */
private static String valueOf(Node.ClusterType type) {
    switch (type) {
        case admin: return "admin";
        case content: return "content";
        case container: return "container";
        case combined: return "combined";
        default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    }
}

/** Maps a disk speed to its API string; throws IllegalArgumentException for unmapped values. */
private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
    switch (diskSpeed) {
        case fast : return "fast";
        case slow : return "slow";
        case any : return "any";
        default: throw new IllegalArgumentException("Unknown disk speed '" + diskSpeed.name() + "'");
    }
}

/** Maps a storage type to its API string; throws IllegalArgumentException for unmapped values. */
private static String valueOf(NodeResources.StorageType storageType) {
    switch (storageType) {
        case remote : return "remote";
        case local : return "local";
        case any : return "any";
        default: throw new IllegalArgumentException("Unknown storage type '" + storageType.name() + "'");
    }
}
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { logStream.transferTo(outputStream); } }; } private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment); return buildResponseFromProtonMetrics(protonMetrics); } private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) { try { var jsonObject = new JSONObject(); var jsonArray = new JSONArray(); for (ProtonMetrics metrics : protonMetrics) { jsonArray.put(metrics.toJson()); } jsonObject.put("metrics", jsonArray); return new JsonResponse(200, jsonObject.toString()); } catch (JSONException e) { log.severe("Unable to build JsonResponse with Proton data"); return new JsonResponse(500, ""); } } private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); boolean requireTests = ! requestObject.field("skipTests").asBool(); boolean reTrigger = requestObject.field("reTrigger").asBool(); String triggered = reTrigger ? 
controller.applications().deploymentTrigger() .reTrigger(id, type).type().jobName() : controller.applications().deploymentTrigger() .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName(), requireTests) .stream().map(job -> job.type().jobName()).collect(joining(", ")); return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id); } private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } private HttpResponse resume(ApplicationId id, JobType type) { controller.applications().deploymentTrigger().resumeJob(id, type); return new MessageResponse(type.jobName() + " for " + id + " resumed"); } private void toSlime(Cursor object, com.yahoo.vespa.hosted.controller.Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion"))); application.projectId().ifPresent(id -> object.setLong("projectId", id)); application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change()); if ( ! 
status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name())); }); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) { object.setString("instance", instance.name().value()); if (deploymentSpec.instance(instance.name()).isPresent()) { List<JobStatus> jobStatus = 
controller.applications().deploymentTrigger() .steps(deploymentSpec.requireInstance(instance.name())) .sortedJobs(status.instanceJobs(instance.name()).values()); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change()); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name())); Cursor changeBlockers = object.setArray("changeBlockers"); deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } globalEndpointsToSlime(object, instance); List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> new DeploymentSteps(spec, controller::system)) .map(steps -> steps.sortedDeployments(instance.deployments().values())) .orElse(List.copyOf(instance.deployments().values())); Cursor deploymentsArray = object.setArray("deployments"); for (Deployment deployment : deployments) { Cursor deploymentObject = deploymentsArray.addObject(); if (deployment.zone().environment() == Environment.prod && ! 
instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } } private void globalEndpointsToSlime(Cursor object, Instance instance) { var globalEndpointUrls = new LinkedHashSet<String>(); controller.routing().endpointsOf(instance.id()) .requiresRotation() .not().legacy() .asList().stream() .map(Endpoint::url) .map(URI::toString) .forEach(globalEndpointUrls::add); var globalRotationsArray = object.setArray("globalRotations"); globalEndpointUrls.forEach(globalRotationsArray::addString); instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); } private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { com.yahoo.vespa.hosted.controller.Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.latestVersion().ifPresent(version -> { sourceRevisionToSlime(version.source(), 
object.setObject("source")); version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { List<JobStatus> jobStatus = controller.applications().deploymentTrigger() .steps(application.deploymentSpec().requireInstance(instance.name())) .sortedJobs(status.instanceJobs(instance.name()).values()); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change()); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name())); Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); globalEndpointsToSlime(object, instance); List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> new DeploymentSteps(spec, controller::system)) .map(steps -> steps.sortedDeployments(instance.deployments().values())) .orElse(List.copyOf(instance.deployments().values())); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment 
: deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()) .map(job -> job.type().zone(controller.system())) .filter(zone -> ! 
instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); }); application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region)); 
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.application() .filter(version -> !version.isUnknown()) .ifPresent(version -> toSlime(version, object.setObject("revision"))); } private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId) .scope(Endpoint.Scope.zone) .not().legacy(); for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) { toSlime(endpoint, endpointArray.addObject()); } EndpointList globalEndpoints = controller.routing().endpointsOf(application, 
deploymentId.applicationId().instance()) .not().legacy() .targets(deploymentId.zoneId()); for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.applicationVersion().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli())); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.applicationVersion().source(), response); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); JobType.from(controller.system(), deployment.zone()) .map(type -> new JobId(instance.id(), type)) .map(status.jobSteps()::get) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.applicationVersionToSlime( response.setObject("applicationVersion"), 
deployment.applicationVersion()); if (!status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true)) response.setString("status", "pending"); else response.setString("status", "running"); }); } Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if ( ! 
applicationVersion.isUnknown()) { object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong()); object.setString("hash", applicationVersion.id()); sourceRevisionToSlime(applicationVersion.source(), object.setObject("source")); applicationVersion.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); applicationVersion.commit().ifPresent(commit -> object.setString("commit", commit)); } } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if (revision.isEmpty()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private void toSlime(RotationState state, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", rotationStateString(state)); } private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) { var array = object.setArray("endpointStatus"); for (var rotation : rotations) { var statusObject = array.addObject(); var targets = status.of(rotation.rotationId()); statusObject.setString("endpointId", rotation.endpointId().id()); statusObject.setString("rotationId", rotation.rotationId().asString()); statusObject.setString("clusterId", rotation.clusterId().value()); statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment))); statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli()); } } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); } /** * Returns a non-broken, released version at least as old as the oldest platform the given application is on. * * If no known version is applicable, the newest version at least as old as the oldest platform is selected, * among all versions released for this system. 
If no such version exists, throws an IllegalStateException. */ private Version compileVersion(TenantAndApplicationId id) { Version oldestPlatform = controller.applications().oldestInstalledPlatform(id); VersionStatus versionStatus = controller.readVersionStatus(); return versionStatus.versions().stream() .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low)) .filter(VespaVersion::isReleased) .map(VespaVersion::versionNumber) .filter(version -> ! version.isAfter(oldestPlatform)) .max(Comparator.naturalOrder()) .orElseGet(() -> controller.mavenRepository().metadata().versions().stream() .filter(version -> ! version.isAfter(oldestPlatform)) .filter(version -> ! versionStatus.versions().stream() .map(VespaVersion::versionNumber) .collect(Collectors.toSet()).contains(version)) .max(Comparator.naturalOrder()) .orElseThrow(() -> new IllegalStateException("No available releases of " + controller.mavenRepository().artifactId()))); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = requireZone(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } var deploymentId = new DeploymentId(instance.id(), zone); setGlobalRotationStatus(deploymentId, inService, request); setGlobalEndpointStatus(deploymentId, inService, request); return new MessageResponse(String.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ? "in" : "out of")); } /** Set the global endpoint status for given deployment. 
This only applies to global endpoints backed by a cloud service */ private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) { var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant; var status = inService ? GlobalRouting.Status.in : GlobalRouting.Status.out; controller.routing().policies().setGlobalRoutingStatus(deployment, status, agent); } /** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */ private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) { var requestData = toSlime(request.getData()).get(); var reason = mandatory("reason", requestData).asString(); var agent = isOperator(request) ? GlobalRouting.Agent.operator : GlobalRouting.Agent.tenant; long timestamp = controller.clock().instant().getEpochSecond(); var status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out; var endpointStatus = new EndpointStatus(status, reason, agent.name(), timestamp); controller.routing().setGlobalRotationStatus(deployment, endpointStatus); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); controller.routing().globalRotationStatus(deploymentId) .forEach((endpoint, status) -> { array.addString(endpoint.upstreamIdOf(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.getStatus().name()); statusObject.setString("reason", status.getReason() == null ? "" : status.getReason()); statusObject.setString("agent", status.getAgent() == null ? 
"" : status.getAgent()); statusObject.setLong("timestamp", status.getEpoch()); }); return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().requireInstance(applicationId); ZoneId zone = requireZone(environment, region); RotationId rotation = findRotationId(instance, endpointId); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(instance.rotationStatus().of(rotation, deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse metering(String tenant, String application, HttpRequest request) { Slime slime = new Slime(); Cursor root = slime.setObject(); MeteringData meteringData = controller.serviceRegistry() .meteringService() .getMeteringData(TenantName.from(tenant), ApplicationName.from(application)); ResourceAllocation currentSnapshot = meteringData.getCurrentSnapshot(); Cursor currentRate = root.setObject("currentrate"); currentRate.setDouble("cpu", currentSnapshot.getCpuCores()); currentRate.setDouble("mem", currentSnapshot.getMemoryGb()); currentRate.setDouble("disk", currentSnapshot.getDiskGb()); ResourceAllocation thisMonth = meteringData.getThisMonth(); Cursor thismonth = root.setObject("thismonth"); thismonth.setDouble("cpu", thisMonth.getCpuCores()); thismonth.setDouble("mem", thisMonth.getMemoryGb()); thismonth.setDouble("disk", thisMonth.getDiskGb()); ResourceAllocation lastMonth = meteringData.getLastMonth(); Cursor lastmonth = root.setObject("lastmonth"); lastmonth.setDouble("cpu", lastMonth.getCpuCores()); lastmonth.setDouble("mem", lastMonth.getMemoryGb()); 
    // (continuation of metering(...)) — remaining last-month metric, then per-instance snapshot history grouped by metric.
    lastmonth.setDouble("disk", lastMonth.getDiskGb());
    Map<ApplicationId, List<ResourceSnapshot>> history = meteringData.getSnapshotHistory();
    Cursor details = root.setObject("details");
    Cursor detailsCpu = details.setObject("cpu");
    Cursor detailsMem = details.setObject("mem");
    Cursor detailsDisk = details.setObject("disk");
    history.forEach((applicationId, resources) -> {
        String instanceName = applicationId.instance().value();
        Cursor detailsCpuApp = detailsCpu.setObject(instanceName);
        Cursor detailsMemApp = detailsMem.setObject(instanceName);
        Cursor detailsDiskApp = detailsDisk.setObject(instanceName);
        Cursor detailsCpuData = detailsCpuApp.setArray("data");
        Cursor detailsMemData = detailsMemApp.setArray("data");
        Cursor detailsDiskData = detailsDiskApp.setArray("data");
        // One (unixms, value) point per snapshot, per metric.
        resources.forEach(resourceSnapshot -> {
            Cursor cpu = detailsCpuData.addObject();
            cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            cpu.setDouble("value", resourceSnapshot.getCpuCores());
            Cursor mem = detailsMemData.addObject();
            mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            mem.setDouble("value", resourceSnapshot.getMemoryGb());
            Cursor disk = detailsDiskData.addObject();
            disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
            disk.setDouble("value", resourceSnapshot.getDiskGb());
        });
    });
    return new SlimeJsonResponse(slime);
}

/** Returns the in-progress change (platform and/or application version, and pin status) for an instance. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! instance.change().isEmpty()) {
        instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
        instance.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", instance.change().isPinned());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName,
                               String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    boolean suspended = controller.applications().isSuspended(deploymentId);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", suspended);
    return new SlimeJsonResponse(slime);
}

/** Lists the services of a deployment, as reported by the config server. */
private HttpResponse services(String tenantName, String applicationName, String instanceName,
                              String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ZoneId zone = requireZone(environment, region);
    ServiceApiResponse response = new ServiceApiResponse(zone,
                                                        new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                        controller.zoneRegistry().getConfigServerApiUris(zone),
                                                        request.getUri());
    response.setResponse(applicationView);
    return response;
}

/** Proxies a service API request for the given deployment to the config server. */
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment,
                             String region, String serviceName, String restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Cluster controller status pages are served as HTML, not JSON.
    if ("container-clustercontroller".equals((serviceName)) && restPath.contains("/status/")) {
        String[] parts = restPath.split("/status/");
        String result =
*/
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    request = controller.auditLogger().log(request);
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version in the request means "the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    controller.auditLogger().log(request);
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = Change.of(application.get().latestVersion().get());
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // "clusterId" and "documentType" are optional comma-separated request properties; blanks are dropped.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .collect(toUnmodifiableList());
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .collect(toUnmodifiableList());
    controller.applications().reindex(id, zone, clusterNames, documentTypes);
    // NOTE(review): the types clause is nested inside the clusters clause, so it is omitted from the
    // message whenever clusterNames is empty — confirm whether this nesting is intentional.
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes))));
}

/** Gets reindexing status of an application in a zone. */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    setStatus(root.setObject("status"), reindexing.common());
    // One entry per cluster, sorted by cluster name, each with pending and ready document types.
    Cursor clustersArray = root.setArray("clusters");
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  cluster.getValue().common().ifPresent(common -> setStatus(clusterObject.setObject("status"), common));
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}

/** Writes the timestamps, state, message and progress of a reindexing status to the given object. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(state ->
    statusObject.setString("state", state));  // (continuation of setStatus(...))
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
}

/** Returns the lowercase wire name of a reindexing state, or null for unknown states. */
private static String toString(ApplicationReindexing.State state) {
    switch (state) {
        case PENDING: return "pending";
        case RUNNING: return "running";
        case FAILED: return "failed";
        case SUCCESSFUL: return "successful";
        default: return null;
    }
}

/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}

/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}

/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // Optional filters: hostname, clusterType and clusterId narrow which nodes are restarted.
    RestartFilter restartFilter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::from))
            .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deploymentId, restartFilter);
    return new MessageResponse("Requested restart of " + deploymentId);
}

/** Deploys an application package directly through the job controller; only for manually deployed environments (or operators). */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("applicationZip"))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone(controller.system())),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // An explicit platform version may be given in the optional "deployOptions" part.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);
    controller.jobController().deploy(id, type, version, applicationPackage);
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}

/** Deploys an application to the given zone, handling both the system proxy application and normal applications. */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    /*
     * Special handling of the proxy application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId);
    if (isZoneApplication) {
        // Deploy only the system version, and never during a system upgrade.
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            throw new RuntimeException("Version not supported for system applications");
        }
        VersionStatus versionStatus = controller.readVersionStatus();
        if (versionStatus.isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                                          .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }

    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);
    // NOTE(review): 'application' appears unused in the remainder of this method — confirm before removing.
    Optional<com.yahoo.vespa.hosted.controller.Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));

    // A (sourceRevision, buildNumber) pair identifies a stored application version; it excludes an inline package.
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");

    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");
        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId, applicationVersion.get()));
    }

    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);

    // Redeploy: direct deploy with nothing specified reuses the currently deployed versions.
    if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
        Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
                                                    .map(Instance::deployments)
                                                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if(deployment.isEmpty())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId, applicationVersion.get()));
    }

    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());

    applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(),
                                                                                                              Optional.of(applicationId.instance()),
                                                                                                              Optional.of(zone),
                                                                                                              aPackage,
                                                                                                              Optional.of(requireUserPrincipal(request))));

    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass);

    return new SlimeJsonResponse(toSlime(result));
}

/** Deletes a tenant, authorized by the credentials in the request. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().get(tenantName);
    if (tenant.isEmpty())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");

    controller.tenants().delete(tenant.get().name(),
                                accessControlRequests.credentials(tenant.get().name(),
                                                                  toSlime(request.getData()).get(),
                                                                  request.getJDiscRequest()));

    return tenant(tenant.get(), request);
}

/** Deletes an application and all its instances, authorized by the credentials in the request. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}

/** Deletes an instance, and the enclosing application if this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    if
    (controller.applications().requireApplication(id).instances().isEmpty()) {  // (continuation of deleteInstance(...))
        // Last instance gone: delete the application itself, which requires credentials.
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}

/** Deactivates (removes) the deployment of an instance in the given zone. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    return new MessageResponse("Deactivated " + id);
}

/** Returns test config for indicated job, with production deployments of the default instance. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    // Start from the production deployments of the application's default instance ...
    ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(defaultInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(defaultInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    var testedZone = type.zone(controller.system());
    // ... and add the non-production zone under test, for the instance actually being tested.
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(id, testedZone));
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  controller.routing().zoneEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}

/** Parses a source revision from the given object; "repository", "branch" and "commit" are all required. */
private static SourceRevision toSourceRevision(Inspector object) {
    if (!object.field("repository").valid() ||
        !object.field("branch").valid() ||
        !object.field("commit").valid()) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(object.field("repository").asString(),
                              object.field("branch").asString(),
                              object.field("commit").asString());
}

/** Returns the tenant with the given name, or throws NotExistsException. */
private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().get(tenantName)
                     .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}

/** Writes a full tenant response: tenant-type specific metadata, plus its applications/instances. */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    List<com.yahoo.vespa.hosted.controller.Application> applications = controller.applications().asList(tenant.name());
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;

            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });

            // Tenant quota and the sum of quota used across all its applications.
            var tenantQuota = controller.serviceRegistry().billingController().getQuota(tenant.name());
            var usedQuota = applications.stream()
                                        .map(com.yahoo.vespa.hosted.controller.Application::quotaUsage)
                                        .reduce(QuotaUsage.none, QuotaUsage::add);

            toSlime(tenantQuota, usedQuota, object.setObject("quota"));

            break;
        }
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    // One entry per instance (optionally production-only); recursion controls full vs. reference serialization.
    Cursor applicationArray = object.setArray("applications");
    for (com.yahoo.vespa.hosted.controller.Application application : applications) {
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), instance, status, request);
            else
                toSlime(instance.id(), applicationArray.addObject(), request);
    }
}

/** Writes quota budget (nix when absent), used budget rate, and max cluster size if any. */
private void toSlime(Quota quota, QuotaUsage usage, Cursor object) {
    quota.budget().ifPresentOrElse(
            budget -> object.setDouble("budget", budget.doubleValue()),
            () -> object.setNix("budget")
    );
    object.setDouble("budgetUsed", usage.rate());
    quota.maxClusterSize().ifPresent(maxClusterSize -> object.setLong("clusterSize", maxClusterSize));
}

/** Writes cluster resources; cost is included only in non-public systems. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    if ( ! controller.zoneRegistry().system().isPublic())
        object.setDouble("cost", Math.round(resources.nodes() * resources.nodeResources().cost() * 100.0 / 3.0) / 100.0);
}

/** Writes each scaling event as a (from, to, at) object to the given array. */
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent scalingEvent : scalingEvents) {
        Cursor scalingEventObject = scalingEventsArray.addObject();
        toSlime(scalingEvent.from(), scalingEventObject.setObject("from"));
        toSlime(scalingEvent.to(), scalingEventObject.setObject("to"));
        scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli());
    }
}

/** Writes the individual node resource dimensions to the given object. */
private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}

/** Writes the compact tenant entry (name, metadata, url) used in tenant list responses. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
    tenantMetaDataToSlime(tenant, metaData);
}

/** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(),
uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(Run run, Cursor object) { object.setLong("id", run.id().number()); object.setString("version", run.versions().targetPlatform().toFullString()); if ( ! run.versions().targetApplication().isUnknown()) toSlime(run.versions().targetApplication(), object.setObject("revision")); object.setString("reason", "unknown reason"); object.setLong("at", run.end().orElse(run.start()).toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new InternalServerErrorException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! 
object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); 
restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static String tenantType(Tenant tenant) { switch (tenant.type()) { case athenz: return "ATHENS"; case cloud: return "CLOUD"; default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName()); } } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private static JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype")); } private static RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = Math.max(1, submitOptions.field("projectId").asLong()); Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? 
Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant), Optional.empty(), Optional.empty(), applicationPackage, Optional.of(requireUserPrincipal(request))); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application, sourceRevision, authorEmail, sourceUrl, projectId, applicationPackage, dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP)); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application, Optional.empty(), Optional.empty(), Optional.empty(), 1, ApplicationPackage.deploymentRemoval(), new byte[0]); return new MessageResponse("All deployments removed"); } private ZoneId requireZone(String environment, String region) { ZoneId zone = ZoneId.from(environment, region); if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new 
MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { switch (state) { case in: return "IN"; case out: return "OUT"; } return "UNKNOWN"; } private static String endpointScopeString(Endpoint.Scope scope) { switch (scope) { case region: return "region"; case global: return "global"; case zone: return "zone"; } throw new IllegalArgumentException("Unknown endpoint scope " + scope); } private static String routingMethodString(RoutingMethod method) { switch (method) { case exclusive: return "exclusive"; case shared: return "shared"; case sharedLayer4: return "sharedLayer4"; } throw new IllegalArgumentException("Unknown routing method " + method); } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not 
set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } }
XD
public boolean isGroupWellBalanced(OptionalInt groupId) { if (groupId.isEmpty()) return false; Group group = groups().get(groupId.getAsInt()); return (group != null) && group.isContentWellBalanced(); }
Group group = groups().get(groupId.getAsInt());
public boolean isGroupWellBalanced(OptionalInt groupId) { if (groupId.isEmpty()) return false; Group group = groups().get(groupId.getAsInt()); return (group != null) && group.isContentWellBalanced(); }
class PongCallback implements PongHandler { private final ClusterMonitor<Node> clusterMonitor; private final Node node; PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) { this.node = node; this.clusterMonitor = clusterMonitor; } @Override public void handle(Pong pong) { if (pong.badResponse()) { clusterMonitor.failed(node, pong.error().get()); } else { if (pong.activeDocuments().isPresent()) { node.setActiveDocuments(pong.activeDocuments().get()); node.setBlockingWrites(pong.isBlockingWrites()); } clusterMonitor.responded(node); } } }
class PongCallback implements PongHandler { private final ClusterMonitor<Node> clusterMonitor; private final Node node; PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) { this.node = node; this.clusterMonitor = clusterMonitor; } @Override public void handle(Pong pong) { if (pong.badResponse()) { clusterMonitor.failed(node, pong.error().get()); } else { if (pong.activeDocuments().isPresent()) { node.setActiveDocuments(pong.activeDocuments().get()); node.setBlockingWrites(pong.isBlockingWrites()); } clusterMonitor.responded(node); } } }
?
public boolean isGroupWellBalanced(OptionalInt groupId) { if (groupId.isEmpty()) return false; Group group = groups().get(groupId.getAsInt()); return (group != null) && group.isContentWellBalanced(); }
Group group = groups().get(groupId.getAsInt());
public boolean isGroupWellBalanced(OptionalInt groupId) { if (groupId.isEmpty()) return false; Group group = groups().get(groupId.getAsInt()); return (group != null) && group.isContentWellBalanced(); }
class PongCallback implements PongHandler { private final ClusterMonitor<Node> clusterMonitor; private final Node node; PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) { this.node = node; this.clusterMonitor = clusterMonitor; } @Override public void handle(Pong pong) { if (pong.badResponse()) { clusterMonitor.failed(node, pong.error().get()); } else { if (pong.activeDocuments().isPresent()) { node.setActiveDocuments(pong.activeDocuments().get()); node.setBlockingWrites(pong.isBlockingWrites()); } clusterMonitor.responded(node); } } }
class PongCallback implements PongHandler { private final ClusterMonitor<Node> clusterMonitor; private final Node node; PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) { this.node = node; this.clusterMonitor = clusterMonitor; } @Override public void handle(Pong pong) { if (pong.badResponse()) { clusterMonitor.failed(node, pong.error().get()); } else { if (pong.activeDocuments().isPresent()) { node.setActiveDocuments(pong.activeDocuments().get()); node.setBlockingWrites(pong.isBlockingWrites()); } clusterMonitor.responded(node); } } }
Same as 😆
public boolean isGroupWellBalanced(OptionalInt groupId) { if (groupId.isEmpty()) return false; Group group = groups().get(groupId.getAsInt()); return (group != null) && group.isContentWellBalanced(); }
Group group = groups().get(groupId.getAsInt());
public boolean isGroupWellBalanced(OptionalInt groupId) { if (groupId.isEmpty()) return false; Group group = groups().get(groupId.getAsInt()); return (group != null) && group.isContentWellBalanced(); }
class PongCallback implements PongHandler { private final ClusterMonitor<Node> clusterMonitor; private final Node node; PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) { this.node = node; this.clusterMonitor = clusterMonitor; } @Override public void handle(Pong pong) { if (pong.badResponse()) { clusterMonitor.failed(node, pong.error().get()); } else { if (pong.activeDocuments().isPresent()) { node.setActiveDocuments(pong.activeDocuments().get()); node.setBlockingWrites(pong.isBlockingWrites()); } clusterMonitor.responded(node); } } }
class PongCallback implements PongHandler { private final ClusterMonitor<Node> clusterMonitor; private final Node node; PongCallback(Node node, ClusterMonitor<Node> clusterMonitor) { this.node = node; this.clusterMonitor = clusterMonitor; } @Override public void handle(Pong pong) { if (pong.badResponse()) { clusterMonitor.failed(node, pong.error().get()); } else { if (pong.activeDocuments().isPresent()) { node.setActiveDocuments(pong.activeDocuments().get()); node.setBlockingWrites(pong.isBlockingWrites()); } clusterMonitor.responded(node); } } }
![Random numbers](https://assets.amuniversal.com/321a39e06d6401301d80001dd8b71c47) Perhaps add the groups to a list and pick and remove at random from that list until it is empty?
private Group selectRandomGroupWithSufficientCoverage(SearchCluster cluster, List<Integer> groupIds) { for (int numRetries = 0; numRetries < groupIds.size()*2; numRetries++) { int index = random.nextInt(groupIds.size()); int groupId = groupIds.get(index); Optional<Group> group = cluster.group(groupId); if (group.isPresent()) { if (group.get().hasSufficientCoverage()) { return group.get(); } } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (groupId + 1) + " groups"); } } return cluster.groups().values().iterator().next(); }
for (int numRetries = 0; numRetries < groupIds.size()*2; numRetries++) {
private Group selectRandomGroupWithSufficientCoverage(SearchCluster cluster, List<Integer> groupIds) { while ( ! groupIds.isEmpty()) { int index = random.nextInt(groupIds.size()); int groupId = groupIds.get(index); Optional<Group> group = cluster.group(groupId); if (group.isPresent()) { if (group.get().hasSufficientCoverage()) { return group.get(); } else { groupIds.remove(index); } } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (groupId + 1) + " groups"); } } return cluster.groups().values().iterator().next(); }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<NodeSelection> nodes; private final List<NodeSelection> groups; private static final Random random = new Random(); private SearchPath(List<NodeSelection> nodes, List<NodeSelection> group) { this.nodes = nodes; this.groups = group; } private List<Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (NodeSelection node : nodes) { wanted.addAll(node.matches(max)); } List<Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && 
groups.isEmpty(); } private Group selectGroup(SearchCluster cluster) { if ( ! groups.isEmpty()) { List<Integer> potentialGroups = new ArrayList<>(); for (NodeSelection groupSelection : groups) { for (int group = groupSelection.from; group < groupSelection.to; group++) { potentialGroups.add(group); } } return selectRandomGroupWithSufficientCoverage(cluster, potentialGroups); } return selectRandomGroupWithSufficientCoverage(cluster, cluster.groups().keySet().asList()); } private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroup = halveAt('/', element); List<NodeSelection> nodes = parseNodes(nodesAndGroup.getFirst()); List<NodeSelection> groups = parseNodes(nodesAndGroup.getSecond()); return new SearchPath(nodes, groups); } private static List<NodeSelection> parseNodes(String nodes) { List<NodeSelection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$|/$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<NodeSelection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); int start = Integer.parseInt(m.group(1)); int end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new NodeSelection(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<NodeSelection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = 
Integer.parseInt(numAndRest.getFirst()); into.add(new NodeSelection(nodeNum, nodeNum + 1)); return numAndRest.getSecond(); } private static Integer parseGroup(String group) { if (group.isEmpty()) { return null; } if ("/".equals(group) || "*".equals(group)) { return null; } return Integer.parseInt(group); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1)); } return new Pair<>(string, ""); } private static void selectionToString(StringBuilder sb, List<NodeSelection> nodes) { boolean first = true; for (NodeSelection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } } @Override public String toString() { StringBuilder sb = new StringBuilder(); selectionToString(sb, nodes); if ( ! groups.isEmpty()) { sb.append('/'); selectionToString(sb, groups); } return sb.toString(); } private static class NodeSelection { private final int from; private final int to; NodeSelection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = Math.min(to, max); return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<Selection> nodes; private final List<Selection> groups; private static final Random random = new Random(); private SearchPath(List<Selection> nodes, List<Selection> groups) { this.nodes = nodes; this.groups = groups; } private List<Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (Selection node : nodes) { wanted.addAll(node.matches(max)); } List<Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && groups.isEmpty(); } private 
Group selectGroup(SearchCluster cluster) { if ( ! groups.isEmpty()) { List<Integer> potentialGroups = new ArrayList<>(); for (Selection groupSelection : groups) { for (int group = groupSelection.from; group < groupSelection.to; group++) { potentialGroups.add(group); } } return selectRandomGroupWithSufficientCoverage(cluster, potentialGroups); } return selectRandomGroupWithSufficientCoverage(cluster, new ArrayList<>(cluster.groups().keySet())); } private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroups = halveAt('/', element); List<Selection> nodes = parseSelection(nodesAndGroups.getFirst()); List<Selection> groups = parseSelection(nodesAndGroups.getSecond()); return new SearchPath(nodes, groups); } private static List<Selection> parseSelection(String nodes) { List<Selection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$|/$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<Selection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); int start = Integer.parseInt(m.group(1)); int end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new Selection(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<Selection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = Integer.parseInt(numAndRest.getFirst()); into.add(new Selection(nodeNum, 
nodeNum + 1)); return numAndRest.getSecond(); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1)); } return new Pair<>(string, ""); } private static void selectionToString(StringBuilder sb, List<Selection> nodes) { boolean first = true; for (Selection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } } @Override public String toString() { StringBuilder sb = new StringBuilder(); selectionToString(sb, nodes); if ( ! groups.isEmpty()) { sb.append('/'); selectionToString(sb, groups); } return sb.toString(); } private static class Selection { private final int from; private final int to; Selection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = Math.min(to, max); return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
Done
private Group selectRandomGroupWithSufficientCoverage(SearchCluster cluster, List<Integer> groupIds) { for (int numRetries = 0; numRetries < groupIds.size()*2; numRetries++) { int index = random.nextInt(groupIds.size()); int groupId = groupIds.get(index); Optional<Group> group = cluster.group(groupId); if (group.isPresent()) { if (group.get().hasSufficientCoverage()) { return group.get(); } } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (groupId + 1) + " groups"); } } return cluster.groups().values().iterator().next(); }
for (int numRetries = 0; numRetries < groupIds.size()*2; numRetries++) {
private Group selectRandomGroupWithSufficientCoverage(SearchCluster cluster, List<Integer> groupIds) { while ( ! groupIds.isEmpty()) { int index = random.nextInt(groupIds.size()); int groupId = groupIds.get(index); Optional<Group> group = cluster.group(groupId); if (group.isPresent()) { if (group.get().hasSufficientCoverage()) { return group.get(); } else { groupIds.remove(index); } } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (groupId + 1) + " groups"); } } return cluster.groups().values().iterator().next(); }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<NodeSelection> nodes; private final List<NodeSelection> groups; private static final Random random = new Random(); private SearchPath(List<NodeSelection> nodes, List<NodeSelection> group) { this.nodes = nodes; this.groups = group; } private List<Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (NodeSelection node : nodes) { wanted.addAll(node.matches(max)); } List<Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && 
groups.isEmpty(); } private Group selectGroup(SearchCluster cluster) { if ( ! groups.isEmpty()) { List<Integer> potentialGroups = new ArrayList<>(); for (NodeSelection groupSelection : groups) { for (int group = groupSelection.from; group < groupSelection.to; group++) { potentialGroups.add(group); } } return selectRandomGroupWithSufficientCoverage(cluster, potentialGroups); } return selectRandomGroupWithSufficientCoverage(cluster, cluster.groups().keySet().asList()); } private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroup = halveAt('/', element); List<NodeSelection> nodes = parseNodes(nodesAndGroup.getFirst()); List<NodeSelection> groups = parseNodes(nodesAndGroup.getSecond()); return new SearchPath(nodes, groups); } private static List<NodeSelection> parseNodes(String nodes) { List<NodeSelection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$|/$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<NodeSelection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); int start = Integer.parseInt(m.group(1)); int end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new NodeSelection(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<NodeSelection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = 
Integer.parseInt(numAndRest.getFirst()); into.add(new NodeSelection(nodeNum, nodeNum + 1)); return numAndRest.getSecond(); } private static Integer parseGroup(String group) { if (group.isEmpty()) { return null; } if ("/".equals(group) || "*".equals(group)) { return null; } return Integer.parseInt(group); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1)); } return new Pair<>(string, ""); } private static void selectionToString(StringBuilder sb, List<NodeSelection> nodes) { boolean first = true; for (NodeSelection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } } @Override public String toString() { StringBuilder sb = new StringBuilder(); selectionToString(sb, nodes); if ( ! groups.isEmpty()) { sb.append('/'); selectionToString(sb, groups); } return sb.toString(); } private static class NodeSelection { private final int from; private final int to; NodeSelection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = Math.min(to, max); return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<Selection> nodes; private final List<Selection> groups; private static final Random random = new Random(); private SearchPath(List<Selection> nodes, List<Selection> groups) { this.nodes = nodes; this.groups = groups; } private List<Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (Selection node : nodes) { wanted.addAll(node.matches(max)); } List<Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && groups.isEmpty(); } private 
Group selectGroup(SearchCluster cluster) { if ( ! groups.isEmpty()) { List<Integer> potentialGroups = new ArrayList<>(); for (Selection groupSelection : groups) { for (int group = groupSelection.from; group < groupSelection.to; group++) { potentialGroups.add(group); } } return selectRandomGroupWithSufficientCoverage(cluster, potentialGroups); } return selectRandomGroupWithSufficientCoverage(cluster, new ArrayList<>(cluster.groups().keySet())); } private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroups = halveAt('/', element); List<Selection> nodes = parseSelection(nodesAndGroups.getFirst()); List<Selection> groups = parseSelection(nodesAndGroups.getSecond()); return new SearchPath(nodes, groups); } private static List<Selection> parseSelection(String nodes) { List<Selection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$|/$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<Selection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); int start = Integer.parseInt(m.group(1)); int end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new Selection(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<Selection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = Integer.parseInt(numAndRest.getFirst()); into.add(new Selection(nodeNum, 
nodeNum + 1)); return numAndRest.getSecond(); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1)); } return new Pair<>(string, ""); } private static void selectionToString(StringBuilder sb, List<Selection> nodes) { boolean first = true; for (Selection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } } @Override public String toString() { StringBuilder sb = new StringBuilder(); selectionToString(sb, nodes); if ( ! groups.isEmpty()) { sb.append('/'); selectionToString(sb, groups); } return sb.toString(); } private static class Selection { private final int from; private final int to; Selection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = Math.min(to, max); return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
Should this be `>` rather than `>=`? `MAX_LENGTH` sounds like it should be possible to create an ID of up to and including the given length
public static IdString createIdString(String id) { if (id.length() >= MAX_LENGTH) { throw new IllegalArgumentException("Document id length " + id.length() + " is longer than max length of " + MAX_LENGTH); } validateTextString(id); return parseAndCreate(id); }
if (id.length() >= MAX_LENGTH) {
public static IdString createIdString(String id) { if (id.length() > MAX_LENGTH) { throw new IllegalArgumentException("Document id length " + id.length() + " is longer than max length of " + MAX_LENGTH); } validateTextString(id); return parseAndCreate(id); }
class IdString { public boolean hasDocType() { return false; } public String getDocType() { return ""; } public boolean hasGroup() { return false; } public boolean hasNumber() { return false; } public long getNumber() { return 0; } public String getGroup() { return ""; } public enum Scheme { id } private final Scheme scheme; private final String namespace; private final String namespaceSpecific; private Utf8String cache; static final int MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC = 0xff00; public static final int MAX_LENGTH = 0x10000; /** * Creates a IdString based on the given document id string. * * The document id string can only contain text characters. */ /** * Creates a IdString based on the given document id string. This is a less strict variant * for creating 'illegal' document ids for documents already fed. Only use when strictly needed. */ @Beta public static IdString createIdStringLessStrict(String id) { validateTextString(id); return parseAndCreate(id); } /** * Creates a IdString based on the given serialized document id string. * * The document id string can not contain 0x0 byte characters. 
*/ public static IdString createFromSerialized(String id) { validateNoZeroBytes(id); return parseAndCreate(id); } private static void validateTextString(String id) { OptionalInt illegalCodePoint = Text.validateTextString(id); if (illegalCodePoint.isPresent()) { throw new IllegalArgumentException("Unparseable id '" + id + "': Contains illegal code point 0x" + Integer.toHexString(illegalCodePoint.getAsInt()).toUpperCase()); } } private static void validateNoZeroBytes(String id) { for (int i = 0; i < id.length(); i++) { if (id.codePointAt(i) == 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Contains illegal zero byte code point"); } } } private static IdString parseAndCreate(String id) { String namespace; int schemePos = id.indexOf(":"); if (schemePos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Scheme missing"); } String schemeStr = id.substring(0, schemePos); int currPos = schemePos + 1; int colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Namespace missing"); } else { namespace = id.substring(currPos, colonPos); if (namespace.length() == 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Namespace must be non-empty"); } currPos = colonPos + 1; } if (schemeStr.equals("id")) { colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Document type missing"); } String type = id.substring(currPos, colonPos); currPos = colonPos + 1; colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Key/value section missing"); } else if (colonPos >= MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC) { throw new IllegalArgumentException("Document id prior to the namespace specific part, " + colonPos + ", is longer than " + MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC + " id: " + id); } String keyValues = id.substring(currPos, 
colonPos); currPos = colonPos + 1; return new IdIdString(namespace, type, keyValues, id.substring(currPos)); } else { throw new IllegalArgumentException("Unknown id scheme '" + schemeStr + "'"); } } protected IdString(Scheme scheme, String namespace, String namespaceSpecific) { this.scheme = scheme; this.namespace = namespace; this.namespaceSpecific = namespaceSpecific; } public Scheme getType() { return scheme; } public String getNamespace() { return namespace; } public String getNamespaceSpecific() { return namespaceSpecific; } public abstract long getLocation(); public String getSchemeParameters() { return ""; } public abstract String getSchemeSpecific(); public boolean equals(Object o) { return (o instanceof IdString && o.toString().equals(toString())); } public int hashCode() { return toString().hashCode(); } private Utf8String createToString() { return new Utf8String(scheme.toString() + getSchemeParameters() + ':' + namespace + ':' + getSchemeSpecific() + namespaceSpecific); } public String toString() { if (cache == null) { cache = createToString(); } return cache.toString(); } public Utf8String toUtf8() { if (cache == null) { cache = createToString(); } return cache; } }
class IdString { public boolean hasDocType() { return false; } public String getDocType() { return ""; } public boolean hasGroup() { return false; } public boolean hasNumber() { return false; } public long getNumber() { return 0; } public String getGroup() { return ""; } public enum Scheme { id } private final Scheme scheme; private final String namespace; private final String namespaceSpecific; private Utf8String cache; static final int MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC = 0xff00; public static final int MAX_LENGTH = 0x10000; /** * Creates a IdString based on the given document id string. * * The document id string can only contain text characters. */ /** * Creates a IdString based on the given document id string. This is a less strict variant * for creating 'illegal' document ids for documents already fed. Only use when strictly needed. */ @Beta public static IdString createIdStringLessStrict(String id) { validateTextString(id); return parseAndCreate(id); } /** * Creates a IdString based on the given serialized document id string. * * The document id string can not contain 0x0 byte characters. 
*/ public static IdString createFromSerialized(String id) { validateNoZeroBytes(id); return parseAndCreate(id); } private static void validateTextString(String id) { OptionalInt illegalCodePoint = Text.validateTextString(id); if (illegalCodePoint.isPresent()) { throw new IllegalArgumentException("Unparseable id '" + id + "': Contains illegal code point 0x" + Integer.toHexString(illegalCodePoint.getAsInt()).toUpperCase()); } } private static void validateNoZeroBytes(String id) { for (int i = 0; i < id.length(); i++) { if (id.codePointAt(i) == 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Contains illegal zero byte code point"); } } } private static IdString parseAndCreate(String id) { String namespace; int schemePos = id.indexOf(":"); if (schemePos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Scheme missing"); } String schemeStr = id.substring(0, schemePos); int currPos = schemePos + 1; int colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Namespace missing"); } else { namespace = id.substring(currPos, colonPos); if (namespace.length() == 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Namespace must be non-empty"); } currPos = colonPos + 1; } if (schemeStr.equals("id")) { colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Document type missing"); } String type = id.substring(currPos, colonPos); currPos = colonPos + 1; colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Key/value section missing"); } else if (colonPos >= MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC) { throw new IllegalArgumentException("Document id prior to the namespace specific part, " + colonPos + ", is longer than " + MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC + " id: " + id); } String keyValues = id.substring(currPos, 
colonPos); currPos = colonPos + 1; return new IdIdString(namespace, type, keyValues, id.substring(currPos)); } else { throw new IllegalArgumentException("Unknown id scheme '" + schemeStr + "'"); } } protected IdString(Scheme scheme, String namespace, String namespaceSpecific) { this.scheme = scheme; this.namespace = namespace; this.namespaceSpecific = namespaceSpecific; } public Scheme getType() { return scheme; } public String getNamespace() { return namespace; } public String getNamespaceSpecific() { return namespaceSpecific; } public abstract long getLocation(); public String getSchemeParameters() { return ""; } public abstract String getSchemeSpecific(); public boolean equals(Object o) { return (o instanceof IdString && o.toString().equals(toString())); } public int hashCode() { return toString().hashCode(); } private Utf8String createToString() { return new Utf8String(scheme.toString() + getSchemeParameters() + ':' + namespace + ':' + getSchemeSpecific() + namespaceSpecific); } public String toString() { if (cache == null) { cache = createToString(); } return cache.toString(); } public Utf8String toUtf8() { if (cache == null) { cache = createToString(); } return cache; } }
Nit: s/backdor/backdoor/ 🚪
public void requireTooLongIdThrowsWhileParsing() throws Exception { StringBuilder builder = new StringBuilder("id:ns:type::namespacespecificpart_01"); for (int i = 0; i < 0x10000; i++) { builder.append('n'); } try { IdString.createIdString(builder.toString()); fail(); } catch (IllegalArgumentException e) { assertEquals("Document id length 65572 is longer than max length of 65536", e.getMessage()); } assertEquals(65572, IdString.createIdStringLessStrict(builder.toString()).toString().length()); }
public void requireTooLongIdThrowsWhileParsing() { StringBuilder builder = new StringBuilder("id:ns:type::namespacespecificpart_01"); for (int i = 0; i < 0x10000; i++) { builder.append('n'); } try { IdString.createIdString(builder.toString()); fail(); } catch (IllegalArgumentException e) { assertEquals("Document id length 65572 is longer than max length of 65536", e.getMessage()); } assertEquals(65572, IdString.createIdStringLessStrict(builder.toString()).toString().length()); }
class IdIdStringTest { @Test public void requireThatIdIdStringGeneratesProperString() throws Exception { DocumentId docId = new DocumentId(new IdIdString("namespace", "type", "g=group", "foobar")); assertEquals("id:namespace:type:g=group:foobar", docId.toString()); } @Test public void requireThatEmptyKeyValuesAreOk() throws Exception { DocumentId docId = new DocumentId(new IdIdString("namespace", "type", "", "foobar")); assertEquals("id:namespace:type::foobar", docId.toString()); } @Test public void requireThatIdIdStringCanBehaveLikeGroupDoc() throws Exception { DocumentId docId1 = new DocumentId(new IdIdString("namespace", "type", "g=foo", "foo")); DocumentId docId2 = new DocumentId(new IdIdString("namespace", "type", "g=foo", "bar")); DocumentId docId3 = new DocumentId(new IdIdString("namespace", "type", "g=bar", "baz")); assertEquals(docId1.getScheme().getLocation(), docId2.getScheme().getLocation()); assert(docId1.getScheme().getLocation() != docId3.getScheme().getLocation()); } @Test public void requireThatIdIdStringCanBehaveLikeUserDoc() throws Exception { DocumentId docId1 = new DocumentId(new IdIdString("namespace", "type", "n=10", "foo")); DocumentId docId2 = new DocumentId(new IdIdString("namespace", "type", "n=10", "bar")); DocumentId docId3 = new DocumentId(new IdIdString("namespace", "type", "n=20", "baz")); assertEquals(docId1.getScheme().getLocation(), docId2.getScheme().getLocation()); assert(docId1.getScheme().getLocation() != docId3.getScheme().getLocation()); } @Test public void requireThatIllegalKeyValuesThrow() throws Exception { try { new IdIdString("namespace", "type", "illegal=key", "foo"); fail(); } catch (IllegalArgumentException e) { assertEquals("Illegal key 'illegal'", e.getMessage()); } } @Test public void requireThatKeysWithoutValuesThrow() throws Exception { try { new IdIdString("namespace", "type", "illegal-pair", "foo"); fail(); } catch (IllegalArgumentException e) { assertEquals("Illegal key-value pair 'illegal-pair'", 
e.getMessage()); } } @Test @Test public void requireThatTooLongPreNamespaceSpecificThrowsWhileParsing() throws Exception { StringBuilder builder = new StringBuilder("id:"); for (int i = 0; i < 0xff00; i++) { builder.append('n'); } builder.append(":type::namespacespecificpart_01"); try { IdString.createIdString(builder.toString()); fail(); } catch (IllegalArgumentException e) { assertEquals("Document id prior to the namespace specific part, 65289, is longer than 65280", e.getMessage().substring(0, 77)); } } @Test public void requireThatTooLongPreNamespaceSpecificThrowsOnConstruction() { StringBuilder builder = new StringBuilder(); for (int i = 0; i < 0xff00; i++) { builder.append('n'); } try { new IdIdString(builder.toString(), "type", "", "namespacespecificpart_01"); fail(); } catch (IllegalArgumentException e) { assertEquals("Length of namespace(65280) + doctype(4) + key/values(0), is longer than 65275", e.getMessage()); } } @Test public void requireThatIdIdStringCanReplaceType() throws Exception { String type = IdIdString.replaceType("id:namespace:type::foo", "newType"); assertEquals("id:namespace:newType::foo", type); } }
class IdIdStringTest { @Test public void requireThatIdIdStringGeneratesProperString() { DocumentId docId = new DocumentId(new IdIdString("namespace", "type", "g=group", "foobar")); assertEquals("id:namespace:type:g=group:foobar", docId.toString()); } @Test public void requireThatEmptyKeyValuesAreOk() { DocumentId docId = new DocumentId(new IdIdString("namespace", "type", "", "foobar")); assertEquals("id:namespace:type::foobar", docId.toString()); } @Test public void requireThatIdIdStringCanBehaveLikeGroupDoc() { DocumentId docId1 = new DocumentId(new IdIdString("namespace", "type", "g=foo", "foo")); DocumentId docId2 = new DocumentId(new IdIdString("namespace", "type", "g=foo", "bar")); DocumentId docId3 = new DocumentId(new IdIdString("namespace", "type", "g=bar", "baz")); assertEquals(docId1.getScheme().getLocation(), docId2.getScheme().getLocation()); assert(docId1.getScheme().getLocation() != docId3.getScheme().getLocation()); } @Test public void requireThatIdIdStringCanBehaveLikeUserDoc() { DocumentId docId1 = new DocumentId(new IdIdString("namespace", "type", "n=10", "foo")); DocumentId docId2 = new DocumentId(new IdIdString("namespace", "type", "n=10", "bar")); DocumentId docId3 = new DocumentId(new IdIdString("namespace", "type", "n=20", "baz")); assertEquals(docId1.getScheme().getLocation(), docId2.getScheme().getLocation()); assert(docId1.getScheme().getLocation() != docId3.getScheme().getLocation()); } @Test public void requireThatIllegalKeyValuesThrow() { try { new IdIdString("namespace", "type", "illegal=key", "foo"); fail(); } catch (IllegalArgumentException e) { assertEquals("Illegal key 'illegal'", e.getMessage()); } } @Test public void requireThatKeysWithoutValuesThrow() { try { new IdIdString("namespace", "type", "illegal-pair", "foo"); fail(); } catch (IllegalArgumentException e) { assertEquals("Illegal key-value pair 'illegal-pair'", e.getMessage()); } } @Test @Test public void requireThatTooLongPreNamespaceSpecificThrowsWhileParsing() { 
StringBuilder builder = new StringBuilder("id:"); for (int i = 0; i < 0xff00; i++) { builder.append('n'); } builder.append(":type::namespacespecificpart_01"); try { IdString.createIdString(builder.toString()); fail(); } catch (IllegalArgumentException e) { assertEquals("Document id prior to the namespace specific part, 65289, is longer than 65280", e.getMessage().substring(0, 77)); } } @Test public void requireThatTooLongPreNamespaceSpecificThrowsOnConstruction() { StringBuilder builder = new StringBuilder(); for (int i = 0; i < 0xff00; i++) { builder.append('n'); } try { new IdIdString(builder.toString(), "type", "", "namespacespecificpart_01"); fail(); } catch (IllegalArgumentException e) { assertEquals("Length of namespace(65280) + doctype(4) + key/values(0), is longer than 65275", e.getMessage()); } } @Test public void requireThatIdIdStringCanReplaceType() { String type = IdIdString.replaceType("id:namespace:type::foo", "newType"); assertEquals("id:namespace:newType::foo", type); } }
space between `+` and `"`
public YumTester andReturn(boolean value) { terminal.expectCommand("yum --version 2>&1", 0, yumVersion.toFullString() + "\ntrailing garbage\n"); String quiet = yumVersion.getMajor() < 4 ? " --quiet" : ""; terminal.expectCommand("yum" + quiet +" versionlock list 2>&1", 0, packages.get(0).toVersionLockName(yumVersion)); return super.andReturn(value); }
terminal.expectCommand("yum" + quiet +" versionlock list 2>&1", 0, packages.get(0).toVersionLockName(yumVersion));
public YumTester andReturn(boolean value) { if (value) return execute("Success"); switch (command) { case "install": return execute("Nothing to do"); case "upgrade": return execute("No packages marked for update"); case "remove": return execute("No Packages marked for removal"); default: throw new IllegalArgumentException("Unknown command: " + command); } }
class GenericYumCommandExpectation { private final String command; protected final List<YumPackageName> packages; private List<String> enableRepos = List.of(); private GenericYumCommandExpectation(String command, String... packages) { this.command = command; this.packages = Stream.of(packages).map(YumPackageName::fromString).collect(Collectors.toList()); } public GenericYumCommandExpectation withEnableRepo(String... repo) { this.enableRepos = List.of(repo); return this; } /** Mock the return value of the converge(TaskContext) method for this operation (true iff system was modified) */ protected void expectYumVersion() { terminal.expectCommand("yum --version 2>&1", 0, yumVersion.toFullString() + "\ntrailing garbage\n"); } private YumTester execute(String output) { StringBuilder cmd = new StringBuilder(); cmd.append("yum ").append(command).append(" --assumeyes"); enableRepos.forEach(repo -> cmd.append(" --enablerepo=").append(repo)); if ("install".equals(command) && packages.size() > 1) cmd.append(" --setopt skip_missing_names_on_install=False"); if ("upgrade".equals(command) && packages.size() > 1) cmd.append(" --setopt skip_missing_names_on_update=False"); packages.forEach(pkg -> { String name = pkg.toName(yumVersion); if (name.contains("(") || name.contains(")")) { name = "\"" + name + "\""; } cmd.append(" ").append(name); }); cmd.append(" 2>&1"); expectYumVersion(); terminal.expectCommand(cmd.toString(), 0, output); return YumTester.this; } }
class GenericYumCommandExpectation { private final String command; protected final List<YumPackageName> packages; private List<String> enableRepos = List.of(); private GenericYumCommandExpectation(String command, String... packages) { this.command = command; this.packages = Stream.of(packages).map(YumPackageName::fromString).collect(Collectors.toList()); } public GenericYumCommandExpectation withEnableRepo(String... repo) { this.enableRepos = List.of(repo); return this; } /** Mock the return value of the converge(TaskContext) method for this operation (true iff system was modified) */ protected void expectYumVersion() { terminal.expectCommand("yum --version 2>&1", 0, yumVersion.toFullString() + "\ntrailing garbage\n"); } private YumTester execute(String output) { StringBuilder cmd = new StringBuilder(); cmd.append("yum ").append(command).append(" --assumeyes"); enableRepos.forEach(repo -> cmd.append(" --enablerepo=").append(repo)); if ("install".equals(command) && packages.size() > 1) cmd.append(" --setopt skip_missing_names_on_install=False"); if ("upgrade".equals(command) && packages.size() > 1) cmd.append(" --setopt skip_missing_names_on_update=False"); packages.forEach(pkg -> { String name = pkg.toName(yumVersion); if (name.contains("(") || name.contains(")")) { name = "\"" + name + "\""; } cmd.append(" ").append(name); }); cmd.append(" 2>&1"); expectYumVersion(); terminal.expectCommand(cmd.toString(), 0, output); return YumTester.this; } }
Should the reported max length be `IdString.MAX_LENGTH` instead of hardcoded in the string?
public DocumentId(String id) { if (id == null) { throw new IllegalArgumentException("Cannot create DocumentId from null id."); } if (id.length() > IdString.MAX_LENGTH) { throw new IllegalArgumentException("The document id(" + id.length() + ") is too long(65536). " + "However if you have already fed a document earlier on and want to remove it, you can do so by " + "calling new DocumentId(IdString.createIdStringLessStrict()) that will bypass this restriction."); } this.id = IdString.createIdString(id); globalId = null; }
throw new IllegalArgumentException("The document id(" + id.length() + ") is too long(65536). " +
public DocumentId(String id) { if (id == null) { throw new IllegalArgumentException("Cannot create DocumentId from null id."); } if (id.length() > IdString.MAX_LENGTH) { throw new IllegalArgumentException("The document id(" + id.length() + ") is too long(" + IdString.MAX_LENGTH + "). " + "However if you have already fed a document earlier on and want to remove it, you can do so by " + "calling new DocumentId(IdString.createIdStringLessStrict()) that will bypass this restriction."); } this.id = IdString.createIdString(id); globalId = null; }
class DocumentId extends Identifiable implements Serializable { private IdString id; private GlobalId globalId; /** * Constructor used for deserialization. */ public DocumentId(Deserializer buf) { deserialize(buf); } /** * Creates a document id based on the given document id URI string. * * The document id string can only contain text characters. */ public DocumentId(IdString id) { this.id = id; globalId = null; } /** * Creates a document id based on a serialized document id URI string. * * The document id string is not allowed to contain 0x0 byte characters. * Otherwise all characters are allowed to ensure that document ids * already stored can be de-serialized. */ public static DocumentId createFromSerialized(String id) { return new DocumentId(IdString.createFromSerialized(id)); } @Override public DocumentId clone() { return (DocumentId)super.clone(); } public void setId(IdString id) { this.id = id; } public IdString getScheme() { return id; } public byte[] getGlobalId() { if (globalId == null) { globalId = new GlobalId(id); } return globalId.getRawId(); } public int compareTo(Object o) { DocumentId cmp = (DocumentId)o; return id.toString().compareTo(cmp.id.toString()); } public boolean equals(Object o) { return o instanceof DocumentId && id.equals(((DocumentId)o).id); } public int hashCode() { return id.hashCode(); } public String toString() { return id.toString(); } @Override public void onSerialize(Serializer target) throws SerializationException { if (target instanceof DocumentWriter) { ((DocumentWriter)target).write(this); } else { target.put(null, id.toString()); } } public void onDeserialize(Deserializer data) throws DeserializationException { if (data instanceof DocumentReader) { id = ((DocumentReader)data).readDocumentId().getScheme(); } else { id = IdString.createFromSerialized(data.getString(null)); } } public boolean hasDocType() { return id.hasDocType(); } public String getDocType() { return id.getDocType(); } }
class DocumentId extends Identifiable implements Serializable { private IdString id; private GlobalId globalId; /** * Constructor used for deserialization. */ public DocumentId(Deserializer buf) { deserialize(buf); } /** * Creates a document id based on the given document id URI string. * * The document id string can only contain text characters. */ public DocumentId(IdString id) { this.id = id; globalId = null; } /** * Creates a document id based on a serialized document id URI string. * * The document id string is not allowed to contain 0x0 byte characters. * Otherwise all characters are allowed to ensure that document ids * already stored can be de-serialized. */ public static DocumentId createFromSerialized(String id) { return new DocumentId(IdString.createFromSerialized(id)); } @Override public DocumentId clone() { return (DocumentId)super.clone(); } public void setId(IdString id) { this.id = id; } public IdString getScheme() { return id; } public byte[] getGlobalId() { if (globalId == null) { globalId = new GlobalId(id); } return globalId.getRawId(); } public int compareTo(Object o) { DocumentId cmp = (DocumentId)o; return id.toString().compareTo(cmp.id.toString()); } public boolean equals(Object o) { return o instanceof DocumentId && id.equals(((DocumentId)o).id); } public int hashCode() { return id.hashCode(); } public String toString() { return id.toString(); } @Override public void onSerialize(Serializer target) throws SerializationException { if (target instanceof DocumentWriter) { ((DocumentWriter)target).write(this); } else { target.put(null, id.toString()); } } public void onDeserialize(Deserializer data) throws DeserializationException { if (data instanceof DocumentReader) { id = ((DocumentReader)data).readDocumentId().getScheme(); } else { id = IdString.createFromSerialized(data.getString(null)); } } public boolean hasDocType() { return id.hasDocType(); } public String getDocType() { return id.getDocType(); } }
Correct, fixed
public DocumentId(String id) { if (id == null) { throw new IllegalArgumentException("Cannot create DocumentId from null id."); } if (id.length() > IdString.MAX_LENGTH) { throw new IllegalArgumentException("The document id(" + id.length() + ") is too long(65536). " + "However if you have already fed a document earlier on and want to remove it, you can do so by " + "calling new DocumentId(IdString.createIdStringLessStrict()) that will bypass this restriction."); } this.id = IdString.createIdString(id); globalId = null; }
throw new IllegalArgumentException("The document id(" + id.length() + ") is too long(65536). " +
public DocumentId(String id) { if (id == null) { throw new IllegalArgumentException("Cannot create DocumentId from null id."); } if (id.length() > IdString.MAX_LENGTH) { throw new IllegalArgumentException("The document id(" + id.length() + ") is too long(" + IdString.MAX_LENGTH + "). " + "However if you have already fed a document earlier on and want to remove it, you can do so by " + "calling new DocumentId(IdString.createIdStringLessStrict()) that will bypass this restriction."); } this.id = IdString.createIdString(id); globalId = null; }
class DocumentId extends Identifiable implements Serializable { private IdString id; private GlobalId globalId; /** * Constructor used for deserialization. */ public DocumentId(Deserializer buf) { deserialize(buf); } /** * Creates a document id based on the given document id URI string. * * The document id string can only contain text characters. */ public DocumentId(IdString id) { this.id = id; globalId = null; } /** * Creates a document id based on a serialized document id URI string. * * The document id string is not allowed to contain 0x0 byte characters. * Otherwise all characters are allowed to ensure that document ids * already stored can be de-serialized. */ public static DocumentId createFromSerialized(String id) { return new DocumentId(IdString.createFromSerialized(id)); } @Override public DocumentId clone() { return (DocumentId)super.clone(); } public void setId(IdString id) { this.id = id; } public IdString getScheme() { return id; } public byte[] getGlobalId() { if (globalId == null) { globalId = new GlobalId(id); } return globalId.getRawId(); } public int compareTo(Object o) { DocumentId cmp = (DocumentId)o; return id.toString().compareTo(cmp.id.toString()); } public boolean equals(Object o) { return o instanceof DocumentId && id.equals(((DocumentId)o).id); } public int hashCode() { return id.hashCode(); } public String toString() { return id.toString(); } @Override public void onSerialize(Serializer target) throws SerializationException { if (target instanceof DocumentWriter) { ((DocumentWriter)target).write(this); } else { target.put(null, id.toString()); } } public void onDeserialize(Deserializer data) throws DeserializationException { if (data instanceof DocumentReader) { id = ((DocumentReader)data).readDocumentId().getScheme(); } else { id = IdString.createFromSerialized(data.getString(null)); } } public boolean hasDocType() { return id.hasDocType(); } public String getDocType() { return id.getDocType(); } }
class DocumentId extends Identifiable implements Serializable { private IdString id; private GlobalId globalId; /** * Constructor used for deserialization. */ public DocumentId(Deserializer buf) { deserialize(buf); } /** * Creates a document id based on the given document id URI string. * * The document id string can only contain text characters. */ public DocumentId(IdString id) { this.id = id; globalId = null; } /** * Creates a document id based on a serialized document id URI string. * * The document id string is not allowed to contain 0x0 byte characters. * Otherwise all characters are allowed to ensure that document ids * already stored can be de-serialized. */ public static DocumentId createFromSerialized(String id) { return new DocumentId(IdString.createFromSerialized(id)); } @Override public DocumentId clone() { return (DocumentId)super.clone(); } public void setId(IdString id) { this.id = id; } public IdString getScheme() { return id; } public byte[] getGlobalId() { if (globalId == null) { globalId = new GlobalId(id); } return globalId.getRawId(); } public int compareTo(Object o) { DocumentId cmp = (DocumentId)o; return id.toString().compareTo(cmp.id.toString()); } public boolean equals(Object o) { return o instanceof DocumentId && id.equals(((DocumentId)o).id); } public int hashCode() { return id.hashCode(); } public String toString() { return id.toString(); } @Override public void onSerialize(Serializer target) throws SerializationException { if (target instanceof DocumentWriter) { ((DocumentWriter)target).write(this); } else { target.put(null, id.toString()); } } public void onDeserialize(Deserializer data) throws DeserializationException { if (data instanceof DocumentReader) { id = ((DocumentReader)data).readDocumentId().getScheme(); } else { id = IdString.createFromSerialized(data.getString(null)); } } public boolean hasDocType() { return id.hasDocType(); } public String getDocType() { return id.getDocType(); } }
Fixed
public void requireTooLongIdThrowsWhileParsing() throws Exception { StringBuilder builder = new StringBuilder("id:ns:type::namespacespecificpart_01"); for (int i = 0; i < 0x10000; i++) { builder.append('n'); } try { IdString.createIdString(builder.toString()); fail(); } catch (IllegalArgumentException e) { assertEquals("Document id length 65572 is longer than max length of 65536", e.getMessage()); } assertEquals(65572, IdString.createIdStringLessStrict(builder.toString()).toString().length()); }
public void requireTooLongIdThrowsWhileParsing() { StringBuilder builder = new StringBuilder("id:ns:type::namespacespecificpart_01"); for (int i = 0; i < 0x10000; i++) { builder.append('n'); } try { IdString.createIdString(builder.toString()); fail(); } catch (IllegalArgumentException e) { assertEquals("Document id length 65572 is longer than max length of 65536", e.getMessage()); } assertEquals(65572, IdString.createIdStringLessStrict(builder.toString()).toString().length()); }
class IdIdStringTest { @Test public void requireThatIdIdStringGeneratesProperString() throws Exception { DocumentId docId = new DocumentId(new IdIdString("namespace", "type", "g=group", "foobar")); assertEquals("id:namespace:type:g=group:foobar", docId.toString()); } @Test public void requireThatEmptyKeyValuesAreOk() throws Exception { DocumentId docId = new DocumentId(new IdIdString("namespace", "type", "", "foobar")); assertEquals("id:namespace:type::foobar", docId.toString()); } @Test public void requireThatIdIdStringCanBehaveLikeGroupDoc() throws Exception { DocumentId docId1 = new DocumentId(new IdIdString("namespace", "type", "g=foo", "foo")); DocumentId docId2 = new DocumentId(new IdIdString("namespace", "type", "g=foo", "bar")); DocumentId docId3 = new DocumentId(new IdIdString("namespace", "type", "g=bar", "baz")); assertEquals(docId1.getScheme().getLocation(), docId2.getScheme().getLocation()); assert(docId1.getScheme().getLocation() != docId3.getScheme().getLocation()); } @Test public void requireThatIdIdStringCanBehaveLikeUserDoc() throws Exception { DocumentId docId1 = new DocumentId(new IdIdString("namespace", "type", "n=10", "foo")); DocumentId docId2 = new DocumentId(new IdIdString("namespace", "type", "n=10", "bar")); DocumentId docId3 = new DocumentId(new IdIdString("namespace", "type", "n=20", "baz")); assertEquals(docId1.getScheme().getLocation(), docId2.getScheme().getLocation()); assert(docId1.getScheme().getLocation() != docId3.getScheme().getLocation()); } @Test public void requireThatIllegalKeyValuesThrow() throws Exception { try { new IdIdString("namespace", "type", "illegal=key", "foo"); fail(); } catch (IllegalArgumentException e) { assertEquals("Illegal key 'illegal'", e.getMessage()); } } @Test public void requireThatKeysWithoutValuesThrow() throws Exception { try { new IdIdString("namespace", "type", "illegal-pair", "foo"); fail(); } catch (IllegalArgumentException e) { assertEquals("Illegal key-value pair 'illegal-pair'", 
e.getMessage()); } } @Test @Test public void requireThatTooLongPreNamespaceSpecificThrowsWhileParsing() throws Exception { StringBuilder builder = new StringBuilder("id:"); for (int i = 0; i < 0xff00; i++) { builder.append('n'); } builder.append(":type::namespacespecificpart_01"); try { IdString.createIdString(builder.toString()); fail(); } catch (IllegalArgumentException e) { assertEquals("Document id prior to the namespace specific part, 65289, is longer than 65280", e.getMessage().substring(0, 77)); } } @Test public void requireThatTooLongPreNamespaceSpecificThrowsOnConstruction() { StringBuilder builder = new StringBuilder(); for (int i = 0; i < 0xff00; i++) { builder.append('n'); } try { new IdIdString(builder.toString(), "type", "", "namespacespecificpart_01"); fail(); } catch (IllegalArgumentException e) { assertEquals("Length of namespace(65280) + doctype(4) + key/values(0), is longer than 65275", e.getMessage()); } } @Test public void requireThatIdIdStringCanReplaceType() throws Exception { String type = IdIdString.replaceType("id:namespace:type::foo", "newType"); assertEquals("id:namespace:newType::foo", type); } }
class IdIdStringTest { @Test public void requireThatIdIdStringGeneratesProperString() { DocumentId docId = new DocumentId(new IdIdString("namespace", "type", "g=group", "foobar")); assertEquals("id:namespace:type:g=group:foobar", docId.toString()); } @Test public void requireThatEmptyKeyValuesAreOk() { DocumentId docId = new DocumentId(new IdIdString("namespace", "type", "", "foobar")); assertEquals("id:namespace:type::foobar", docId.toString()); } @Test public void requireThatIdIdStringCanBehaveLikeGroupDoc() { DocumentId docId1 = new DocumentId(new IdIdString("namespace", "type", "g=foo", "foo")); DocumentId docId2 = new DocumentId(new IdIdString("namespace", "type", "g=foo", "bar")); DocumentId docId3 = new DocumentId(new IdIdString("namespace", "type", "g=bar", "baz")); assertEquals(docId1.getScheme().getLocation(), docId2.getScheme().getLocation()); assert(docId1.getScheme().getLocation() != docId3.getScheme().getLocation()); } @Test public void requireThatIdIdStringCanBehaveLikeUserDoc() { DocumentId docId1 = new DocumentId(new IdIdString("namespace", "type", "n=10", "foo")); DocumentId docId2 = new DocumentId(new IdIdString("namespace", "type", "n=10", "bar")); DocumentId docId3 = new DocumentId(new IdIdString("namespace", "type", "n=20", "baz")); assertEquals(docId1.getScheme().getLocation(), docId2.getScheme().getLocation()); assert(docId1.getScheme().getLocation() != docId3.getScheme().getLocation()); } @Test public void requireThatIllegalKeyValuesThrow() { try { new IdIdString("namespace", "type", "illegal=key", "foo"); fail(); } catch (IllegalArgumentException e) { assertEquals("Illegal key 'illegal'", e.getMessage()); } } @Test public void requireThatKeysWithoutValuesThrow() { try { new IdIdString("namespace", "type", "illegal-pair", "foo"); fail(); } catch (IllegalArgumentException e) { assertEquals("Illegal key-value pair 'illegal-pair'", e.getMessage()); } } @Test @Test public void requireThatTooLongPreNamespaceSpecificThrowsWhileParsing() { 
StringBuilder builder = new StringBuilder("id:"); for (int i = 0; i < 0xff00; i++) { builder.append('n'); } builder.append(":type::namespacespecificpart_01"); try { IdString.createIdString(builder.toString()); fail(); } catch (IllegalArgumentException e) { assertEquals("Document id prior to the namespace specific part, 65289, is longer than 65280", e.getMessage().substring(0, 77)); } } @Test public void requireThatTooLongPreNamespaceSpecificThrowsOnConstruction() { StringBuilder builder = new StringBuilder(); for (int i = 0; i < 0xff00; i++) { builder.append('n'); } try { new IdIdString(builder.toString(), "type", "", "namespacespecificpart_01"); fail(); } catch (IllegalArgumentException e) { assertEquals("Length of namespace(65280) + doctype(4) + key/values(0), is longer than 65275", e.getMessage()); } } @Test public void requireThatIdIdStringCanReplaceType() { String type = IdIdString.replaceType("id:namespace:type::foo", "newType"); assertEquals("id:namespace:newType::foo", type); } }
Correct fixed
public static IdString createIdString(String id) { if (id.length() >= MAX_LENGTH) { throw new IllegalArgumentException("Document id length " + id.length() + " is longer than max length of " + MAX_LENGTH); } validateTextString(id); return parseAndCreate(id); }
if (id.length() >= MAX_LENGTH) {
public static IdString createIdString(String id) { if (id.length() > MAX_LENGTH) { throw new IllegalArgumentException("Document id length " + id.length() + " is longer than max length of " + MAX_LENGTH); } validateTextString(id); return parseAndCreate(id); }
class IdString { public boolean hasDocType() { return false; } public String getDocType() { return ""; } public boolean hasGroup() { return false; } public boolean hasNumber() { return false; } public long getNumber() { return 0; } public String getGroup() { return ""; } public enum Scheme { id } private final Scheme scheme; private final String namespace; private final String namespaceSpecific; private Utf8String cache; static final int MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC = 0xff00; public static final int MAX_LENGTH = 0x10000; /** * Creates a IdString based on the given document id string. * * The document id string can only contain text characters. */ /** * Creates a IdString based on the given document id string. This is a less strict variant * for creating 'illegal' document ids for documents already fed. Only use when strictly needed. */ @Beta public static IdString createIdStringLessStrict(String id) { validateTextString(id); return parseAndCreate(id); } /** * Creates a IdString based on the given serialized document id string. * * The document id string can not contain 0x0 byte characters. 
*/ public static IdString createFromSerialized(String id) { validateNoZeroBytes(id); return parseAndCreate(id); } private static void validateTextString(String id) { OptionalInt illegalCodePoint = Text.validateTextString(id); if (illegalCodePoint.isPresent()) { throw new IllegalArgumentException("Unparseable id '" + id + "': Contains illegal code point 0x" + Integer.toHexString(illegalCodePoint.getAsInt()).toUpperCase()); } } private static void validateNoZeroBytes(String id) { for (int i = 0; i < id.length(); i++) { if (id.codePointAt(i) == 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Contains illegal zero byte code point"); } } } private static IdString parseAndCreate(String id) { String namespace; int schemePos = id.indexOf(":"); if (schemePos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Scheme missing"); } String schemeStr = id.substring(0, schemePos); int currPos = schemePos + 1; int colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Namespace missing"); } else { namespace = id.substring(currPos, colonPos); if (namespace.length() == 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Namespace must be non-empty"); } currPos = colonPos + 1; } if (schemeStr.equals("id")) { colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Document type missing"); } String type = id.substring(currPos, colonPos); currPos = colonPos + 1; colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Key/value section missing"); } else if (colonPos >= MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC) { throw new IllegalArgumentException("Document id prior to the namespace specific part, " + colonPos + ", is longer than " + MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC + " id: " + id); } String keyValues = id.substring(currPos, 
colonPos); currPos = colonPos + 1; return new IdIdString(namespace, type, keyValues, id.substring(currPos)); } else { throw new IllegalArgumentException("Unknown id scheme '" + schemeStr + "'"); } } protected IdString(Scheme scheme, String namespace, String namespaceSpecific) { this.scheme = scheme; this.namespace = namespace; this.namespaceSpecific = namespaceSpecific; } public Scheme getType() { return scheme; } public String getNamespace() { return namespace; } public String getNamespaceSpecific() { return namespaceSpecific; } public abstract long getLocation(); public String getSchemeParameters() { return ""; } public abstract String getSchemeSpecific(); public boolean equals(Object o) { return (o instanceof IdString && o.toString().equals(toString())); } public int hashCode() { return toString().hashCode(); } private Utf8String createToString() { return new Utf8String(scheme.toString() + getSchemeParameters() + ':' + namespace + ':' + getSchemeSpecific() + namespaceSpecific); } public String toString() { if (cache == null) { cache = createToString(); } return cache.toString(); } public Utf8String toUtf8() { if (cache == null) { cache = createToString(); } return cache; } }
class IdString { public boolean hasDocType() { return false; } public String getDocType() { return ""; } public boolean hasGroup() { return false; } public boolean hasNumber() { return false; } public long getNumber() { return 0; } public String getGroup() { return ""; } public enum Scheme { id } private final Scheme scheme; private final String namespace; private final String namespaceSpecific; private Utf8String cache; static final int MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC = 0xff00; public static final int MAX_LENGTH = 0x10000; /** * Creates a IdString based on the given document id string. * * The document id string can only contain text characters. */ /** * Creates a IdString based on the given document id string. This is a less strict variant * for creating 'illegal' document ids for documents already fed. Only use when strictly needed. */ @Beta public static IdString createIdStringLessStrict(String id) { validateTextString(id); return parseAndCreate(id); } /** * Creates a IdString based on the given serialized document id string. * * The document id string can not contain 0x0 byte characters. 
*/ public static IdString createFromSerialized(String id) { validateNoZeroBytes(id); return parseAndCreate(id); } private static void validateTextString(String id) { OptionalInt illegalCodePoint = Text.validateTextString(id); if (illegalCodePoint.isPresent()) { throw new IllegalArgumentException("Unparseable id '" + id + "': Contains illegal code point 0x" + Integer.toHexString(illegalCodePoint.getAsInt()).toUpperCase()); } } private static void validateNoZeroBytes(String id) { for (int i = 0; i < id.length(); i++) { if (id.codePointAt(i) == 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Contains illegal zero byte code point"); } } } private static IdString parseAndCreate(String id) { String namespace; int schemePos = id.indexOf(":"); if (schemePos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Scheme missing"); } String schemeStr = id.substring(0, schemePos); int currPos = schemePos + 1; int colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Namespace missing"); } else { namespace = id.substring(currPos, colonPos); if (namespace.length() == 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Namespace must be non-empty"); } currPos = colonPos + 1; } if (schemeStr.equals("id")) { colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Document type missing"); } String type = id.substring(currPos, colonPos); currPos = colonPos + 1; colonPos = id.indexOf(":", currPos); if (colonPos < 0) { throw new IllegalArgumentException("Unparseable id '" + id + "': Key/value section missing"); } else if (colonPos >= MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC) { throw new IllegalArgumentException("Document id prior to the namespace specific part, " + colonPos + ", is longer than " + MAX_LENGTH_EXCEPT_NAMESPACE_SPECIFIC + " id: " + id); } String keyValues = id.substring(currPos, 
colonPos); currPos = colonPos + 1; return new IdIdString(namespace, type, keyValues, id.substring(currPos)); } else { throw new IllegalArgumentException("Unknown id scheme '" + schemeStr + "'"); } } protected IdString(Scheme scheme, String namespace, String namespaceSpecific) { this.scheme = scheme; this.namespace = namespace; this.namespaceSpecific = namespaceSpecific; } public Scheme getType() { return scheme; } public String getNamespace() { return namespace; } public String getNamespaceSpecific() { return namespaceSpecific; } public abstract long getLocation(); public String getSchemeParameters() { return ""; } public abstract String getSchemeSpecific(); public boolean equals(Object o) { return (o instanceof IdString && o.toString().equals(toString())); } public int hashCode() { return toString().hashCode(); } private Utf8String createToString() { return new Utf8String(scheme.toString() + getSchemeParameters() + ':' + namespace + ':' + getSchemeSpecific() + namespaceSpecific); } public String toString() { if (cache == null) { cache = createToString(); } return cache.toString(); } public Utf8String toUtf8() { if (cache == null) { cache = createToString(); } return cache; } }
Shouldn't this be `if (expr instanceof NegativeNode && ! shouldGenerateFeature((NegativeNode) expr).getValue())) return false; ` ?
private boolean shouldGenerateFeature(ExpressionNode expr) { if (expr instanceof ConstantNode) return false; if (expr instanceof ReferenceNode) return false; if (expr instanceof NameNode) return false; if (expr instanceof FunctionNode) return false; if (expr instanceof NegativeNode && ((NegativeNode) expr).getValue() instanceof ConstantNode) return false; return true; }
if (expr instanceof NegativeNode && ((NegativeNode) expr).getValue() instanceof ConstantNode) return false;
private boolean shouldGenerateFeature(ExpressionNode expr) { if (expr instanceof ConstantNode) return false; if (expr instanceof ReferenceNode) return false; if (expr instanceof NameNode) return false; if (expr instanceof FunctionNode) return false; if (expr instanceof NegativeNode && ((NegativeNode) expr).getValue() instanceof ConstantNode) return false; return true; }
class ExpressionFunction { private final String name; private final ImmutableList<String> arguments; /** Types of the inputs, if known. The keys here is any subset (including empty and identity) of the argument list */ private final ImmutableMap<String, TensorType> argumentTypes; private final RankingExpression body; private final Optional<TensorType> returnType; /** * Constructs a new function with no arguments * * @param name the name of this function * @param body the ranking expression that defines this function */ public ExpressionFunction(String name, RankingExpression body) { this(name, Collections.emptyList(), body); } /** * Constructs a new function * * @param name the name of this function * @param arguments its argument names * @param body the ranking expression that defines this function */ public ExpressionFunction(String name, List<String> arguments, RankingExpression body) { this(name, arguments, body, ImmutableMap.of(), Optional.empty()); } public ExpressionFunction(String name, List<String> arguments, RankingExpression body, Map<String, TensorType> argumentTypes, Optional<TensorType> returnType) { this.name = Objects.requireNonNull(name, "name cannot be null"); this.arguments = arguments==null ? ImmutableList.of() : ImmutableList.copyOf(arguments); this.body = Objects.requireNonNull(body, "body cannot be null"); if ( ! this.arguments.containsAll(argumentTypes.keySet())) throw new IllegalArgumentException("Argument type keys must be a subset of the argument keys"); this.argumentTypes = ImmutableMap.copyOf(argumentTypes); this.returnType = Objects.requireNonNull(returnType, "returnType cannot be null"); } public String getName() { return name; } /** Returns an immutable list of the arguments of this */ public List<String> arguments() { return arguments; } public RankingExpression getBody() { return body; } /** Returns the types of the arguments of this, if specified. 
The keys of this may be any subset of the arguments */ public Map<String, TensorType> argumentTypes() { return argumentTypes; } /** Returns the return type of this, or empty if not specified */ public Optional<TensorType> returnType() { return returnType; } public ExpressionFunction withName(String name) { return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** Returns a copy of this with the body changed to the given value */ public ExpressionFunction withBody(RankingExpression body) { return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } public ExpressionFunction withReturnType(TensorType returnType) { return new ExpressionFunction(name, arguments, body, argumentTypes, Optional.of(returnType)); } /** Returns a copy of this with the given argument added (if not already present) */ public ExpressionFunction withArgument(String argument) { if (arguments.contains(argument)) return this; List<String> arguments = new ArrayList<>(this.arguments); arguments.add(argument); return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** Returns a copy of this with the given argument (if not present) and argument type added */ public ExpressionFunction withArgument(String argument, TensorType type) { List<String> arguments = new ArrayList<>(this.arguments); if ( ! arguments.contains(argument)) arguments.add(argument); Map<String, TensorType> argumentTypes = new HashMap<>(this.argumentTypes); argumentTypes.put(argument, type); return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** * Creates and returns an instance of this function based on the given * arguments. If function calls are nested, this call may produce * additional functions. * * @param context the context used to expand this * @param argumentValues the arguments to instantiate on. * @param path the expansion path leading to this. * @return the script function instance created. 
*/ public Instance expand(SerializationContext context, List<ExpressionNode> argumentValues, Deque<String> path) { Map<String, String> argumentBindings = new HashMap<>(); for (int i = 0; i < arguments.size() && i < argumentValues.size(); ++i) { String key = arguments.get(i); ExpressionNode expr = argumentValues.get(i); String binding = expr.toString(new StringBuilder(), context, path, null).toString(); if (shouldGenerateFeature(expr)) { String funcName = "autogenerated_ranking_feature@" + Long.toHexString(symbolCode(key + "=" + binding)); context.addFunctionSerialization(RankingExpression.propertyName(funcName), binding); binding = "rankingExpression(" + funcName + ")"; } argumentBindings.put(key, binding); } context = argumentBindings.isEmpty() ? context.withoutBindings() : context.withBindings(argumentBindings); String symbol = toSymbol(argumentBindings); String expressionString = body.getRoot().toString(new StringBuilder(), context, path, null).toString(); return new Instance(symbol, expressionString); } /** * Returns a symbolic string that represents this function with a given * list of arguments. The arguments are mangled by hashing the string * representation of the argument expressions. * * @param argumentBindings the bound arguments to include in the symbolic name. * @return the symbolic name for an instance of this function */ private String toSymbol(Map<String, String> argumentBindings) { if (argumentBindings.isEmpty()) return name; StringBuilder ret = new StringBuilder(); ret.append(name).append("@"); for (Map.Entry<String,String> argumentBinding : argumentBindings.entrySet()) { ret.append(Long.toHexString(symbolCode(argumentBinding.getKey() + "=" + argumentBinding.getValue()))); ret.append("."); } if (ret.toString().endsWith(".")) ret.setLength(ret.length()-1); return ret.toString(); } /** * Returns a more unique hash code than what Java's own {@link * String * * @param str The string to hash. * @return A 64 bit long hash code. 
*/ private static long symbolCode(String str) { try { MessageDigest md = java.security.MessageDigest.getInstance("SHA-1"); byte[] buf = md.digest(Utf8.toBytes(str)); if (buf.length >= 8) { long ret = 0; for (int i = 0; i < 8; ++i) { ret = (ret << 8) + (buf[i] & 0xff); } return ret; } } catch (NoSuchAlgorithmException e) { throw new Error("java must always support SHA-1 message digest format", e); } return str.hashCode(); } @Override public String toString() { return "function '" + name + "'"; } /** * An instance of a serialization of this function, using a particular serialization context (by {@link * ExpressionFunction */ public class Instance { private final String name; private final String expressionString; public Instance(String name, String expressionString) { this.name = name; this.expressionString = expressionString; } public String getName() { return name; } public String getExpressionString() { return expressionString; } } }
class ExpressionFunction { private final String name; private final ImmutableList<String> arguments; /** Types of the inputs, if known. The keys here is any subset (including empty and identity) of the argument list */ private final ImmutableMap<String, TensorType> argumentTypes; private final RankingExpression body; private final Optional<TensorType> returnType; /** * Constructs a new function with no arguments * * @param name the name of this function * @param body the ranking expression that defines this function */ public ExpressionFunction(String name, RankingExpression body) { this(name, Collections.emptyList(), body); } /** * Constructs a new function * * @param name the name of this function * @param arguments its argument names * @param body the ranking expression that defines this function */ public ExpressionFunction(String name, List<String> arguments, RankingExpression body) { this(name, arguments, body, ImmutableMap.of(), Optional.empty()); } public ExpressionFunction(String name, List<String> arguments, RankingExpression body, Map<String, TensorType> argumentTypes, Optional<TensorType> returnType) { this.name = Objects.requireNonNull(name, "name cannot be null"); this.arguments = arguments==null ? ImmutableList.of() : ImmutableList.copyOf(arguments); this.body = Objects.requireNonNull(body, "body cannot be null"); if ( ! this.arguments.containsAll(argumentTypes.keySet())) throw new IllegalArgumentException("Argument type keys must be a subset of the argument keys"); this.argumentTypes = ImmutableMap.copyOf(argumentTypes); this.returnType = Objects.requireNonNull(returnType, "returnType cannot be null"); } public String getName() { return name; } /** Returns an immutable list of the arguments of this */ public List<String> arguments() { return arguments; } public RankingExpression getBody() { return body; } /** Returns the types of the arguments of this, if specified. 
The keys of this may be any subset of the arguments */ public Map<String, TensorType> argumentTypes() { return argumentTypes; } /** Returns the return type of this, or empty if not specified */ public Optional<TensorType> returnType() { return returnType; } public ExpressionFunction withName(String name) { return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** Returns a copy of this with the body changed to the given value */ public ExpressionFunction withBody(RankingExpression body) { return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } public ExpressionFunction withReturnType(TensorType returnType) { return new ExpressionFunction(name, arguments, body, argumentTypes, Optional.of(returnType)); } /** Returns a copy of this with the given argument added (if not already present) */ public ExpressionFunction withArgument(String argument) { if (arguments.contains(argument)) return this; List<String> arguments = new ArrayList<>(this.arguments); arguments.add(argument); return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** Returns a copy of this with the given argument (if not present) and argument type added */ public ExpressionFunction withArgument(String argument, TensorType type) { List<String> arguments = new ArrayList<>(this.arguments); if ( ! arguments.contains(argument)) arguments.add(argument); Map<String, TensorType> argumentTypes = new HashMap<>(this.argumentTypes); argumentTypes.put(argument, type); return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** * Creates and returns an instance of this function based on the given * arguments. If function calls are nested, this call may produce * additional functions. * * @param context the context used to expand this * @param argumentValues the arguments to instantiate on. * @param path the expansion path leading to this. * @return the script function instance created. 
*/ public Instance expand(SerializationContext context, List<ExpressionNode> argumentValues, Deque<String> path) { Map<String, String> argumentBindings = new HashMap<>(); for (int i = 0; i < arguments.size() && i < argumentValues.size(); ++i) { String key = arguments.get(i); ExpressionNode expr = argumentValues.get(i); String binding = expr.toString(new StringBuilder(), context, path, null).toString(); if (shouldGenerateFeature(expr)) { String funcName = "autogenerated_ranking_feature@" + Long.toHexString(symbolCode(key + "=" + binding)); context.addFunctionSerialization(RankingExpression.propertyName(funcName), binding); binding = "rankingExpression(" + funcName + ")"; } argumentBindings.put(key, binding); } context = argumentBindings.isEmpty() ? context.withoutBindings() : context.withBindings(argumentBindings); String symbol = toSymbol(argumentBindings); String expressionString = body.getRoot().toString(new StringBuilder(), context, path, null).toString(); return new Instance(symbol, expressionString); } /** * Returns a symbolic string that represents this function with a given * list of arguments. The arguments are mangled by hashing the string * representation of the argument expressions. * * @param argumentBindings the bound arguments to include in the symbolic name. * @return the symbolic name for an instance of this function */ private String toSymbol(Map<String, String> argumentBindings) { if (argumentBindings.isEmpty()) return name; StringBuilder ret = new StringBuilder(); ret.append(name).append("@"); for (Map.Entry<String,String> argumentBinding : argumentBindings.entrySet()) { ret.append(Long.toHexString(symbolCode(argumentBinding.getKey() + "=" + argumentBinding.getValue()))); ret.append("."); } if (ret.toString().endsWith(".")) ret.setLength(ret.length()-1); return ret.toString(); } /** * Returns a more unique hash code than what Java's own {@link * String * * @param str The string to hash. * @return A 64 bit long hash code. 
*/ private static long symbolCode(String str) { try { MessageDigest md = java.security.MessageDigest.getInstance("SHA-1"); byte[] buf = md.digest(Utf8.toBytes(str)); if (buf.length >= 8) { long ret = 0; for (int i = 0; i < 8; ++i) { ret = (ret << 8) + (buf[i] & 0xff); } return ret; } } catch (NoSuchAlgorithmException e) { throw new Error("java must always support SHA-1 message digest format", e); } return str.hashCode(); } @Override public String toString() { return "function '" + name + "'"; } /** * An instance of a serialization of this function, using a particular serialization context (by {@link * ExpressionFunction */ public class Instance { private final String name; private final String expressionString; public Instance(String name, String expressionString) { this.name = name; this.expressionString = expressionString; } public String getName() { return name; } public String getExpressionString() { return expressionString; } } }
Not really. This captures the fact that the expression parsing represents a negative constant as a `NegativeNode` containing a `ConstantNode`. So that can be output safely as is. Previous parsing (old `arg`) had a regular `ConstantNode` with negative value. A `NegativeNode` in general should probably be in an expression. For instance `-attribute(field)` (`NegativeNode` with a `ReferenceNode`).
private boolean shouldGenerateFeature(ExpressionNode expr) { if (expr instanceof ConstantNode) return false; if (expr instanceof ReferenceNode) return false; if (expr instanceof NameNode) return false; if (expr instanceof FunctionNode) return false; if (expr instanceof NegativeNode && ((NegativeNode) expr).getValue() instanceof ConstantNode) return false; return true; }
if (expr instanceof NegativeNode && ((NegativeNode) expr).getValue() instanceof ConstantNode) return false;
private boolean shouldGenerateFeature(ExpressionNode expr) { if (expr instanceof ConstantNode) return false; if (expr instanceof ReferenceNode) return false; if (expr instanceof NameNode) return false; if (expr instanceof FunctionNode) return false; if (expr instanceof NegativeNode && ((NegativeNode) expr).getValue() instanceof ConstantNode) return false; return true; }
class ExpressionFunction { private final String name; private final ImmutableList<String> arguments; /** Types of the inputs, if known. The keys here is any subset (including empty and identity) of the argument list */ private final ImmutableMap<String, TensorType> argumentTypes; private final RankingExpression body; private final Optional<TensorType> returnType; /** * Constructs a new function with no arguments * * @param name the name of this function * @param body the ranking expression that defines this function */ public ExpressionFunction(String name, RankingExpression body) { this(name, Collections.emptyList(), body); } /** * Constructs a new function * * @param name the name of this function * @param arguments its argument names * @param body the ranking expression that defines this function */ public ExpressionFunction(String name, List<String> arguments, RankingExpression body) { this(name, arguments, body, ImmutableMap.of(), Optional.empty()); } public ExpressionFunction(String name, List<String> arguments, RankingExpression body, Map<String, TensorType> argumentTypes, Optional<TensorType> returnType) { this.name = Objects.requireNonNull(name, "name cannot be null"); this.arguments = arguments==null ? ImmutableList.of() : ImmutableList.copyOf(arguments); this.body = Objects.requireNonNull(body, "body cannot be null"); if ( ! this.arguments.containsAll(argumentTypes.keySet())) throw new IllegalArgumentException("Argument type keys must be a subset of the argument keys"); this.argumentTypes = ImmutableMap.copyOf(argumentTypes); this.returnType = Objects.requireNonNull(returnType, "returnType cannot be null"); } public String getName() { return name; } /** Returns an immutable list of the arguments of this */ public List<String> arguments() { return arguments; } public RankingExpression getBody() { return body; } /** Returns the types of the arguments of this, if specified. 
The keys of this may be any subset of the arguments */ public Map<String, TensorType> argumentTypes() { return argumentTypes; } /** Returns the return type of this, or empty if not specified */ public Optional<TensorType> returnType() { return returnType; } public ExpressionFunction withName(String name) { return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** Returns a copy of this with the body changed to the given value */ public ExpressionFunction withBody(RankingExpression body) { return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } public ExpressionFunction withReturnType(TensorType returnType) { return new ExpressionFunction(name, arguments, body, argumentTypes, Optional.of(returnType)); } /** Returns a copy of this with the given argument added (if not already present) */ public ExpressionFunction withArgument(String argument) { if (arguments.contains(argument)) return this; List<String> arguments = new ArrayList<>(this.arguments); arguments.add(argument); return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** Returns a copy of this with the given argument (if not present) and argument type added */ public ExpressionFunction withArgument(String argument, TensorType type) { List<String> arguments = new ArrayList<>(this.arguments); if ( ! arguments.contains(argument)) arguments.add(argument); Map<String, TensorType> argumentTypes = new HashMap<>(this.argumentTypes); argumentTypes.put(argument, type); return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** * Creates and returns an instance of this function based on the given * arguments. If function calls are nested, this call may produce * additional functions. * * @param context the context used to expand this * @param argumentValues the arguments to instantiate on. * @param path the expansion path leading to this. * @return the script function instance created. 
*/ public Instance expand(SerializationContext context, List<ExpressionNode> argumentValues, Deque<String> path) { Map<String, String> argumentBindings = new HashMap<>(); for (int i = 0; i < arguments.size() && i < argumentValues.size(); ++i) { String key = arguments.get(i); ExpressionNode expr = argumentValues.get(i); String binding = expr.toString(new StringBuilder(), context, path, null).toString(); if (shouldGenerateFeature(expr)) { String funcName = "autogenerated_ranking_feature@" + Long.toHexString(symbolCode(key + "=" + binding)); context.addFunctionSerialization(RankingExpression.propertyName(funcName), binding); binding = "rankingExpression(" + funcName + ")"; } argumentBindings.put(key, binding); } context = argumentBindings.isEmpty() ? context.withoutBindings() : context.withBindings(argumentBindings); String symbol = toSymbol(argumentBindings); String expressionString = body.getRoot().toString(new StringBuilder(), context, path, null).toString(); return new Instance(symbol, expressionString); } /** * Returns a symbolic string that represents this function with a given * list of arguments. The arguments are mangled by hashing the string * representation of the argument expressions. * * @param argumentBindings the bound arguments to include in the symbolic name. * @return the symbolic name for an instance of this function */ private String toSymbol(Map<String, String> argumentBindings) { if (argumentBindings.isEmpty()) return name; StringBuilder ret = new StringBuilder(); ret.append(name).append("@"); for (Map.Entry<String,String> argumentBinding : argumentBindings.entrySet()) { ret.append(Long.toHexString(symbolCode(argumentBinding.getKey() + "=" + argumentBinding.getValue()))); ret.append("."); } if (ret.toString().endsWith(".")) ret.setLength(ret.length()-1); return ret.toString(); } /** * Returns a more unique hash code than what Java's own {@link * String * * @param str The string to hash. * @return A 64 bit long hash code. 
*/ private static long symbolCode(String str) { try { MessageDigest md = java.security.MessageDigest.getInstance("SHA-1"); byte[] buf = md.digest(Utf8.toBytes(str)); if (buf.length >= 8) { long ret = 0; for (int i = 0; i < 8; ++i) { ret = (ret << 8) + (buf[i] & 0xff); } return ret; } } catch (NoSuchAlgorithmException e) { throw new Error("java must always support SHA-1 message digest format", e); } return str.hashCode(); } @Override public String toString() { return "function '" + name + "'"; } /** * An instance of a serialization of this function, using a particular serialization context (by {@link * ExpressionFunction */ public class Instance { private final String name; private final String expressionString; public Instance(String name, String expressionString) { this.name = name; this.expressionString = expressionString; } public String getName() { return name; } public String getExpressionString() { return expressionString; } } }
class ExpressionFunction { private final String name; private final ImmutableList<String> arguments; /** Types of the inputs, if known. The keys here is any subset (including empty and identity) of the argument list */ private final ImmutableMap<String, TensorType> argumentTypes; private final RankingExpression body; private final Optional<TensorType> returnType; /** * Constructs a new function with no arguments * * @param name the name of this function * @param body the ranking expression that defines this function */ public ExpressionFunction(String name, RankingExpression body) { this(name, Collections.emptyList(), body); } /** * Constructs a new function * * @param name the name of this function * @param arguments its argument names * @param body the ranking expression that defines this function */ public ExpressionFunction(String name, List<String> arguments, RankingExpression body) { this(name, arguments, body, ImmutableMap.of(), Optional.empty()); } public ExpressionFunction(String name, List<String> arguments, RankingExpression body, Map<String, TensorType> argumentTypes, Optional<TensorType> returnType) { this.name = Objects.requireNonNull(name, "name cannot be null"); this.arguments = arguments==null ? ImmutableList.of() : ImmutableList.copyOf(arguments); this.body = Objects.requireNonNull(body, "body cannot be null"); if ( ! this.arguments.containsAll(argumentTypes.keySet())) throw new IllegalArgumentException("Argument type keys must be a subset of the argument keys"); this.argumentTypes = ImmutableMap.copyOf(argumentTypes); this.returnType = Objects.requireNonNull(returnType, "returnType cannot be null"); } public String getName() { return name; } /** Returns an immutable list of the arguments of this */ public List<String> arguments() { return arguments; } public RankingExpression getBody() { return body; } /** Returns the types of the arguments of this, if specified. 
The keys of this may be any subset of the arguments */ public Map<String, TensorType> argumentTypes() { return argumentTypes; } /** Returns the return type of this, or empty if not specified */ public Optional<TensorType> returnType() { return returnType; } public ExpressionFunction withName(String name) { return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** Returns a copy of this with the body changed to the given value */ public ExpressionFunction withBody(RankingExpression body) { return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } public ExpressionFunction withReturnType(TensorType returnType) { return new ExpressionFunction(name, arguments, body, argumentTypes, Optional.of(returnType)); } /** Returns a copy of this with the given argument added (if not already present) */ public ExpressionFunction withArgument(String argument) { if (arguments.contains(argument)) return this; List<String> arguments = new ArrayList<>(this.arguments); arguments.add(argument); return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** Returns a copy of this with the given argument (if not present) and argument type added */ public ExpressionFunction withArgument(String argument, TensorType type) { List<String> arguments = new ArrayList<>(this.arguments); if ( ! arguments.contains(argument)) arguments.add(argument); Map<String, TensorType> argumentTypes = new HashMap<>(this.argumentTypes); argumentTypes.put(argument, type); return new ExpressionFunction(name, arguments, body, argumentTypes, returnType); } /** * Creates and returns an instance of this function based on the given * arguments. If function calls are nested, this call may produce * additional functions. * * @param context the context used to expand this * @param argumentValues the arguments to instantiate on. * @param path the expansion path leading to this. * @return the script function instance created. 
*/ public Instance expand(SerializationContext context, List<ExpressionNode> argumentValues, Deque<String> path) { Map<String, String> argumentBindings = new HashMap<>(); for (int i = 0; i < arguments.size() && i < argumentValues.size(); ++i) { String key = arguments.get(i); ExpressionNode expr = argumentValues.get(i); String binding = expr.toString(new StringBuilder(), context, path, null).toString(); if (shouldGenerateFeature(expr)) { String funcName = "autogenerated_ranking_feature@" + Long.toHexString(symbolCode(key + "=" + binding)); context.addFunctionSerialization(RankingExpression.propertyName(funcName), binding); binding = "rankingExpression(" + funcName + ")"; } argumentBindings.put(key, binding); } context = argumentBindings.isEmpty() ? context.withoutBindings() : context.withBindings(argumentBindings); String symbol = toSymbol(argumentBindings); String expressionString = body.getRoot().toString(new StringBuilder(), context, path, null).toString(); return new Instance(symbol, expressionString); } /** * Returns a symbolic string that represents this function with a given * list of arguments. The arguments are mangled by hashing the string * representation of the argument expressions. * * @param argumentBindings the bound arguments to include in the symbolic name. * @return the symbolic name for an instance of this function */ private String toSymbol(Map<String, String> argumentBindings) { if (argumentBindings.isEmpty()) return name; StringBuilder ret = new StringBuilder(); ret.append(name).append("@"); for (Map.Entry<String,String> argumentBinding : argumentBindings.entrySet()) { ret.append(Long.toHexString(symbolCode(argumentBinding.getKey() + "=" + argumentBinding.getValue()))); ret.append("."); } if (ret.toString().endsWith(".")) ret.setLength(ret.length()-1); return ret.toString(); } /** * Returns a more unique hash code than what Java's own {@link * String * * @param str The string to hash. * @return A 64 bit long hash code. 
*/ private static long symbolCode(String str) { try { MessageDigest md = java.security.MessageDigest.getInstance("SHA-1"); byte[] buf = md.digest(Utf8.toBytes(str)); if (buf.length >= 8) { long ret = 0; for (int i = 0; i < 8; ++i) { ret = (ret << 8) + (buf[i] & 0xff); } return ret; } } catch (NoSuchAlgorithmException e) { throw new Error("java must always support SHA-1 message digest format", e); } return str.hashCode(); } @Override public String toString() { return "function '" + name + "'"; } /** * An instance of a serialization of this function, using a particular serialization context (by {@link * ExpressionFunction */ public class Instance { private final String name; private final String expressionString; public Instance(String name, String expressionString) { this.name = name; this.expressionString = expressionString; } public String getName() { return name; } public String getExpressionString() { return expressionString; } } }
Need to verify this is the best approach
new AbstractModule() { @Override protected void configure() { bind(FilterBindings.class).toInstance(filterBindings); bind(ServerConfig.class).toInstance(new ServerConfig(new ServerConfig.Builder().strictFiltering(strictFiltering))); bind(ConnectorConfig.class).toInstance(new ConnectorConfig(new ConnectorConfig.Builder())); bind(ServletPathsConfig.class).toInstance(new ServletPathsConfig(new ServletPathsConfig.Builder())); bind(ConnectionLog.class).toInstance(new VoidConnectionLog()); } },
bind(ConnectionLog.class).toInstance(new VoidConnectionLog());
new AbstractModule() { @Override protected void configure() { bind(FilterBindings.class).toInstance(filterBindings); bind(ServerConfig.class).toInstance(new ServerConfig(new ServerConfig.Builder().strictFiltering(strictFiltering))); bind(ConnectorConfig.class).toInstance(new ConnectorConfig(new ConnectorConfig.Builder())); bind(ServletPathsConfig.class).toInstance(new ServletPathsConfig(new ServletPathsConfig.Builder())); bind(ConnectionLog.class).toInstance(new VoidConnectionLog()); } }
class FilterTestCase { @Test public void requireThatRequestFilterIsNotRunOnUnboundPath() throws Exception { RequestFilterMockBase filter = mock(RequestFilterMockBase.class); FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filter) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(filter, never()).filter(any(HttpRequest.class), any(ResponseHandler.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterIsRunOnBoundPath() throws Exception { final RequestFilter filter = mock(RequestFilterMockBase.class); FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filter) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/filtered/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(filter, times(1)).filter(any(HttpRequest.class), any(ResponseHandler.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterChangesAreSeenByRequestHandler() throws Exception { final RequestFilter filter = new HeaderRequestFilter("foo", "bar"); FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filter) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); 
assertThat(requestHandler.getHeaderMap().get("foo").get(0), is("bar")); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterCanRespond() throws Exception { FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", new RespondForbiddenFilter()) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html").expectStatusCode(is(Response.Status.FORBIDDEN)); assertThat(requestHandler.hasBeenInvokedYet(), is(false)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatFilterCanHaveNullCompletionHandler() throws Exception { final int responseStatus = Response.Status.OK; final String responseMessage = "Excellent"; FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", new NullCompletionHandlerFilter(responseStatus, responseMessage)) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html") .expectStatusCode(is(responseStatus)) .expectContent(is(responseMessage)); assertThat(requestHandler.hasBeenInvokedYet(), is(false)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterExecutionIsExceptionSafe() throws Exception { FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", new ThrowingRequestFilter()) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html").expectStatusCode(is(Response.Status.INTERNAL_SERVER_ERROR)); 
assertThat(requestHandler.hasBeenInvokedYet(), is(false)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFilterIsNotRunOnUnboundPath() throws Exception { final ResponseFilter filter = mock(ResponseFilterMockBase.class); FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", filter) .addResponseFilterBinding("my-response-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(filter, never()).filter(any(Response.class), any(Request.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFilterIsRunOnBoundPath() throws Exception { final ResponseFilter filter = mock(ResponseFilterMockBase.class); FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", filter) .addResponseFilterBinding("my-response-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/filtered/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(filter, times(1)).filter(any(Response.class), any(Request.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFilterChangesAreWrittenToResponse() throws Exception { FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", new HeaderResponseFilter("foo", "bar")) .addResponseFilterBinding("my-response-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html") .expectHeader("foo", is("bar")); 
// ======================================================================
// NOTE(review): this region has been collapsed onto a few very long
// physical lines, and an automated comment-stripping pass has evidently
// eaten every URI-pattern string literal from "http:" onwards (the
// "//host/path" tail looked like a line comment), INCLUDING the closing
// quote — e.g. `.addRequestFilterBinding("my-request-filter", "http:` is
// cut short. Modules.combine(...) further down is likewise missing its
// first argument. The code cannot compile as written; restore the
// original literals (presumably of the form "http://*/filtered/*",
// "http://*/*" — TODO confirm against version control) before touching
// logic. The comments below document intent only; code bytes unchanged.
// ======================================================================
// Tail of an exception-safety test; then: a response filter that throws must
// surface as 500 INTERNAL_SERVER_ERROR while the request handler still runs,
// and a request filter + response filter sharing one uriPattern must each
// fire exactly once for a matching request.
assertThat(requestHandler.awaitInvocation(), is(true)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFilterExecutionIsExceptionSafe() throws Exception { FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", new ThrowingResponseFilter()) .addResponseFilterBinding("my-response-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html").expectStatusCode(is(Response.Status.INTERNAL_SERVER_ERROR)); assertThat(requestHandler.awaitInvocation(), is(true)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterAndResponseFilterCanBindToSamePath() throws Exception { final RequestFilter requestFilter = mock(RequestFilterMockBase.class); final ResponseFilter responseFilter = mock(ResponseFilterMockBase.class); final String uriPattern = "http: FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", requestFilter) .addRequestFilterBinding("my-request-filter", uriPattern) .addResponseFilter("my-response-filter", responseFilter) .addResponseFilterBinding("my-response-filter", uriPattern) .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(requestFilter, times(1)).filter(any(HttpRequest.class), any(ResponseHandler.class)); verify(responseFilter, times(1)).filter(any(Response.class), any(Request.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFromRequestFilterGoesThroughResponseFilter() throws Exception { FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", new RespondForbiddenFilter()) 
// A FORBIDDEN response produced directly by a request filter must still pass
// through the bound response filter (gains header foo=bar; handler never
// invoked). Then: RequestFilterChain.newInstance must refer() each member
// filter exactly once, and release() must close each obtained reference.
.addRequestFilterBinding("my-request-filter", "http: .addResponseFilter("my-response-filter", new HeaderResponseFilter("foo", "bar")) .addResponseFilterBinding("my-response-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html") .expectStatusCode(is(Response.Status.FORBIDDEN)) .expectHeader("foo", is("bar")); assertThat(requestHandler.hasBeenInvokedYet(), is(false)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterChainRetainsFilters() { final RequestFilter requestFilter1 = mock(RequestFilter.class); final RequestFilter requestFilter2 = mock(RequestFilter.class); verify(requestFilter1, never()).refer(); verify(requestFilter2, never()).refer(); final ResourceReference reference1 = mock(ResourceReference.class); final ResourceReference reference2 = mock(ResourceReference.class); when(requestFilter1.refer()).thenReturn(reference1); when(requestFilter2.refer()).thenReturn(reference2); final RequestFilter chain = RequestFilterChain.newInstance(requestFilter1, requestFilter2); verify(requestFilter1, times(1)).refer(); verify(requestFilter2, times(1)).refer(); verify(reference1, never()).close(); verify(reference2, never()).close(); chain.release(); verify(reference1, times(1)).close(); verify(reference2, times(1)).close(); } @Test public void requireThatRequestFilterChainIsRun() throws Exception { final RequestFilter requestFilter1 = mock(RequestFilter.class); final RequestFilter requestFilter2 = mock(RequestFilter.class); final RequestFilter requestFilterChain = RequestFilterChain.newInstance(requestFilter1, requestFilter2); final HttpRequest request = null; final ResponseHandler responseHandler = null; requestFilterChain.filter(request, responseHandler); verify(requestFilter1).filter(isNull(), any(ResponseHandler.class)); verify(requestFilter2).filter(isNull(), 
// Chain pass-through contracts, verified with ArgumentCaptor round-trips:
// the chain must hand each member the ORIGINAL HttpRequest (getUri call on
// the captured object reaches the mock) and the ORIGINAL ResponseHandler
// (handleResponse on the captured object reaches the mock).
any(ResponseHandler.class)); } @Test public void requireThatRequestFilterChainCallsFilterWithOriginalRequest() throws Exception { final RequestFilter requestFilter = mock(RequestFilter.class); final RequestFilter requestFilterChain = RequestFilterChain.newInstance(requestFilter); final HttpRequest request = mock(HttpRequest.class); final ResponseHandler responseHandler = null; requestFilterChain.filter(request, responseHandler); final ArgumentCaptor<HttpRequest> requestCaptor = ArgumentCaptor.forClass(HttpRequest.class); verify(requestFilter).filter(requestCaptor.capture(), isNull()); verify(request, never()).getUri(); requestCaptor.getValue().getUri(); verify(request, times(1)).getUri(); } @Test public void requireThatRequestFilterChainCallsFilterWithOriginalResponseHandler() throws Exception { final RequestFilter requestFilter = mock(RequestFilter.class); final RequestFilter requestFilterChain = RequestFilterChain.newInstance(requestFilter); final HttpRequest request = null; final ResponseHandler responseHandler = mock(ResponseHandler.class); requestFilterChain.filter(request, responseHandler); final ArgumentCaptor<ResponseHandler> responseHandlerCaptor = ArgumentCaptor.forClass(ResponseHandler.class); verify(requestFilter).filter(isNull(), responseHandlerCaptor.capture()); verify(responseHandler, never()).handleResponse(any(Response.class)); responseHandlerCaptor.getValue().handleResponse(mock(Response.class)); verify(responseHandler, times(1)).handleResponse(any(Response.class)); } @Test public void requireThatRequestFilterCanTerminateChain() throws Exception { final RequestFilter requestFilter1 = new RespondForbiddenFilter(); final RequestFilter requestFilter2 = mock(RequestFilter.class); final RequestFilter requestFilterChain = RequestFilterChain.newInstance(requestFilter1, requestFilter2); final HttpRequest request = null; final ResponseHandler responseHandler = mock(ResponseHandler.class); 
// A filter that responds terminates the chain: the second filter never runs
// and the captured response status is FORBIDDEN. ResponseFilterChain then
// gets the same refer()-on-construction / close-on-release() bookkeeping
// test as the request-side chain above.
when(responseHandler.handleResponse(any(Response.class))).thenReturn(mock(ContentChannel.class)); requestFilterChain.filter(request, responseHandler); verify(requestFilter2, never()).filter(any(HttpRequest.class), any(ResponseHandler.class)); final ArgumentCaptor<Response> responseCaptor = ArgumentCaptor.forClass(Response.class); verify(responseHandler).handleResponse(responseCaptor.capture()); assertThat(responseCaptor.getValue().getStatus(), is(Response.Status.FORBIDDEN)); } @Test public void requireThatResponseFilterChainRetainsFilters() { final ResponseFilter responseFilter1 = mock(ResponseFilter.class); final ResponseFilter responseFilter2 = mock(ResponseFilter.class); verify(responseFilter1, never()).refer(); verify(responseFilter2, never()).refer(); final ResourceReference reference1 = mock(ResourceReference.class); final ResourceReference reference2 = mock(ResourceReference.class); when(responseFilter1.refer()).thenReturn(reference1); when(responseFilter2.refer()).thenReturn(reference2); final ResponseFilter chain = ResponseFilterChain.newInstance(responseFilter1, responseFilter2); verify(responseFilter1, times(1)).refer(); verify(responseFilter2, times(1)).refer(); verify(reference1, never()).close(); verify(reference2, never()).close(); chain.release(); verify(reference1, times(1)).close(); verify(reference2, times(1)).close(); } @Test public void requireThatResponseFilterChainIsRun() { final ResponseFilter responseFilter1 = new ResponseHeaderFilter("foo", "bar"); final ResponseFilter responseFilter2 = mock(ResponseFilter.class); final int statusCode = Response.Status.BAD_GATEWAY; final Response response = new Response(statusCode); final Request request = null; ResponseFilterChain.newInstance(responseFilter1, responseFilter2).filter(response, request); final ArgumentCaptor<Response> responseCaptor = ArgumentCaptor.forClass(Response.class); verify(responseFilter2).filter(responseCaptor.capture(), isNull()); assertThat(responseCaptor.getValue().getStatus(), 
// Running a response-filter chain must preserve the status code and apply
// header changes to the very Response object passed in. Next: per-port
// DEFAULT request/response filters (setRequest/ResponseFilterDefaultForPort)
// must run when no binding matches the request URI.
is(statusCode)); assertThat(responseCaptor.getValue().headers().getFirst("foo"), is("bar")); assertThat(response.getStatus(), is(statusCode)); assertThat(response.headers().getFirst("foo"), is("bar")); } @Test public void requireThatDefaultRequestFilterChainIsRunIfNoOtherFilterChainMatches() throws IOException, InterruptedException { RequestFilter filterWithBinding = mock(RequestFilter.class); RequestFilter defaultFilter = mock(RequestFilter.class); String defaultFilterId = "default-request-filter"; FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filterWithBinding) .addRequestFilterBinding("my-request-filter", "http: .addRequestFilter(defaultFilterId, defaultFilter) .setRequestFilterDefaultForPort(defaultFilterId, 0) .build(); MyRequestHandler requestHandler = new MyRequestHandler(); TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(defaultFilter, times(1)).filter(any(HttpRequest.class), any(ResponseHandler.class)); verify(filterWithBinding, never()).filter(any(HttpRequest.class), any(ResponseHandler.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatDefaultResponseFilterChainIsRunIfNoOtherFilterChainMatches() throws IOException, InterruptedException { ResponseFilter filterWithBinding = mock(ResponseFilter.class); ResponseFilter defaultFilter = mock(ResponseFilter.class); String defaultFilterId = "default-response-filter"; FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", filterWithBinding) .addResponseFilterBinding("my-response-filter", "http: .addResponseFilter(defaultFilterId, defaultFilter) .setResponseFilterDefaultForPort(defaultFilterId, 0) .build(); MyRequestHandler requestHandler = new MyRequestHandler(); TestDriver testDriver = newDriver(requestHandler, filterBindings); 
// ...and conversely: a filter whose binding DOES match the request URI takes
// precedence over the per-port default filter, for both the request side and
// the response side (default filter never invoked, bound filter invoked).
testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(defaultFilter, times(1)).filter(any(Response.class), any(Request.class)); verify(filterWithBinding, never()).filter(any(Response.class), any(Request.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterWithBindingMatchHasPrecedenceOverDefaultFilter() throws IOException, InterruptedException { RequestFilterMockBase filterWithBinding = mock(RequestFilterMockBase.class); RequestFilterMockBase defaultFilter = mock(RequestFilterMockBase.class); String defaultFilterId = "default-request-filter"; FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filterWithBinding) .addRequestFilterBinding("my-request-filter", "http: .addRequestFilter(defaultFilterId, defaultFilter) .setRequestFilterDefaultForPort(defaultFilterId, 0) .build(); MyRequestHandler requestHandler = new MyRequestHandler(); TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/filtered/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(defaultFilter, never()).filter(any(HttpRequest.class), any(ResponseHandler.class)); verify(filterWithBinding).filter(any(HttpRequest.class), any(ResponseHandler.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFilterWithBindingMatchHasPrecedenceOverDefaultFilter() throws IOException, InterruptedException { ResponseFilter filterWithBinding = mock(ResponseFilter.class); ResponseFilter defaultFilter = mock(ResponseFilter.class); String defaultFilterId = "default-response-filter"; FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", filterWithBinding) .addResponseFilterBinding("my-response-filter", "http: .addResponseFilter(defaultFilterId, defaultFilter) .setResponseFilterDefaultForPort(defaultFilterId, 0) .build(); MyRequestHandler 
// Metrics: a request handled by a request-filter chain increments
// FILTERING_REQUEST_HANDLED and (with no response filter bound)
// FILTERING_RESPONSE_UNHANDLED on the injected MetricConsumerMock, and
// neither of the opposite counters. Then: strict filtering must reject a
// request that matches no request-filter chain...
requestHandler = new MyRequestHandler(); TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/filtered/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(defaultFilter, never()).filter(any(Response.class), any(Request.class)); verify(filterWithBinding, times(1)).filter(any(Response.class), any(Request.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatMetricAreReported() throws IOException, InterruptedException { FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", mock(RequestFilter.class)) .addRequestFilterBinding("my-request-filter", "http: .build(); MetricConsumerMock metricConsumerMock = new MetricConsumerMock(); MyRequestHandler requestHandler = new MyRequestHandler(); TestDriver testDriver = newDriver(requestHandler, filterBindings, metricConsumerMock, false); testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(metricConsumerMock.mockitoMock()) .add(MetricDefinitions.FILTERING_REQUEST_HANDLED, 1L, MetricConsumerMock.STATIC_CONTEXT); verify(metricConsumerMock.mockitoMock(), never()) .add(MetricDefinitions.FILTERING_REQUEST_UNHANDLED, 1L, MetricConsumerMock.STATIC_CONTEXT); verify(metricConsumerMock.mockitoMock(), never()) .add(MetricDefinitions.FILTERING_RESPONSE_HANDLED, 1L, MetricConsumerMock.STATIC_CONTEXT); verify(metricConsumerMock.mockitoMock()) .add(MetricDefinitions.FILTERING_RESPONSE_UNHANDLED, 1L, MetricConsumerMock.STATIC_CONTEXT); assertThat(testDriver.close(), is(true)); } @Test public void requireThatStrictFilteringRejectsRequestsNotMatchingFilterChains() throws IOException { RequestFilter filter = mock(RequestFilter.class); FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filter) .addRequestFilterBinding("my-request-filter", "http: .build(); MyRequestHandler requestHandler = new MyRequestHandler(); 
// ...with 403 FORBIDDEN and an explanatory body, without invoking the filter.
// Helpers: two newDriver overloads feed TestDriver.newInstance; NOTE(review):
// `Modules.combine( , ...)` below is missing its FIRST argument — the Guice
// module that bound filterBindings/strictFiltering was evidently stripped by
// the same pass that ate the URI literals; restore before compiling.
// MyRequestHandler copies the request headers, replies 200 OK, and counts
// down a latch so tests can await/observe invocation.
TestDriver testDriver = newDriver(requestHandler, filterBindings, new MetricConsumerMock(), true); testDriver.client().get("/unfiltered/") .expectStatusCode(is(Response.Status.FORBIDDEN)) .expectContent(containsString("Request did not match any request filter chain")); verify(filter, never()).filter(any(), any()); assertThat(testDriver.close(), is(true)); } private static TestDriver newDriver(MyRequestHandler requestHandler, FilterBindings filterBindings) { return newDriver(requestHandler, filterBindings, new MetricConsumerMock(), false); } private static TestDriver newDriver( MyRequestHandler requestHandler, FilterBindings filterBindings, MetricConsumerMock metricConsumer, boolean strictFiltering) { return TestDriver.newInstance( JettyHttpServer.class, requestHandler, newFilterModule(filterBindings, metricConsumer, strictFiltering)); } private static com.google.inject.Module newFilterModule( FilterBindings filterBindings, MetricConsumerMock metricConsumer, boolean strictFiltering) { return Modules.combine( , new ConnectorFactoryRegistryModule(), metricConsumer.asGuiceModule()); } private static abstract class RequestFilterMockBase extends AbstractResource implements RequestFilter {} private static abstract class ResponseFilterMockBase extends AbstractResource implements ResponseFilter {} private static class MyRequestHandler extends AbstractRequestHandler { private final CountDownLatch invocationLatch = new CountDownLatch(1); private final AtomicReference<Map<String, List<String>>> headerCopy = new AtomicReference<>(null); @Override public ContentChannel handleRequest(final Request request, final ResponseHandler handler) { try { headerCopy.set(new HashMap<String, List<String>>(request.headers())); ResponseDispatch.newInstance(Response.Status.OK).dispatch(handler); return null; } finally { invocationLatch.countDown(); } } public boolean hasBeenInvokedYet() { return invocationLatch.getCount() == 0L; } public boolean awaitInvocation() throws InterruptedException { 
// Remaining helper filters: RespondForbiddenFilter answers 403 directly;
// Throwing(Request|Response)Filter raise RuntimeException to exercise the
// exception-safety tests; Header(Request|Response)Filter add a fixed header.
// NOTE(review): NullCompletionHandlerFilter is a non-static `public class`
// unlike its `private static` siblings — looks unintentional; confirm.
return invocationLatch.await(60, TimeUnit.SECONDS); } public Map<String, List<String>> getHeaderMap() { return headerCopy.get(); } } private static class RespondForbiddenFilter extends AbstractResource implements RequestFilter { @Override public void filter(final HttpRequest request, final ResponseHandler handler) { ResponseDispatch.newInstance(Response.Status.FORBIDDEN).dispatch(handler); } } private static class ThrowingRequestFilter extends AbstractResource implements RequestFilter { @Override public void filter(final HttpRequest request, final ResponseHandler handler) { throw new RuntimeException(); } } private static class ThrowingResponseFilter extends AbstractResource implements ResponseFilter { @Override public void filter(final Response response, final Request request) { throw new RuntimeException(); } } private static class HeaderRequestFilter extends AbstractResource implements RequestFilter { private final String key; private final String val; public HeaderRequestFilter(final String key, final String val) { this.key = key; this.val = val; } @Override public void filter(final HttpRequest request, final ResponseHandler handler) { request.headers().add(key, val); } } private static class HeaderResponseFilter extends AbstractResource implements ResponseFilter { private final String key; private final String val; public HeaderResponseFilter(final String key, final String val) { this.key = key; this.val = val; } @Override public void filter(final Response response, final Request request) { response.headers().add(key, val); } } public class NullCompletionHandlerFilter extends AbstractResource implements RequestFilter { private final int responseStatus; private final String responseMessage; public NullCompletionHandlerFilter(final int responseStatus, final String responseMessage) { this.responseStatus = responseStatus; this.responseMessage = responseMessage; } @Override public void filter(final HttpRequest request, final ResponseHandler responseHandler) { final 
// NullCompletionHandlerFilter.filter: writes the response body with an
// explicitly null CompletionHandler (the behaviour under test) and closes
// the channel with null; the final '}' closes the enclosing test class.
HttpResponse response = HttpResponse.newInstance(responseStatus); final ContentChannel channel = responseHandler.handleResponse(response); final CompletionHandler completionHandler = null; channel.write(ByteBuffer.wrap(responseMessage.getBytes()), completionHandler); channel.close(null); } } }
// ======================================================================
// NOTE(review): this `class FilterTestCase {` block duplicates, verbatim,
// the class whose tail ends just above — the file appears to contain the
// same test class TWICE (extraction/merge artifact). Two top-level classes
// with the same name cannot compile; exactly one copy should be kept.
// The same comment-stripping damage applies here: every URI-pattern string
// literal is truncated at "http:" (closing quote eaten) and must be
// restored (presumably "http://*/filtered/*", "http://*/*", etc. — TODO
// confirm against version control). Code bytes below are unchanged.
// ======================================================================
// Filter test suite for the jdisc HTTP server. First tests: a request
// filter bound to a different path must NOT run; bound to the requested
// path it must run exactly once; and header changes a request filter
// makes...
class FilterTestCase { @Test public void requireThatRequestFilterIsNotRunOnUnboundPath() throws Exception { RequestFilterMockBase filter = mock(RequestFilterMockBase.class); FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filter) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(filter, never()).filter(any(HttpRequest.class), any(ResponseHandler.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterIsRunOnBoundPath() throws Exception { final RequestFilter filter = mock(RequestFilterMockBase.class); FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filter) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/filtered/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(filter, times(1)).filter(any(HttpRequest.class), any(ResponseHandler.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterChangesAreSeenByRequestHandler() throws Exception { final RequestFilter filter = new HeaderRequestFilter("foo", "bar"); FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filter) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); 
// ...must be visible to the request handler (header foo=bar observed).
// Then: a request filter may answer directly (403, handler never invoked),
// may write its response with a null CompletionHandler (status and body
// reach the client), and a throwing request filter...
assertThat(requestHandler.getHeaderMap().get("foo").get(0), is("bar")); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterCanRespond() throws Exception { FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", new RespondForbiddenFilter()) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html").expectStatusCode(is(Response.Status.FORBIDDEN)); assertThat(requestHandler.hasBeenInvokedYet(), is(false)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatFilterCanHaveNullCompletionHandler() throws Exception { final int responseStatus = Response.Status.OK; final String responseMessage = "Excellent"; FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", new NullCompletionHandlerFilter(responseStatus, responseMessage)) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html") .expectStatusCode(is(responseStatus)) .expectContent(is(responseMessage)); assertThat(requestHandler.hasBeenInvokedYet(), is(false)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterExecutionIsExceptionSafe() throws Exception { FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", new ThrowingRequestFilter()) .addRequestFilterBinding("my-request-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html").expectStatusCode(is(Response.Status.INTERNAL_SERVER_ERROR)); 
// ...maps to 500 with the handler never invoked. Response-filter mirror
// tests: not run on an unbound path, run exactly once on a bound path, and
// header changes made by a response filter must appear on the wire.
assertThat(requestHandler.hasBeenInvokedYet(), is(false)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFilterIsNotRunOnUnboundPath() throws Exception { final ResponseFilter filter = mock(ResponseFilterMockBase.class); FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", filter) .addResponseFilterBinding("my-response-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(filter, never()).filter(any(Response.class), any(Request.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFilterIsRunOnBoundPath() throws Exception { final ResponseFilter filter = mock(ResponseFilterMockBase.class); FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", filter) .addResponseFilterBinding("my-response-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/filtered/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(filter, times(1)).filter(any(Response.class), any(Request.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFilterChangesAreWrittenToResponse() throws Exception { FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", new HeaderResponseFilter("foo", "bar")) .addResponseFilterBinding("my-response-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html") .expectHeader("foo", is("bar")); 
// From here on the content repeats the earlier copy line-for-line:
// throwing response filter -> 500; request+response filters sharing one
// uriPattern both fire exactly once.
assertThat(requestHandler.awaitInvocation(), is(true)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFilterExecutionIsExceptionSafe() throws Exception { FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", new ThrowingResponseFilter()) .addResponseFilterBinding("my-response-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html").expectStatusCode(is(Response.Status.INTERNAL_SERVER_ERROR)); assertThat(requestHandler.awaitInvocation(), is(true)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterAndResponseFilterCanBindToSamePath() throws Exception { final RequestFilter requestFilter = mock(RequestFilterMockBase.class); final ResponseFilter responseFilter = mock(ResponseFilterMockBase.class); final String uriPattern = "http: FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", requestFilter) .addRequestFilterBinding("my-request-filter", uriPattern) .addResponseFilter("my-response-filter", responseFilter) .addResponseFilterBinding("my-response-filter", uriPattern) .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(requestFilter, times(1)).filter(any(HttpRequest.class), any(ResponseHandler.class)); verify(responseFilter, times(1)).filter(any(Response.class), any(Request.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFromRequestFilterGoesThroughResponseFilter() throws Exception { FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", new RespondForbiddenFilter()) 
// FORBIDDEN response from a request filter still traverses the bound
// response filter; RequestFilterChain refer()/release() bookkeeping.
.addRequestFilterBinding("my-request-filter", "http: .addResponseFilter("my-response-filter", new HeaderResponseFilter("foo", "bar")) .addResponseFilterBinding("my-response-filter", "http: .build(); final MyRequestHandler requestHandler = new MyRequestHandler(); final TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html") .expectStatusCode(is(Response.Status.FORBIDDEN)) .expectHeader("foo", is("bar")); assertThat(requestHandler.hasBeenInvokedYet(), is(false)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterChainRetainsFilters() { final RequestFilter requestFilter1 = mock(RequestFilter.class); final RequestFilter requestFilter2 = mock(RequestFilter.class); verify(requestFilter1, never()).refer(); verify(requestFilter2, never()).refer(); final ResourceReference reference1 = mock(ResourceReference.class); final ResourceReference reference2 = mock(ResourceReference.class); when(requestFilter1.refer()).thenReturn(reference1); when(requestFilter2.refer()).thenReturn(reference2); final RequestFilter chain = RequestFilterChain.newInstance(requestFilter1, requestFilter2); verify(requestFilter1, times(1)).refer(); verify(requestFilter2, times(1)).refer(); verify(reference1, never()).close(); verify(reference2, never()).close(); chain.release(); verify(reference1, times(1)).close(); verify(reference2, times(1)).close(); } @Test public void requireThatRequestFilterChainIsRun() throws Exception { final RequestFilter requestFilter1 = mock(RequestFilter.class); final RequestFilter requestFilter2 = mock(RequestFilter.class); final RequestFilter requestFilterChain = RequestFilterChain.newInstance(requestFilter1, requestFilter2); final HttpRequest request = null; final ResponseHandler responseHandler = null; requestFilterChain.filter(request, responseHandler); verify(requestFilter1).filter(isNull(), any(ResponseHandler.class)); verify(requestFilter2).filter(isNull(), 
// Chain pass-through contracts (original request / original response
// handler), verified via ArgumentCaptor, as in the earlier copy.
any(ResponseHandler.class)); } @Test public void requireThatRequestFilterChainCallsFilterWithOriginalRequest() throws Exception { final RequestFilter requestFilter = mock(RequestFilter.class); final RequestFilter requestFilterChain = RequestFilterChain.newInstance(requestFilter); final HttpRequest request = mock(HttpRequest.class); final ResponseHandler responseHandler = null; requestFilterChain.filter(request, responseHandler); final ArgumentCaptor<HttpRequest> requestCaptor = ArgumentCaptor.forClass(HttpRequest.class); verify(requestFilter).filter(requestCaptor.capture(), isNull()); verify(request, never()).getUri(); requestCaptor.getValue().getUri(); verify(request, times(1)).getUri(); } @Test public void requireThatRequestFilterChainCallsFilterWithOriginalResponseHandler() throws Exception { final RequestFilter requestFilter = mock(RequestFilter.class); final RequestFilter requestFilterChain = RequestFilterChain.newInstance(requestFilter); final HttpRequest request = null; final ResponseHandler responseHandler = mock(ResponseHandler.class); requestFilterChain.filter(request, responseHandler); final ArgumentCaptor<ResponseHandler> responseHandlerCaptor = ArgumentCaptor.forClass(ResponseHandler.class); verify(requestFilter).filter(isNull(), responseHandlerCaptor.capture()); verify(responseHandler, never()).handleResponse(any(Response.class)); responseHandlerCaptor.getValue().handleResponse(mock(Response.class)); verify(responseHandler, times(1)).handleResponse(any(Response.class)); } @Test public void requireThatRequestFilterCanTerminateChain() throws Exception { final RequestFilter requestFilter1 = new RespondForbiddenFilter(); final RequestFilter requestFilter2 = mock(RequestFilter.class); final RequestFilter requestFilterChain = RequestFilterChain.newInstance(requestFilter1, requestFilter2); final HttpRequest request = null; final ResponseHandler responseHandler = mock(ResponseHandler.class); 
// Chain termination on direct response; ResponseFilterChain
// refer()/release() bookkeeping; response-filter chain execution.
when(responseHandler.handleResponse(any(Response.class))).thenReturn(mock(ContentChannel.class)); requestFilterChain.filter(request, responseHandler); verify(requestFilter2, never()).filter(any(HttpRequest.class), any(ResponseHandler.class)); final ArgumentCaptor<Response> responseCaptor = ArgumentCaptor.forClass(Response.class); verify(responseHandler).handleResponse(responseCaptor.capture()); assertThat(responseCaptor.getValue().getStatus(), is(Response.Status.FORBIDDEN)); } @Test public void requireThatResponseFilterChainRetainsFilters() { final ResponseFilter responseFilter1 = mock(ResponseFilter.class); final ResponseFilter responseFilter2 = mock(ResponseFilter.class); verify(responseFilter1, never()).refer(); verify(responseFilter2, never()).refer(); final ResourceReference reference1 = mock(ResourceReference.class); final ResourceReference reference2 = mock(ResourceReference.class); when(responseFilter1.refer()).thenReturn(reference1); when(responseFilter2.refer()).thenReturn(reference2); final ResponseFilter chain = ResponseFilterChain.newInstance(responseFilter1, responseFilter2); verify(responseFilter1, times(1)).refer(); verify(responseFilter2, times(1)).refer(); verify(reference1, never()).close(); verify(reference2, never()).close(); chain.release(); verify(reference1, times(1)).close(); verify(reference2, times(1)).close(); } @Test public void requireThatResponseFilterChainIsRun() { final ResponseFilter responseFilter1 = new ResponseHeaderFilter("foo", "bar"); final ResponseFilter responseFilter2 = mock(ResponseFilter.class); final int statusCode = Response.Status.BAD_GATEWAY; final Response response = new Response(statusCode); final Request request = null; ResponseFilterChain.newInstance(responseFilter1, responseFilter2).filter(response, request); final ArgumentCaptor<Response> responseCaptor = ArgumentCaptor.forClass(Response.class); verify(responseFilter2).filter(responseCaptor.capture(), isNull()); assertThat(responseCaptor.getValue().getStatus(), 
// Response-filter chain preserves status and mutates the passed-in
// Response; per-port default request/response filter fallback tests.
is(statusCode)); assertThat(responseCaptor.getValue().headers().getFirst("foo"), is("bar")); assertThat(response.getStatus(), is(statusCode)); assertThat(response.headers().getFirst("foo"), is("bar")); } @Test public void requireThatDefaultRequestFilterChainIsRunIfNoOtherFilterChainMatches() throws IOException, InterruptedException { RequestFilter filterWithBinding = mock(RequestFilter.class); RequestFilter defaultFilter = mock(RequestFilter.class); String defaultFilterId = "default-request-filter"; FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filterWithBinding) .addRequestFilterBinding("my-request-filter", "http: .addRequestFilter(defaultFilterId, defaultFilter) .setRequestFilterDefaultForPort(defaultFilterId, 0) .build(); MyRequestHandler requestHandler = new MyRequestHandler(); TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(defaultFilter, times(1)).filter(any(HttpRequest.class), any(ResponseHandler.class)); verify(filterWithBinding, never()).filter(any(HttpRequest.class), any(ResponseHandler.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatDefaultResponseFilterChainIsRunIfNoOtherFilterChainMatches() throws IOException, InterruptedException { ResponseFilter filterWithBinding = mock(ResponseFilter.class); ResponseFilter defaultFilter = mock(ResponseFilter.class); String defaultFilterId = "default-response-filter"; FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", filterWithBinding) .addResponseFilterBinding("my-response-filter", "http: .addResponseFilter(defaultFilterId, defaultFilter) .setResponseFilterDefaultForPort(defaultFilterId, 0) .build(); MyRequestHandler requestHandler = new MyRequestHandler(); TestDriver testDriver = newDriver(requestHandler, filterBindings); 
// Binding-match precedence over per-port default filters, request and
// response side.
testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(defaultFilter, times(1)).filter(any(Response.class), any(Request.class)); verify(filterWithBinding, never()).filter(any(Response.class), any(Request.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatRequestFilterWithBindingMatchHasPrecedenceOverDefaultFilter() throws IOException, InterruptedException { RequestFilterMockBase filterWithBinding = mock(RequestFilterMockBase.class); RequestFilterMockBase defaultFilter = mock(RequestFilterMockBase.class); String defaultFilterId = "default-request-filter"; FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filterWithBinding) .addRequestFilterBinding("my-request-filter", "http: .addRequestFilter(defaultFilterId, defaultFilter) .setRequestFilterDefaultForPort(defaultFilterId, 0) .build(); MyRequestHandler requestHandler = new MyRequestHandler(); TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/filtered/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(defaultFilter, never()).filter(any(HttpRequest.class), any(ResponseHandler.class)); verify(filterWithBinding).filter(any(HttpRequest.class), any(ResponseHandler.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatResponseFilterWithBindingMatchHasPrecedenceOverDefaultFilter() throws IOException, InterruptedException { ResponseFilter filterWithBinding = mock(ResponseFilter.class); ResponseFilter defaultFilter = mock(ResponseFilter.class); String defaultFilterId = "default-response-filter"; FilterBindings filterBindings = new FilterBindings.Builder() .addResponseFilter("my-response-filter", filterWithBinding) .addResponseFilterBinding("my-response-filter", "http: .addResponseFilter(defaultFilterId, defaultFilter) .setResponseFilterDefaultForPort(defaultFilterId, 0) .build(); MyRequestHandler 
// Metric reporting (FILTERING_* counters) and the strict-filtering
// rejection test begin here, as in the earlier copy.
requestHandler = new MyRequestHandler(); TestDriver testDriver = newDriver(requestHandler, filterBindings); testDriver.client().get("/filtered/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(defaultFilter, never()).filter(any(Response.class), any(Request.class)); verify(filterWithBinding, times(1)).filter(any(Response.class), any(Request.class)); assertThat(testDriver.close(), is(true)); } @Test public void requireThatMetricAreReported() throws IOException, InterruptedException { FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", mock(RequestFilter.class)) .addRequestFilterBinding("my-request-filter", "http: .build(); MetricConsumerMock metricConsumerMock = new MetricConsumerMock(); MyRequestHandler requestHandler = new MyRequestHandler(); TestDriver testDriver = newDriver(requestHandler, filterBindings, metricConsumerMock, false); testDriver.client().get("/status.html"); assertThat(requestHandler.awaitInvocation(), is(true)); verify(metricConsumerMock.mockitoMock()) .add(MetricDefinitions.FILTERING_REQUEST_HANDLED, 1L, MetricConsumerMock.STATIC_CONTEXT); verify(metricConsumerMock.mockitoMock(), never()) .add(MetricDefinitions.FILTERING_REQUEST_UNHANDLED, 1L, MetricConsumerMock.STATIC_CONTEXT); verify(metricConsumerMock.mockitoMock(), never()) .add(MetricDefinitions.FILTERING_RESPONSE_HANDLED, 1L, MetricConsumerMock.STATIC_CONTEXT); verify(metricConsumerMock.mockitoMock()) .add(MetricDefinitions.FILTERING_RESPONSE_UNHANDLED, 1L, MetricConsumerMock.STATIC_CONTEXT); assertThat(testDriver.close(), is(true)); } @Test public void requireThatStrictFilteringRejectsRequestsNotMatchingFilterChains() throws IOException { RequestFilter filter = mock(RequestFilter.class); FilterBindings filterBindings = new FilterBindings.Builder() .addRequestFilter("my-request-filter", filter) .addRequestFilterBinding("my-request-filter", "http: .build(); MyRequestHandler requestHandler = new MyRequestHandler(); 
// Strict filtering -> 403 + explanatory body; newDriver/newFilterModule
// helpers. NOTE(review): `Modules.combine( , ...)` is again missing its
// first argument (stripped Guice module) — restore before compiling.
TestDriver testDriver = newDriver(requestHandler, filterBindings, new MetricConsumerMock(), true); testDriver.client().get("/unfiltered/") .expectStatusCode(is(Response.Status.FORBIDDEN)) .expectContent(containsString("Request did not match any request filter chain")); verify(filter, never()).filter(any(), any()); assertThat(testDriver.close(), is(true)); } private static TestDriver newDriver(MyRequestHandler requestHandler, FilterBindings filterBindings) { return newDriver(requestHandler, filterBindings, new MetricConsumerMock(), false); } private static TestDriver newDriver( MyRequestHandler requestHandler, FilterBindings filterBindings, MetricConsumerMock metricConsumer, boolean strictFiltering) { return TestDriver.newInstance( JettyHttpServer.class, requestHandler, newFilterModule(filterBindings, metricConsumer, strictFiltering)); } private static com.google.inject.Module newFilterModule( FilterBindings filterBindings, MetricConsumerMock metricConsumer, boolean strictFiltering) { return Modules.combine( , new ConnectorFactoryRegistryModule(), metricConsumer.asGuiceModule()); } private static abstract class RequestFilterMockBase extends AbstractResource implements RequestFilter {} private static abstract class ResponseFilterMockBase extends AbstractResource implements ResponseFilter {} private static class MyRequestHandler extends AbstractRequestHandler { private final CountDownLatch invocationLatch = new CountDownLatch(1); private final AtomicReference<Map<String, List<String>>> headerCopy = new AtomicReference<>(null); @Override public ContentChannel handleRequest(final Request request, final ResponseHandler handler) { try { headerCopy.set(new HashMap<String, List<String>>(request.headers())); ResponseDispatch.newInstance(Response.Status.OK).dispatch(handler); return null; } finally { invocationLatch.countDown(); } } public boolean hasBeenInvokedYet() { return invocationLatch.getCount() == 0L; } public boolean awaitInvocation() throws InterruptedException { 
// MyRequestHandler tail and the helper filter classes, identical to the
// earlier copy. This last physical line is cut off mid-way through
// NullCompletionHandlerFilter.filter(...); the remainder continues past
// the visible region.
return invocationLatch.await(60, TimeUnit.SECONDS); } public Map<String, List<String>> getHeaderMap() { return headerCopy.get(); } } private static class RespondForbiddenFilter extends AbstractResource implements RequestFilter { @Override public void filter(final HttpRequest request, final ResponseHandler handler) { ResponseDispatch.newInstance(Response.Status.FORBIDDEN).dispatch(handler); } } private static class ThrowingRequestFilter extends AbstractResource implements RequestFilter { @Override public void filter(final HttpRequest request, final ResponseHandler handler) { throw new RuntimeException(); } } private static class ThrowingResponseFilter extends AbstractResource implements ResponseFilter { @Override public void filter(final Response response, final Request request) { throw new RuntimeException(); } } private static class HeaderRequestFilter extends AbstractResource implements RequestFilter { private final String key; private final String val; public HeaderRequestFilter(final String key, final String val) { this.key = key; this.val = val; } @Override public void filter(final HttpRequest request, final ResponseHandler handler) { request.headers().add(key, val); } } private static class HeaderResponseFilter extends AbstractResource implements ResponseFilter { private final String key; private final String val; public HeaderResponseFilter(final String key, final String val) { this.key = key; this.val = val; } @Override public void filter(final Response response, final Request request) { response.headers().add(key, val); } } public class NullCompletionHandlerFilter extends AbstractResource implements RequestFilter { private final int responseStatus; private final String responseMessage; public NullCompletionHandlerFilter(final int responseStatus, final String responseMessage) { this.responseStatus = responseStatus; this.responseMessage = responseMessage; } @Override public void filter(final HttpRequest request, final ResponseHandler responseHandler) { final 
HttpResponse response = HttpResponse.newInstance(responseStatus); final ContentChannel channel = responseHandler.handleResponse(response); final CompletionHandler completionHandler = null; channel.write(ByteBuffer.wrap(responseMessage.getBytes()), completionHandler); channel.close(null); } } }
It's pretty tricky to see that this big ol' block of code is related to the above `if`-statement, please consider adding bracing
public void getConfig(DistributionConfig.Builder builder) { DistributionConfig.Cluster.Builder clusterBuilder = new DistributionConfig.Cluster.Builder(); if (redundancy() != null) clusterBuilder.redundancy(redundancy().effectiveFinalRedundancy()); if (getRootGroup() != null) clusterBuilder.group(getRootGroup().getGroupStructureConfig().stream() .map(StorDistributionConfig.Group.Builder::build) .map(config -> new DistributionConfig.Cluster.Group.Builder() .index(config.index()) .name(config.name()) .capacity(config.capacity()) .partitions(config.partitions()) .nodes(config.nodes().stream() .map(node -> new DistributionConfig.Cluster.Group.Nodes.Builder() .index(node.index()) .retired(node.retired())) .collect(toList()))) .collect(toList())); builder.cluster(getConfigId(), clusterBuilder); }
.collect(toList()));
public void getConfig(DistributionConfig.Builder builder) { DistributionConfig.Cluster.Builder clusterBuilder = new DistributionConfig.Cluster.Builder(); StorDistributionConfig.Builder storDistributionBuilder = new StorDistributionConfig.Builder(); getConfig(storDistributionBuilder); StorDistributionConfig config = storDistributionBuilder.build(); clusterBuilder.active_per_leaf_group(config.active_per_leaf_group()); clusterBuilder.ready_copies(config.ready_copies()); clusterBuilder.redundancy(config.redundancy()); clusterBuilder.initial_redundancy(config.initial_redundancy()); for (StorDistributionConfig.Group group : config.group()) { DistributionConfig.Cluster.Group.Builder groupBuilder = new DistributionConfig.Cluster.Group.Builder(); groupBuilder.index(group.index()) .name(group.name()) .capacity(group.capacity()) .partitions(group.partitions()); for (var node : group.nodes()) { DistributionConfig.Cluster.Group.Nodes.Builder nodesBuilder = new DistributionConfig.Cluster.Group.Nodes.Builder(); nodesBuilder.index(node.index()) .retired(node.retired()); groupBuilder.nodes(nodesBuilder); } clusterBuilder.group(groupBuilder); } builder.cluster(getConfigId(), clusterBuilder); }
class Builder { /** The admin model of this system or null if none (which only happens in tests) */ private final Admin admin; public Builder(Admin admin) { this.admin = admin; } public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) { ModelElement contentElement = new ModelElement(w3cContentElement); DeployState deployState = context.getDeployState(); ModelElement documentsElement = contentElement.child("documents"); Map<String, NewDocumentType> documentDefinitions = new SearchDefinitionBuilder().build(deployState.getDocumentModel().getDocumentManager(), documentsElement); String routingSelection = new DocumentSelectionBuilder().build(documentsElement); RedundancyBuilder redundancyBuilder = new RedundancyBuilder(contentElement); Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement); ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterId(contentElement), documentDefinitions, globallyDistributedDocuments, routingSelection, deployState.zone(), deployState.isHosted()); c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterId(contentElement), contentElement).build(deployState, c, contentElement.getXml()); c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments, isCombined(getClusterId(contentElement), containers)) .build(deployState, c, contentElement.getXml()); c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c); c.storageNodes = new StorageCluster.Builder().build(deployState, c, w3cContentElement); c.distributorNodes = new DistributorCluster.Builder(c).build(deployState, c, w3cContentElement); c.rootGroup = new StorageGroup.Builder(contentElement, context).buildRootGroup(deployState, redundancyBuilder, c); validateThatGroupSiblingsAreUnique(c.clusterId, c.rootGroup); c.search.handleRedundancy(c.redundancy); 
setupSearchCluster(c.search, contentElement, deployState.getDeployLogger()); if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) throw new RuntimeException("Indexed search requires proton as engine"); if (documentsElement != null) { ModelElement e = documentsElement.child("document-processing"); if (e != null) setupDocumentProcessing(c, e); } else if (c.persistenceFactory != null) { throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified."); } ModelElement tuning = contentElement.child("tuning"); if (tuning != null) setupTuning(c, tuning); ModelElement experimental = contentElement.child("experimental"); if (experimental != null) setupExperimental(c, experimental); if (context.getParentProducer().getRoot() == null) return c; addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterId, c); return c; } private void setupSearchCluster(ContentSearchCluster csc, ModelElement element, DeployLogger logger) { ContentSearch search = DomContentSearchBuilder.build(element); Double visibilityDelay = search.getVisibilityDelay(); if (visibilityDelay != null) { csc.setVisibilityDelay(visibilityDelay); } if (csc.hasIndexedCluster()) { setupIndexedCluster(csc.getIndexed(), search, element, logger); } } private void setupIndexedCluster(IndexedSearchCluster index, ContentSearch search, ModelElement element, DeployLogger logger) { Double queryTimeout = search.getQueryTimeout(); if (queryTimeout != null) { Preconditions.checkState(index.getQueryTimeout() == null, "In " + index + ": You may not specify query-timeout in both proton and content."); index.setQueryTimeout(queryTimeout); } index.setSearchCoverage(DomSearchCoverageBuilder.build(element)); index.setDispatchSpec(DomDispatchBuilder.build(element)); if (index.getTuning() == null) index.setTuning(new Tuning(index)); index.getTuning().dispatch = DomTuningDispatchBuilder.build(element, logger); } private void 
setupDocumentProcessing(ContentCluster c, ModelElement e) { String docprocCluster = e.stringAttribute("cluster"); if (docprocCluster != null) { docprocCluster = docprocCluster.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocCluster != null && !docprocCluster.isEmpty()) { c.getSearch().getIndexed().setIndexingClusterName(docprocCluster); } } String docprocChain = e.stringAttribute("chain"); if (docprocChain != null) { docprocChain = docprocChain.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocChain != null && !docprocChain.isEmpty()) { c.getSearch().getIndexed().setIndexingChainName(docprocChain); } } } private void setupTuning(ContentCluster c, ModelElement tuning) { ModelElement distribution = tuning.child("distribution"); if (distribution != null) { String attr = distribution.stringAttribute("type"); if (attr != null) { if (attr.toLowerCase().equals("strict")) { c.distributionMode = DistributionMode.STRICT; } else if (attr.toLowerCase().equals("loose")) { c.distributionMode = DistributionMode.LOOSE; } else if (attr.toLowerCase().equals("legacy")) { c.distributionMode = DistributionMode.LEGACY; } else { throw new IllegalStateException("Distribution type " + attr + " not supported."); } } } ModelElement merges = tuning.child("merges"); if (merges != null) { Integer attr = merges.integerAttribute("max-nodes-per-merge"); if (attr != null) { c.maxNodesPerMerge = attr; } } } /** Returns whether this hosts one of the given container clusters */ private boolean isCombined(String clusterId, Collection<ContainerModel> containers) { return containers.stream() .map(model -> model.getCluster().getHostClusterId()) .filter(Optional::isPresent) .anyMatch(id -> id.get().equals(clusterId)); } private void setupExperimental(ContentCluster cluster, ModelElement experimental) { } private void validateGroupSiblings(String cluster, StorageGroup group) { Set<String> siblings = new HashSet<>(); for (StorageGroup g : group.getSubgroups()) { String name = 
g.getName(); if (siblings.contains(name)) { throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " + "with name '" + name + "' in the same subgroup. Group sibling names must be unique."); } siblings.add(name); } } private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) { if (group == null) { return; } validateGroupSiblings(cluster, group); for (StorageGroup g : group.getSubgroups()) { validateThatGroupSiblingsAreUnique(cluster, g); } } private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context, StorageGroup rootGroup, ModelElement contentElement, String contentClusterName, ContentCluster contentCluster) { if (admin == null) return; if (contentCluster.getPersistence() == null) return; ClusterControllerContainerCluster clusterControllers; ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster); if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) { clusterControllers = overlappingCluster.getClusterControllers(); } else if (admin.multitenant()) { String clusterName = contentClusterName + "-controllers"; NodesSpecification nodesSpecification = NodesSpecification.optionalDedicatedFromParent(contentElement.child("controllers"), context) .orElse(NodesSpecification.nonDedicated(3, context)); Collection<HostResource> hosts = nodesSpecification.isDedicated() ? 
getControllerHosts(nodesSpecification, admin, clusterName, context) : drawControllerHosts(nodesSpecification.minResources().nodes(), rootGroup, containers); clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true, context.getDeployState()); contentCluster.clusterControllers = clusterControllers; } else { clusterControllers = admin.getClusterControllers(); if (clusterControllers == null) { List<HostResource> hosts = admin.getClusterControllerHosts(); if (hosts.size() > 1) { context.getDeployState().getDeployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly."); } clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false, context.getDeployState()); admin.setClusterControllers(clusterControllers); } } addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster); ReindexingContext reindexingContext = clusterControllers.reindexingContext(); contentCluster.documentDefinitions.values() .forEach(type -> reindexingContext.addDocumentType(contentCluster.clusterId, type)); } /** Returns any other content cluster which shares nodes with this, or null if none are built */ private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) { for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) { if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster)) return otherContentCluster; } return null; } private boolean overlaps(ContentCluster c1, ContentCluster c2) { Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); return ! 
Sets.intersection(c1Hosts, c2Hosts).isEmpty(); } private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) { return nodesSpecification.provision(admin.hostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger(), false).keySet(); } private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawControllerHosts(count, false, rootGroup, containers); List<HostResource> retiredHosts = drawControllerHosts(count, true, rootGroup, containers); List<HostResource> all = new ArrayList<>(hosts); all.addAll(retiredHosts); return all; } private List<HostResource> drawControllerHosts(int count, boolean retired, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawContentHostsRecursively(count, retired, rootGroup); if (hosts.size() % 2 == 0 && ! hosts.isEmpty()) hosts = hosts.subList(0, hosts.size()-1); return hosts; } /** * Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible * if less than <code>count</code> are available. * * This will draw the same nodes each time it is * invoked if cluster names and node indexes are unchanged. */ private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters, Set<HostResource> usedHosts) { if (containerClusters.isEmpty()) return Collections.emptyList(); List<HostResource> allHosts = new ArrayList<>(); for (ApplicationContainerCluster cluster : clustersSortedByName(containerClusters)) allHosts.addAll(hostResourcesSortedByIndex(cluster)); List<HostResource> uniqueHostsWithoutClusterController = allHosts.stream() .filter(h -> ! usedHosts.contains(h)) .filter(h -> ! 
hostHasClusterController(h.getHostname(), allHosts)) .distinct() .collect(Collectors.toList()); return uniqueHostsWithoutClusterController.subList(0, Math.min(uniqueHostsWithoutClusterController.size(), count)); } private List<ApplicationContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) { return containerModels.stream() .map(ContainerModel::getCluster) .filter(cluster -> cluster instanceof ApplicationContainerCluster) .map(cluster -> (ApplicationContainerCluster) cluster) .sorted(Comparator.comparing(ContainerCluster::getName)) .collect(Collectors.toList()); } private List<HostResource> hostResourcesSortedByIndex(ApplicationContainerCluster cluster) { return cluster.getContainers().stream() .sorted(Comparator.comparing(Container::index)) .map(Container::getHostResource) .collect(Collectors.toList()); } /** Returns whether any host having the given hostname has a cluster controller */ private boolean hostHasClusterController(String hostname, List<HostResource> hosts) { for (HostResource host : hosts) { if ( ! host.getHostname().equals(hostname)) continue; if (hasClusterController(host)) return true; } return false; } private boolean hasClusterController(HostResource host) { for (Service service : host.getServices()) if (service instanceof ClusterControllerContainer) return true; return false; } /** * Draw <code>count</code> nodes from as many different content groups below this as possible. * This will only achieve maximum spread in the case where the groups are balanced and never on the same * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy. 
*/ private List<HostResource> drawContentHostsRecursively(int count, boolean retired, StorageGroup group) { Set<HostResource> hosts = new HashSet<>(); if (group.getNodes().isEmpty()) { int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size()); for (StorageGroup subgroup : group.getSubgroups()) hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, retired, subgroup)); } else { hosts.addAll(group.getNodes().stream() .filter(node -> node.isRetired() == retired) .map(StorageNode::getHostResource).collect(Collectors.toList())); } List<HostResource> sortedHosts = new ArrayList<>(hosts); sortedHosts.sort((a, b) -> (a.comparePrimarilyByIndexTo(b))); sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size())); return sortedHosts; } private ClusterControllerContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant, DeployState deployState) { var clusterControllers = new ClusterControllerContainerCluster(parent, name, name, deployState); List<ClusterControllerContainer> containers = new ArrayList<>(); if (clusterControllers.getContainers().isEmpty()) { int index = 0; for (HostResource host : hosts) { var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant, deployState); clusterControllerContainer.setHostResource(host); clusterControllerContainer.initService(deployState.getDeployLogger()); clusterControllerContainer.setProp("clustertype", "admin") .setProp("clustername", clusterControllers.getName()) .setProp("index", String.valueOf(index)); containers.add(clusterControllerContainer); ++index; } } clusterControllers.addContainers(containers); return clusterControllers; } private void addClusterControllerComponentsForThisCluster(ClusterControllerContainerCluster clusterControllers, ContentCluster contentCluster) { int index = 0; for (var container : clusterControllers.getContainers()) { if ( ! 
hasClusterControllerComponent(container)) container.addComponent(new ClusterControllerComponent()); container.addComponent(new ClusterControllerConfigurer(contentCluster, index++, clusterControllers.getContainers().size())); } } private boolean hasClusterControllerComponent(Container container) { for (Object o : container.getComponents().getComponents()) if (o instanceof ClusterControllerComponent) return true; return false; } }
class Builder { /** The admin model of this system or null if none (which only happens in tests) */ private final Admin admin; public Builder(Admin admin) { this.admin = admin; } public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) { ModelElement contentElement = new ModelElement(w3cContentElement); DeployState deployState = context.getDeployState(); ModelElement documentsElement = contentElement.child("documents"); Map<String, NewDocumentType> documentDefinitions = new SearchDefinitionBuilder().build(deployState.getDocumentModel().getDocumentManager(), documentsElement); String routingSelection = new DocumentSelectionBuilder().build(documentsElement); RedundancyBuilder redundancyBuilder = new RedundancyBuilder(contentElement); Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement); ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterId(contentElement), documentDefinitions, globallyDistributedDocuments, routingSelection, deployState.zone(), deployState.isHosted()); c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterId(contentElement), contentElement).build(deployState, c, contentElement.getXml()); c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments, isCombined(getClusterId(contentElement), containers)) .build(deployState, c, contentElement.getXml()); c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c); c.storageNodes = new StorageCluster.Builder().build(deployState, c, w3cContentElement); c.distributorNodes = new DistributorCluster.Builder(c).build(deployState, c, w3cContentElement); c.rootGroup = new StorageGroup.Builder(contentElement, context).buildRootGroup(deployState, redundancyBuilder, c); validateThatGroupSiblingsAreUnique(c.clusterId, c.rootGroup); c.search.handleRedundancy(c.redundancy); 
setupSearchCluster(c.search, contentElement, deployState.getDeployLogger()); if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) throw new RuntimeException("Indexed search requires proton as engine"); if (documentsElement != null) { ModelElement e = documentsElement.child("document-processing"); if (e != null) setupDocumentProcessing(c, e); } else if (c.persistenceFactory != null) { throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified."); } ModelElement tuning = contentElement.child("tuning"); if (tuning != null) setupTuning(c, tuning); ModelElement experimental = contentElement.child("experimental"); if (experimental != null) setupExperimental(c, experimental); if (context.getParentProducer().getRoot() == null) return c; addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterId, c); return c; } private void setupSearchCluster(ContentSearchCluster csc, ModelElement element, DeployLogger logger) { ContentSearch search = DomContentSearchBuilder.build(element); Double visibilityDelay = search.getVisibilityDelay(); if (visibilityDelay != null) { csc.setVisibilityDelay(visibilityDelay); } if (csc.hasIndexedCluster()) { setupIndexedCluster(csc.getIndexed(), search, element, logger); } } private void setupIndexedCluster(IndexedSearchCluster index, ContentSearch search, ModelElement element, DeployLogger logger) { Double queryTimeout = search.getQueryTimeout(); if (queryTimeout != null) { Preconditions.checkState(index.getQueryTimeout() == null, "In " + index + ": You may not specify query-timeout in both proton and content."); index.setQueryTimeout(queryTimeout); } index.setSearchCoverage(DomSearchCoverageBuilder.build(element)); index.setDispatchSpec(DomDispatchBuilder.build(element)); if (index.getTuning() == null) index.setTuning(new Tuning(index)); index.getTuning().dispatch = DomTuningDispatchBuilder.build(element, logger); } private void 
setupDocumentProcessing(ContentCluster c, ModelElement e) { String docprocCluster = e.stringAttribute("cluster"); if (docprocCluster != null) { docprocCluster = docprocCluster.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocCluster != null && !docprocCluster.isEmpty()) { c.getSearch().getIndexed().setIndexingClusterName(docprocCluster); } } String docprocChain = e.stringAttribute("chain"); if (docprocChain != null) { docprocChain = docprocChain.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocChain != null && !docprocChain.isEmpty()) { c.getSearch().getIndexed().setIndexingChainName(docprocChain); } } } private void setupTuning(ContentCluster c, ModelElement tuning) { ModelElement distribution = tuning.child("distribution"); if (distribution != null) { String attr = distribution.stringAttribute("type"); if (attr != null) { if (attr.toLowerCase().equals("strict")) { c.distributionMode = DistributionMode.STRICT; } else if (attr.toLowerCase().equals("loose")) { c.distributionMode = DistributionMode.LOOSE; } else if (attr.toLowerCase().equals("legacy")) { c.distributionMode = DistributionMode.LEGACY; } else { throw new IllegalStateException("Distribution type " + attr + " not supported."); } } } ModelElement merges = tuning.child("merges"); if (merges != null) { Integer attr = merges.integerAttribute("max-nodes-per-merge"); if (attr != null) { c.maxNodesPerMerge = attr; } } } /** Returns whether this hosts one of the given container clusters */ private boolean isCombined(String clusterId, Collection<ContainerModel> containers) { return containers.stream() .map(model -> model.getCluster().getHostClusterId()) .filter(Optional::isPresent) .anyMatch(id -> id.get().equals(clusterId)); } private void setupExperimental(ContentCluster cluster, ModelElement experimental) { } private void validateGroupSiblings(String cluster, StorageGroup group) { Set<String> siblings = new HashSet<>(); for (StorageGroup g : group.getSubgroups()) { String name = 
g.getName(); if (siblings.contains(name)) { throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " + "with name '" + name + "' in the same subgroup. Group sibling names must be unique."); } siblings.add(name); } } private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) { if (group == null) { return; } validateGroupSiblings(cluster, group); for (StorageGroup g : group.getSubgroups()) { validateThatGroupSiblingsAreUnique(cluster, g); } } private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context, StorageGroup rootGroup, ModelElement contentElement, String contentClusterName, ContentCluster contentCluster) { if (admin == null) return; if (contentCluster.getPersistence() == null) return; ClusterControllerContainerCluster clusterControllers; ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster); if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) { clusterControllers = overlappingCluster.getClusterControllers(); } else if (admin.multitenant()) { String clusterName = contentClusterName + "-controllers"; NodesSpecification nodesSpecification = NodesSpecification.optionalDedicatedFromParent(contentElement.child("controllers"), context) .orElse(NodesSpecification.nonDedicated(3, context)); Collection<HostResource> hosts = nodesSpecification.isDedicated() ? 
getControllerHosts(nodesSpecification, admin, clusterName, context) : drawControllerHosts(nodesSpecification.minResources().nodes(), rootGroup, containers); clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true, context.getDeployState()); contentCluster.clusterControllers = clusterControllers; } else { clusterControllers = admin.getClusterControllers(); if (clusterControllers == null) { List<HostResource> hosts = admin.getClusterControllerHosts(); if (hosts.size() > 1) { context.getDeployState().getDeployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly."); } clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false, context.getDeployState()); admin.setClusterControllers(clusterControllers); } } addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster); ReindexingContext reindexingContext = clusterControllers.reindexingContext(); contentCluster.documentDefinitions.values() .forEach(type -> reindexingContext.addDocumentType(contentCluster.clusterId, type)); } /** Returns any other content cluster which shares nodes with this, or null if none are built */ private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) { for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) { if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster)) return otherContentCluster; } return null; } private boolean overlaps(ContentCluster c1, ContentCluster c2) { Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); return ! 
Sets.intersection(c1Hosts, c2Hosts).isEmpty(); } private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) { return nodesSpecification.provision(admin.hostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger(), false).keySet(); } private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawControllerHosts(count, false, rootGroup, containers); List<HostResource> retiredHosts = drawControllerHosts(count, true, rootGroup, containers); List<HostResource> all = new ArrayList<>(hosts); all.addAll(retiredHosts); return all; } private List<HostResource> drawControllerHosts(int count, boolean retired, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawContentHostsRecursively(count, retired, rootGroup); if (hosts.size() % 2 == 0 && ! hosts.isEmpty()) hosts = hosts.subList(0, hosts.size()-1); return hosts; } /** * Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible * if less than <code>count</code> are available. * * This will draw the same nodes each time it is * invoked if cluster names and node indexes are unchanged. */ private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters, Set<HostResource> usedHosts) { if (containerClusters.isEmpty()) return Collections.emptyList(); List<HostResource> allHosts = new ArrayList<>(); for (ApplicationContainerCluster cluster : clustersSortedByName(containerClusters)) allHosts.addAll(hostResourcesSortedByIndex(cluster)); List<HostResource> uniqueHostsWithoutClusterController = allHosts.stream() .filter(h -> ! usedHosts.contains(h)) .filter(h -> ! 
hostHasClusterController(h.getHostname(), allHosts)) .distinct() .collect(Collectors.toList()); return uniqueHostsWithoutClusterController.subList(0, Math.min(uniqueHostsWithoutClusterController.size(), count)); } private List<ApplicationContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) { return containerModels.stream() .map(ContainerModel::getCluster) .filter(cluster -> cluster instanceof ApplicationContainerCluster) .map(cluster -> (ApplicationContainerCluster) cluster) .sorted(Comparator.comparing(ContainerCluster::getName)) .collect(Collectors.toList()); } private List<HostResource> hostResourcesSortedByIndex(ApplicationContainerCluster cluster) { return cluster.getContainers().stream() .sorted(Comparator.comparing(Container::index)) .map(Container::getHostResource) .collect(Collectors.toList()); } /** Returns whether any host having the given hostname has a cluster controller */ private boolean hostHasClusterController(String hostname, List<HostResource> hosts) { for (HostResource host : hosts) { if ( ! host.getHostname().equals(hostname)) continue; if (hasClusterController(host)) return true; } return false; } private boolean hasClusterController(HostResource host) { for (Service service : host.getServices()) if (service instanceof ClusterControllerContainer) return true; return false; } /** * Draw <code>count</code> nodes from as many different content groups below this as possible. * This will only achieve maximum spread in the case where the groups are balanced and never on the same * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy. 
*/ private List<HostResource> drawContentHostsRecursively(int count, boolean retired, StorageGroup group) { Set<HostResource> hosts = new HashSet<>(); if (group.getNodes().isEmpty()) { int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size()); for (StorageGroup subgroup : group.getSubgroups()) hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, retired, subgroup)); } else { hosts.addAll(group.getNodes().stream() .filter(node -> node.isRetired() == retired) .map(StorageNode::getHostResource).collect(Collectors.toList())); } List<HostResource> sortedHosts = new ArrayList<>(hosts); sortedHosts.sort((a, b) -> (a.comparePrimarilyByIndexTo(b))); sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size())); return sortedHosts; } private ClusterControllerContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant, DeployState deployState) { var clusterControllers = new ClusterControllerContainerCluster(parent, name, name, deployState); List<ClusterControllerContainer> containers = new ArrayList<>(); if (clusterControllers.getContainers().isEmpty()) { int index = 0; for (HostResource host : hosts) { var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant, deployState); clusterControllerContainer.setHostResource(host); clusterControllerContainer.initService(deployState.getDeployLogger()); clusterControllerContainer.setProp("clustertype", "admin") .setProp("clustername", clusterControllers.getName()) .setProp("index", String.valueOf(index)); containers.add(clusterControllerContainer); ++index; } } clusterControllers.addContainers(containers); return clusterControllers; } private void addClusterControllerComponentsForThisCluster(ClusterControllerContainerCluster clusterControllers, ContentCluster contentCluster) { int index = 0; for (var container : clusterControllers.getContainers()) { if ( ! 
hasClusterControllerComponent(container)) container.addComponent(new ClusterControllerComponent()); container.addComponent(new ClusterControllerConfigurer(contentCluster, index++, clusterControllers.getContainers().size())); } } private boolean hasClusterControllerComponent(Container container) { for (Object o : container.getComponents().getComponents()) if (o instanceof ClusterControllerComponent) return true; return false; } }
Is the config ID always identical to the cluster name?
public void getConfig(DistributionConfig.Builder builder) { DistributionConfig.Cluster.Builder clusterBuilder = new DistributionConfig.Cluster.Builder(); if (redundancy() != null) clusterBuilder.redundancy(redundancy().effectiveFinalRedundancy()); if (getRootGroup() != null) clusterBuilder.group(getRootGroup().getGroupStructureConfig().stream() .map(StorDistributionConfig.Group.Builder::build) .map(config -> new DistributionConfig.Cluster.Group.Builder() .index(config.index()) .name(config.name()) .capacity(config.capacity()) .partitions(config.partitions()) .nodes(config.nodes().stream() .map(node -> new DistributionConfig.Cluster.Group.Nodes.Builder() .index(node.index()) .retired(node.retired())) .collect(toList()))) .collect(toList())); builder.cluster(getConfigId(), clusterBuilder); }
builder.cluster(getConfigId(), clusterBuilder);
public void getConfig(DistributionConfig.Builder builder) { DistributionConfig.Cluster.Builder clusterBuilder = new DistributionConfig.Cluster.Builder(); StorDistributionConfig.Builder storDistributionBuilder = new StorDistributionConfig.Builder(); getConfig(storDistributionBuilder); StorDistributionConfig config = storDistributionBuilder.build(); clusterBuilder.active_per_leaf_group(config.active_per_leaf_group()); clusterBuilder.ready_copies(config.ready_copies()); clusterBuilder.redundancy(config.redundancy()); clusterBuilder.initial_redundancy(config.initial_redundancy()); for (StorDistributionConfig.Group group : config.group()) { DistributionConfig.Cluster.Group.Builder groupBuilder = new DistributionConfig.Cluster.Group.Builder(); groupBuilder.index(group.index()) .name(group.name()) .capacity(group.capacity()) .partitions(group.partitions()); for (var node : group.nodes()) { DistributionConfig.Cluster.Group.Nodes.Builder nodesBuilder = new DistributionConfig.Cluster.Group.Nodes.Builder(); nodesBuilder.index(node.index()) .retired(node.retired()); groupBuilder.nodes(nodesBuilder); } clusterBuilder.group(groupBuilder); } builder.cluster(getConfigId(), clusterBuilder); }
class Builder { /** The admin model of this system or null if none (which only happens in tests) */ private final Admin admin; public Builder(Admin admin) { this.admin = admin; } public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) { ModelElement contentElement = new ModelElement(w3cContentElement); DeployState deployState = context.getDeployState(); ModelElement documentsElement = contentElement.child("documents"); Map<String, NewDocumentType> documentDefinitions = new SearchDefinitionBuilder().build(deployState.getDocumentModel().getDocumentManager(), documentsElement); String routingSelection = new DocumentSelectionBuilder().build(documentsElement); RedundancyBuilder redundancyBuilder = new RedundancyBuilder(contentElement); Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement); ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterId(contentElement), documentDefinitions, globallyDistributedDocuments, routingSelection, deployState.zone(), deployState.isHosted()); c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterId(contentElement), contentElement).build(deployState, c, contentElement.getXml()); c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments, isCombined(getClusterId(contentElement), containers)) .build(deployState, c, contentElement.getXml()); c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c); c.storageNodes = new StorageCluster.Builder().build(deployState, c, w3cContentElement); c.distributorNodes = new DistributorCluster.Builder(c).build(deployState, c, w3cContentElement); c.rootGroup = new StorageGroup.Builder(contentElement, context).buildRootGroup(deployState, redundancyBuilder, c); validateThatGroupSiblingsAreUnique(c.clusterId, c.rootGroup); c.search.handleRedundancy(c.redundancy); 
setupSearchCluster(c.search, contentElement, deployState.getDeployLogger()); if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) throw new RuntimeException("Indexed search requires proton as engine"); if (documentsElement != null) { ModelElement e = documentsElement.child("document-processing"); if (e != null) setupDocumentProcessing(c, e); } else if (c.persistenceFactory != null) { throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified."); } ModelElement tuning = contentElement.child("tuning"); if (tuning != null) setupTuning(c, tuning); ModelElement experimental = contentElement.child("experimental"); if (experimental != null) setupExperimental(c, experimental); if (context.getParentProducer().getRoot() == null) return c; addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterId, c); return c; } private void setupSearchCluster(ContentSearchCluster csc, ModelElement element, DeployLogger logger) { ContentSearch search = DomContentSearchBuilder.build(element); Double visibilityDelay = search.getVisibilityDelay(); if (visibilityDelay != null) { csc.setVisibilityDelay(visibilityDelay); } if (csc.hasIndexedCluster()) { setupIndexedCluster(csc.getIndexed(), search, element, logger); } } private void setupIndexedCluster(IndexedSearchCluster index, ContentSearch search, ModelElement element, DeployLogger logger) { Double queryTimeout = search.getQueryTimeout(); if (queryTimeout != null) { Preconditions.checkState(index.getQueryTimeout() == null, "In " + index + ": You may not specify query-timeout in both proton and content."); index.setQueryTimeout(queryTimeout); } index.setSearchCoverage(DomSearchCoverageBuilder.build(element)); index.setDispatchSpec(DomDispatchBuilder.build(element)); if (index.getTuning() == null) index.setTuning(new Tuning(index)); index.getTuning().dispatch = DomTuningDispatchBuilder.build(element, logger); } private void 
setupDocumentProcessing(ContentCluster c, ModelElement e) { String docprocCluster = e.stringAttribute("cluster"); if (docprocCluster != null) { docprocCluster = docprocCluster.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocCluster != null && !docprocCluster.isEmpty()) { c.getSearch().getIndexed().setIndexingClusterName(docprocCluster); } } String docprocChain = e.stringAttribute("chain"); if (docprocChain != null) { docprocChain = docprocChain.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocChain != null && !docprocChain.isEmpty()) { c.getSearch().getIndexed().setIndexingChainName(docprocChain); } } } private void setupTuning(ContentCluster c, ModelElement tuning) { ModelElement distribution = tuning.child("distribution"); if (distribution != null) { String attr = distribution.stringAttribute("type"); if (attr != null) { if (attr.toLowerCase().equals("strict")) { c.distributionMode = DistributionMode.STRICT; } else if (attr.toLowerCase().equals("loose")) { c.distributionMode = DistributionMode.LOOSE; } else if (attr.toLowerCase().equals("legacy")) { c.distributionMode = DistributionMode.LEGACY; } else { throw new IllegalStateException("Distribution type " + attr + " not supported."); } } } ModelElement merges = tuning.child("merges"); if (merges != null) { Integer attr = merges.integerAttribute("max-nodes-per-merge"); if (attr != null) { c.maxNodesPerMerge = attr; } } } /** Returns whether this hosts one of the given container clusters */ private boolean isCombined(String clusterId, Collection<ContainerModel> containers) { return containers.stream() .map(model -> model.getCluster().getHostClusterId()) .filter(Optional::isPresent) .anyMatch(id -> id.get().equals(clusterId)); } private void setupExperimental(ContentCluster cluster, ModelElement experimental) { } private void validateGroupSiblings(String cluster, StorageGroup group) { Set<String> siblings = new HashSet<>(); for (StorageGroup g : group.getSubgroups()) { String name = 
g.getName(); if (siblings.contains(name)) { throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " + "with name '" + name + "' in the same subgroup. Group sibling names must be unique."); } siblings.add(name); } } private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) { if (group == null) { return; } validateGroupSiblings(cluster, group); for (StorageGroup g : group.getSubgroups()) { validateThatGroupSiblingsAreUnique(cluster, g); } } private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context, StorageGroup rootGroup, ModelElement contentElement, String contentClusterName, ContentCluster contentCluster) { if (admin == null) return; if (contentCluster.getPersistence() == null) return; ClusterControllerContainerCluster clusterControllers; ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster); if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) { clusterControllers = overlappingCluster.getClusterControllers(); } else if (admin.multitenant()) { String clusterName = contentClusterName + "-controllers"; NodesSpecification nodesSpecification = NodesSpecification.optionalDedicatedFromParent(contentElement.child("controllers"), context) .orElse(NodesSpecification.nonDedicated(3, context)); Collection<HostResource> hosts = nodesSpecification.isDedicated() ? 
getControllerHosts(nodesSpecification, admin, clusterName, context) : drawControllerHosts(nodesSpecification.minResources().nodes(), rootGroup, containers); clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true, context.getDeployState()); contentCluster.clusterControllers = clusterControllers; } else { clusterControllers = admin.getClusterControllers(); if (clusterControllers == null) { List<HostResource> hosts = admin.getClusterControllerHosts(); if (hosts.size() > 1) { context.getDeployState().getDeployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly."); } clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false, context.getDeployState()); admin.setClusterControllers(clusterControllers); } } addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster); ReindexingContext reindexingContext = clusterControllers.reindexingContext(); contentCluster.documentDefinitions.values() .forEach(type -> reindexingContext.addDocumentType(contentCluster.clusterId, type)); } /** Returns any other content cluster which shares nodes with this, or null if none are built */ private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) { for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) { if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster)) return otherContentCluster; } return null; } private boolean overlaps(ContentCluster c1, ContentCluster c2) { Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); return ! 
Sets.intersection(c1Hosts, c2Hosts).isEmpty(); } private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) { return nodesSpecification.provision(admin.hostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger(), false).keySet(); } private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawControllerHosts(count, false, rootGroup, containers); List<HostResource> retiredHosts = drawControllerHosts(count, true, rootGroup, containers); List<HostResource> all = new ArrayList<>(hosts); all.addAll(retiredHosts); return all; } private List<HostResource> drawControllerHosts(int count, boolean retired, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawContentHostsRecursively(count, retired, rootGroup); if (hosts.size() % 2 == 0 && ! hosts.isEmpty()) hosts = hosts.subList(0, hosts.size()-1); return hosts; } /** * Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible * if less than <code>count</code> are available. * * This will draw the same nodes each time it is * invoked if cluster names and node indexes are unchanged. */ private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters, Set<HostResource> usedHosts) { if (containerClusters.isEmpty()) return Collections.emptyList(); List<HostResource> allHosts = new ArrayList<>(); for (ApplicationContainerCluster cluster : clustersSortedByName(containerClusters)) allHosts.addAll(hostResourcesSortedByIndex(cluster)); List<HostResource> uniqueHostsWithoutClusterController = allHosts.stream() .filter(h -> ! usedHosts.contains(h)) .filter(h -> ! 
hostHasClusterController(h.getHostname(), allHosts)) .distinct() .collect(Collectors.toList()); return uniqueHostsWithoutClusterController.subList(0, Math.min(uniqueHostsWithoutClusterController.size(), count)); } private List<ApplicationContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) { return containerModels.stream() .map(ContainerModel::getCluster) .filter(cluster -> cluster instanceof ApplicationContainerCluster) .map(cluster -> (ApplicationContainerCluster) cluster) .sorted(Comparator.comparing(ContainerCluster::getName)) .collect(Collectors.toList()); } private List<HostResource> hostResourcesSortedByIndex(ApplicationContainerCluster cluster) { return cluster.getContainers().stream() .sorted(Comparator.comparing(Container::index)) .map(Container::getHostResource) .collect(Collectors.toList()); } /** Returns whether any host having the given hostname has a cluster controller */ private boolean hostHasClusterController(String hostname, List<HostResource> hosts) { for (HostResource host : hosts) { if ( ! host.getHostname().equals(hostname)) continue; if (hasClusterController(host)) return true; } return false; } private boolean hasClusterController(HostResource host) { for (Service service : host.getServices()) if (service instanceof ClusterControllerContainer) return true; return false; } /** * Draw <code>count</code> nodes from as many different content groups below this as possible. * This will only achieve maximum spread in the case where the groups are balanced and never on the same * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy. 
*/ private List<HostResource> drawContentHostsRecursively(int count, boolean retired, StorageGroup group) { Set<HostResource> hosts = new HashSet<>(); if (group.getNodes().isEmpty()) { int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size()); for (StorageGroup subgroup : group.getSubgroups()) hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, retired, subgroup)); } else { hosts.addAll(group.getNodes().stream() .filter(node -> node.isRetired() == retired) .map(StorageNode::getHostResource).collect(Collectors.toList())); } List<HostResource> sortedHosts = new ArrayList<>(hosts); sortedHosts.sort((a, b) -> (a.comparePrimarilyByIndexTo(b))); sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size())); return sortedHosts; } private ClusterControllerContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant, DeployState deployState) { var clusterControllers = new ClusterControllerContainerCluster(parent, name, name, deployState); List<ClusterControllerContainer> containers = new ArrayList<>(); if (clusterControllers.getContainers().isEmpty()) { int index = 0; for (HostResource host : hosts) { var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant, deployState); clusterControllerContainer.setHostResource(host); clusterControllerContainer.initService(deployState.getDeployLogger()); clusterControllerContainer.setProp("clustertype", "admin") .setProp("clustername", clusterControllers.getName()) .setProp("index", String.valueOf(index)); containers.add(clusterControllerContainer); ++index; } } clusterControllers.addContainers(containers); return clusterControllers; } private void addClusterControllerComponentsForThisCluster(ClusterControllerContainerCluster clusterControllers, ContentCluster contentCluster) { int index = 0; for (var container : clusterControllers.getContainers()) { if ( ! 
hasClusterControllerComponent(container)) container.addComponent(new ClusterControllerComponent()); container.addComponent(new ClusterControllerConfigurer(contentCluster, index++, clusterControllers.getContainers().size())); } } private boolean hasClusterControllerComponent(Container container) { for (Object o : container.getComponents().getComponents()) if (o instanceof ClusterControllerComponent) return true; return false; } }
class Builder { /** The admin model of this system or null if none (which only happens in tests) */ private final Admin admin; public Builder(Admin admin) { this.admin = admin; } public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) { ModelElement contentElement = new ModelElement(w3cContentElement); DeployState deployState = context.getDeployState(); ModelElement documentsElement = contentElement.child("documents"); Map<String, NewDocumentType> documentDefinitions = new SearchDefinitionBuilder().build(deployState.getDocumentModel().getDocumentManager(), documentsElement); String routingSelection = new DocumentSelectionBuilder().build(documentsElement); RedundancyBuilder redundancyBuilder = new RedundancyBuilder(contentElement); Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement); ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterId(contentElement), documentDefinitions, globallyDistributedDocuments, routingSelection, deployState.zone(), deployState.isHosted()); c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterId(contentElement), contentElement).build(deployState, c, contentElement.getXml()); c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments, isCombined(getClusterId(contentElement), containers)) .build(deployState, c, contentElement.getXml()); c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c); c.storageNodes = new StorageCluster.Builder().build(deployState, c, w3cContentElement); c.distributorNodes = new DistributorCluster.Builder(c).build(deployState, c, w3cContentElement); c.rootGroup = new StorageGroup.Builder(contentElement, context).buildRootGroup(deployState, redundancyBuilder, c); validateThatGroupSiblingsAreUnique(c.clusterId, c.rootGroup); c.search.handleRedundancy(c.redundancy); 
setupSearchCluster(c.search, contentElement, deployState.getDeployLogger()); if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) throw new RuntimeException("Indexed search requires proton as engine"); if (documentsElement != null) { ModelElement e = documentsElement.child("document-processing"); if (e != null) setupDocumentProcessing(c, e); } else if (c.persistenceFactory != null) { throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified."); } ModelElement tuning = contentElement.child("tuning"); if (tuning != null) setupTuning(c, tuning); ModelElement experimental = contentElement.child("experimental"); if (experimental != null) setupExperimental(c, experimental); if (context.getParentProducer().getRoot() == null) return c; addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterId, c); return c; } private void setupSearchCluster(ContentSearchCluster csc, ModelElement element, DeployLogger logger) { ContentSearch search = DomContentSearchBuilder.build(element); Double visibilityDelay = search.getVisibilityDelay(); if (visibilityDelay != null) { csc.setVisibilityDelay(visibilityDelay); } if (csc.hasIndexedCluster()) { setupIndexedCluster(csc.getIndexed(), search, element, logger); } } private void setupIndexedCluster(IndexedSearchCluster index, ContentSearch search, ModelElement element, DeployLogger logger) { Double queryTimeout = search.getQueryTimeout(); if (queryTimeout != null) { Preconditions.checkState(index.getQueryTimeout() == null, "In " + index + ": You may not specify query-timeout in both proton and content."); index.setQueryTimeout(queryTimeout); } index.setSearchCoverage(DomSearchCoverageBuilder.build(element)); index.setDispatchSpec(DomDispatchBuilder.build(element)); if (index.getTuning() == null) index.setTuning(new Tuning(index)); index.getTuning().dispatch = DomTuningDispatchBuilder.build(element, logger); } private void 
setupDocumentProcessing(ContentCluster c, ModelElement e) { String docprocCluster = e.stringAttribute("cluster"); if (docprocCluster != null) { docprocCluster = docprocCluster.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocCluster != null && !docprocCluster.isEmpty()) { c.getSearch().getIndexed().setIndexingClusterName(docprocCluster); } } String docprocChain = e.stringAttribute("chain"); if (docprocChain != null) { docprocChain = docprocChain.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocChain != null && !docprocChain.isEmpty()) { c.getSearch().getIndexed().setIndexingChainName(docprocChain); } } } private void setupTuning(ContentCluster c, ModelElement tuning) { ModelElement distribution = tuning.child("distribution"); if (distribution != null) { String attr = distribution.stringAttribute("type"); if (attr != null) { if (attr.toLowerCase().equals("strict")) { c.distributionMode = DistributionMode.STRICT; } else if (attr.toLowerCase().equals("loose")) { c.distributionMode = DistributionMode.LOOSE; } else if (attr.toLowerCase().equals("legacy")) { c.distributionMode = DistributionMode.LEGACY; } else { throw new IllegalStateException("Distribution type " + attr + " not supported."); } } } ModelElement merges = tuning.child("merges"); if (merges != null) { Integer attr = merges.integerAttribute("max-nodes-per-merge"); if (attr != null) { c.maxNodesPerMerge = attr; } } } /** Returns whether this hosts one of the given container clusters */ private boolean isCombined(String clusterId, Collection<ContainerModel> containers) { return containers.stream() .map(model -> model.getCluster().getHostClusterId()) .filter(Optional::isPresent) .anyMatch(id -> id.get().equals(clusterId)); } private void setupExperimental(ContentCluster cluster, ModelElement experimental) { } private void validateGroupSiblings(String cluster, StorageGroup group) { Set<String> siblings = new HashSet<>(); for (StorageGroup g : group.getSubgroups()) { String name = 
g.getName(); if (siblings.contains(name)) { throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " + "with name '" + name + "' in the same subgroup. Group sibling names must be unique."); } siblings.add(name); } } private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) { if (group == null) { return; } validateGroupSiblings(cluster, group); for (StorageGroup g : group.getSubgroups()) { validateThatGroupSiblingsAreUnique(cluster, g); } } private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context, StorageGroup rootGroup, ModelElement contentElement, String contentClusterName, ContentCluster contentCluster) { if (admin == null) return; if (contentCluster.getPersistence() == null) return; ClusterControllerContainerCluster clusterControllers; ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster); if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) { clusterControllers = overlappingCluster.getClusterControllers(); } else if (admin.multitenant()) { String clusterName = contentClusterName + "-controllers"; NodesSpecification nodesSpecification = NodesSpecification.optionalDedicatedFromParent(contentElement.child("controllers"), context) .orElse(NodesSpecification.nonDedicated(3, context)); Collection<HostResource> hosts = nodesSpecification.isDedicated() ? 
getControllerHosts(nodesSpecification, admin, clusterName, context) : drawControllerHosts(nodesSpecification.minResources().nodes(), rootGroup, containers); clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true, context.getDeployState()); contentCluster.clusterControllers = clusterControllers; } else { clusterControllers = admin.getClusterControllers(); if (clusterControllers == null) { List<HostResource> hosts = admin.getClusterControllerHosts(); if (hosts.size() > 1) { context.getDeployState().getDeployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly."); } clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false, context.getDeployState()); admin.setClusterControllers(clusterControllers); } } addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster); ReindexingContext reindexingContext = clusterControllers.reindexingContext(); contentCluster.documentDefinitions.values() .forEach(type -> reindexingContext.addDocumentType(contentCluster.clusterId, type)); } /** Returns any other content cluster which shares nodes with this, or null if none are built */ private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) { for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) { if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster)) return otherContentCluster; } return null; } private boolean overlaps(ContentCluster c1, ContentCluster c2) { Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); return ! 
Sets.intersection(c1Hosts, c2Hosts).isEmpty(); } private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) { return nodesSpecification.provision(admin.hostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger(), false).keySet(); } private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawControllerHosts(count, false, rootGroup, containers); List<HostResource> retiredHosts = drawControllerHosts(count, true, rootGroup, containers); List<HostResource> all = new ArrayList<>(hosts); all.addAll(retiredHosts); return all; } private List<HostResource> drawControllerHosts(int count, boolean retired, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawContentHostsRecursively(count, retired, rootGroup); if (hosts.size() % 2 == 0 && ! hosts.isEmpty()) hosts = hosts.subList(0, hosts.size()-1); return hosts; } /** * Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible * if less than <code>count</code> are available. * * This will draw the same nodes each time it is * invoked if cluster names and node indexes are unchanged. */ private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters, Set<HostResource> usedHosts) { if (containerClusters.isEmpty()) return Collections.emptyList(); List<HostResource> allHosts = new ArrayList<>(); for (ApplicationContainerCluster cluster : clustersSortedByName(containerClusters)) allHosts.addAll(hostResourcesSortedByIndex(cluster)); List<HostResource> uniqueHostsWithoutClusterController = allHosts.stream() .filter(h -> ! usedHosts.contains(h)) .filter(h -> ! 
hostHasClusterController(h.getHostname(), allHosts)) .distinct() .collect(Collectors.toList()); return uniqueHostsWithoutClusterController.subList(0, Math.min(uniqueHostsWithoutClusterController.size(), count)); } private List<ApplicationContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) { return containerModels.stream() .map(ContainerModel::getCluster) .filter(cluster -> cluster instanceof ApplicationContainerCluster) .map(cluster -> (ApplicationContainerCluster) cluster) .sorted(Comparator.comparing(ContainerCluster::getName)) .collect(Collectors.toList()); } private List<HostResource> hostResourcesSortedByIndex(ApplicationContainerCluster cluster) { return cluster.getContainers().stream() .sorted(Comparator.comparing(Container::index)) .map(Container::getHostResource) .collect(Collectors.toList()); } /** Returns whether any host having the given hostname has a cluster controller */ private boolean hostHasClusterController(String hostname, List<HostResource> hosts) { for (HostResource host : hosts) { if ( ! host.getHostname().equals(hostname)) continue; if (hasClusterController(host)) return true; } return false; } private boolean hasClusterController(HostResource host) { for (Service service : host.getServices()) if (service instanceof ClusterControllerContainer) return true; return false; } /** * Draw <code>count</code> nodes from as many different content groups below this as possible. * This will only achieve maximum spread in the case where the groups are balanced and never on the same * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy. 
*/ private List<HostResource> drawContentHostsRecursively(int count, boolean retired, StorageGroup group) { Set<HostResource> hosts = new HashSet<>(); if (group.getNodes().isEmpty()) { int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size()); for (StorageGroup subgroup : group.getSubgroups()) hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, retired, subgroup)); } else { hosts.addAll(group.getNodes().stream() .filter(node -> node.isRetired() == retired) .map(StorageNode::getHostResource).collect(Collectors.toList())); } List<HostResource> sortedHosts = new ArrayList<>(hosts); sortedHosts.sort((a, b) -> (a.comparePrimarilyByIndexTo(b))); sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size())); return sortedHosts; } private ClusterControllerContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant, DeployState deployState) { var clusterControllers = new ClusterControllerContainerCluster(parent, name, name, deployState); List<ClusterControllerContainer> containers = new ArrayList<>(); if (clusterControllers.getContainers().isEmpty()) { int index = 0; for (HostResource host : hosts) { var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant, deployState); clusterControllerContainer.setHostResource(host); clusterControllerContainer.initService(deployState.getDeployLogger()); clusterControllerContainer.setProp("clustertype", "admin") .setProp("clustername", clusterControllers.getName()) .setProp("index", String.valueOf(index)); containers.add(clusterControllerContainer); ++index; } } clusterControllers.addContainers(containers); return clusterControllers; } private void addClusterControllerComponentsForThisCluster(ClusterControllerContainerCluster clusterControllers, ContentCluster contentCluster) { int index = 0; for (var container : clusterControllers.getContainers()) { if ( ! 
hasClusterControllerComponent(container)) container.addComponent(new ClusterControllerComponent()); container.addComponent(new ClusterControllerConfigurer(contentCluster, index++, clusterControllers.getContainers().size())); } } private boolean hasClusterControllerComponent(Container container) { for (Object o : container.getComponents().getComponents()) if (o instanceof ClusterControllerComponent) return true; return false; } }
I feel that, according to our code standard, you must be able to read the entire statement out loud without drawing breath — otherwise it should be split up.
/**
 * Fills in the distribution config for this content cluster: the effective final
 * redundancy (when known) and the flattened group structure with its nodes.
 *
 * @param builder the distribution config builder this cluster's config is added to,
 *                keyed by this cluster's config id
 */
public void getConfig(DistributionConfig.Builder builder) {
    DistributionConfig.Cluster.Builder clusterBuilder = new DistributionConfig.Cluster.Builder();
    if (redundancy() != null)
        clusterBuilder.redundancy(redundancy().effectiveFinalRedundancy());
    if (getRootGroup() != null)
        clusterBuilder.group(getRootGroup().getGroupStructureConfig().stream()
                                           .map(StorDistributionConfig.Group.Builder::build)
                                           .map(this::toDistributionGroup)
                                           .collect(toList()));
    builder.cluster(getConfigId(), clusterBuilder);
}

/**
 * Translates one group of the stor-distribution config, including its nodes,
 * to the equivalent distribution config group builder.
 */
private DistributionConfig.Cluster.Group.Builder toDistributionGroup(StorDistributionConfig.Group config) {
    return new DistributionConfig.Cluster.Group.Builder()
            .index(config.index())
            .name(config.name())
            .capacity(config.capacity())
            .partitions(config.partitions())
            .nodes(config.nodes().stream()
                         .map(node -> new DistributionConfig.Cluster.Group.Nodes.Builder()
                                 .index(node.index())
                                 .retired(node.retired()))
                         .collect(toList()));
}
.collect(toList()));
/**
 * Fills in the distribution config for this content cluster by first producing this
 * cluster's stor-distribution config and then translating it field by field:
 * cluster-level settings, then every group with its nodes.
 *
 * @param builder the distribution config builder this cluster's config is added to,
 *                keyed by this cluster's config id
 */
public void getConfig(DistributionConfig.Builder builder) {
    // Derive everything from the stor-distribution config rather than recomputing it here.
    StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder();
    getConfig(storBuilder);
    StorDistributionConfig storConfig = storBuilder.build();

    DistributionConfig.Cluster.Builder cluster = new DistributionConfig.Cluster.Builder();
    cluster.active_per_leaf_group(storConfig.active_per_leaf_group());
    cluster.ready_copies(storConfig.ready_copies());
    cluster.redundancy(storConfig.redundancy());
    cluster.initial_redundancy(storConfig.initial_redundancy());

    for (StorDistributionConfig.Group storGroup : storConfig.group()) {
        DistributionConfig.Cluster.Group.Builder group = new DistributionConfig.Cluster.Group.Builder()
                .index(storGroup.index())
                .name(storGroup.name())
                .capacity(storGroup.capacity())
                .partitions(storGroup.partitions());
        for (var storNode : storGroup.nodes())
            group.nodes(new DistributionConfig.Cluster.Group.Nodes.Builder()
                                .index(storNode.index())
                                .retired(storNode.retired()));
        cluster.group(group);
    }
    builder.cluster(getConfigId(), cluster);
}
/**
 * Builds a {@code ContentCluster} from its {@code <content>} services.xml element,
 * wires up its search/storage/distributor sub-clusters, and attaches cluster
 * controllers (shared, dedicated, or drawn from content/container hosts).
 */
class Builder {

    /** The admin model of this system or null if none (which only happens in tests) */
    private final Admin admin;

    public Builder(Admin admin) {
        this.admin = admin;
    }

    /**
     * Builds the content cluster described by the given {@code <content>} element.
     * Construction order matters: sub-clusters are built and attached to {@code c}
     * before validation, redundancy handling, and cluster-controller setup run.
     *
     * @param containers        all container models of the application (used for combined
     *                          clusters and for drawing cluster-controller hosts)
     * @param context           the config model context of this deployment
     * @param w3cContentElement the raw {@code <content>} XML element
     * @return the fully built content cluster
     */
    public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) {
        ModelElement contentElement = new ModelElement(w3cContentElement);
        DeployState deployState = context.getDeployState();
        ModelElement documentsElement = contentElement.child("documents");
        Map<String, NewDocumentType> documentDefinitions =
                new SearchDefinitionBuilder().build(deployState.getDocumentModel().getDocumentManager(), documentsElement);
        String routingSelection = new DocumentSelectionBuilder().build(documentsElement);
        RedundancyBuilder redundancyBuilder = new RedundancyBuilder(contentElement);
        Set<NewDocumentType> globallyDistributedDocuments =
                new GlobalDistributionBuilder(documentDefinitions).build(documentsElement);

        ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterId(contentElement), documentDefinitions,
                                              globallyDistributedDocuments, routingSelection,
                                              deployState.zone(), deployState.isHosted());
        c.clusterControllerConfig =
                new ClusterControllerConfig.Builder(getClusterId(contentElement), contentElement)
                        .build(deployState, c, contentElement.getXml());
        c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments,
                                                    isCombined(getClusterId(contentElement), containers))
                .build(deployState, c, contentElement.getXml());
        c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c);
        c.storageNodes = new StorageCluster.Builder().build(deployState, c, w3cContentElement);
        c.distributorNodes = new DistributorCluster.Builder(c).build(deployState, c, w3cContentElement);
        c.rootGroup = new StorageGroup.Builder(contentElement, context).buildRootGroup(deployState, redundancyBuilder, c);
        validateThatGroupSiblingsAreUnique(c.clusterId, c.rootGroup);
        c.search.handleRedundancy(c.redundancy);
        setupSearchCluster(c.search, contentElement, deployState.getDeployLogger());

        // Indexed search is only supported on the proton engine
        if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) )
            throw new RuntimeException("Indexed search requires proton as engine");

        if (documentsElement != null) {
            ModelElement e = documentsElement.child("document-processing");
            if (e != null)
                setupDocumentProcessing(c, e);
        } else if (c.persistenceFactory != null) {
            throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified.");
        }

        ModelElement tuning = contentElement.child("tuning");
        if (tuning != null)
            setupTuning(c, tuning);
        ModelElement experimental = contentElement.child("experimental");
        if (experimental != null)
            setupExperimental(c, experimental);

        // No root producer: skip cluster controller setup (presumably only in tests — TODO confirm)
        if (context.getParentProducer().getRoot() == null) return c;

        addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterId, c);
        return c;
    }

    /** Applies the {@code <search>} settings (visibility delay, indexed-cluster tuning) to the search cluster. */
    private void setupSearchCluster(ContentSearchCluster csc, ModelElement element, DeployLogger logger) {
        ContentSearch search = DomContentSearchBuilder.build(element);
        Double visibilityDelay = search.getVisibilityDelay();
        if (visibilityDelay != null) {
            csc.setVisibilityDelay(visibilityDelay);
        }
        if (csc.hasIndexedCluster()) {
            setupIndexedCluster(csc.getIndexed(), search, element, logger);
        }
    }

    /** Applies query timeout, coverage, and dispatch settings to the indexed search cluster. */
    private void setupIndexedCluster(IndexedSearchCluster index, ContentSearch search, ModelElement element, DeployLogger logger) {
        Double queryTimeout = search.getQueryTimeout();
        if (queryTimeout != null) {
            // query-timeout may be set in at most one place
            Preconditions.checkState(index.getQueryTimeout() == null,
                                     "In " + index + ": You may not specify query-timeout in both proton and content.");
            index.setQueryTimeout(queryTimeout);
        }
        index.setSearchCoverage(DomSearchCoverageBuilder.build(element));
        index.setDispatchSpec(DomDispatchBuilder.build(element));
        if (index.getTuning() == null)
            index.setTuning(new Tuning(index));
        index.getTuning().dispatch = DomTuningDispatchBuilder.build(element, logger);
    }

    /**
     * Applies the {@code <document-processing>} element's {@code cluster}/{@code chain}
     * attributes as the indexing cluster/chain of the indexed search cluster (if any).
     */
    private void setupDocumentProcessing(ContentCluster c, ModelElement e) {
        String docprocCluster = e.stringAttribute("cluster");
        if (docprocCluster != null) {
            docprocCluster = docprocCluster.trim();
        }
        if (c.getSearch().hasIndexedCluster()) {
            if (docprocCluster != null && !docprocCluster.isEmpty()) {
                c.getSearch().getIndexed().setIndexingClusterName(docprocCluster);
            }
        }
        String docprocChain = e.stringAttribute("chain");
        if (docprocChain != null) {
            docprocChain = docprocChain.trim();
        }
        if (c.getSearch().hasIndexedCluster()) {
            if (docprocChain != null && !docprocChain.isEmpty()) {
                c.getSearch().getIndexed().setIndexingChainName(docprocChain);
            }
        }
    }

    /** Applies the {@code <tuning>} element: distribution mode and max nodes per merge. */
    private void setupTuning(ContentCluster c, ModelElement tuning) {
        ModelElement distribution = tuning.child("distribution");
        if (distribution != null) {
            String attr = distribution.stringAttribute("type");
            if (attr != null) {
                if (attr.toLowerCase().equals("strict")) {
                    c.distributionMode = DistributionMode.STRICT;
                } else if (attr.toLowerCase().equals("loose")) {
                    c.distributionMode = DistributionMode.LOOSE;
                } else if (attr.toLowerCase().equals("legacy")) {
                    c.distributionMode = DistributionMode.LEGACY;
                } else {
                    throw new IllegalStateException("Distribution type " + attr + " not supported.");
                }
            }
        }
        ModelElement merges = tuning.child("merges");
        if (merges != null) {
            Integer attr = merges.integerAttribute("max-nodes-per-merge");
            if (attr != null) {
                c.maxNodesPerMerge = attr;
            }
        }
    }

    /** Returns whether this hosts one of the given container clusters */
    private boolean isCombined(String clusterId, Collection<ContainerModel> containers) {
        return containers.stream()
                         .map(model -> model.getCluster().getHostClusterId())
                         .filter(Optional::isPresent)
                         .anyMatch(id -> id.get().equals(clusterId));
    }

    // Intentionally empty: no experimental settings are currently handled
    private void setupExperimental(ContentCluster cluster, ModelElement experimental) {
    }

    /** Throws if two direct subgroups of the given group share a name. */
    private void validateGroupSiblings(String cluster, StorageGroup group) {
        Set<String> siblings = new HashSet<>();
        for (StorageGroup g : group.getSubgroups()) {
            String name = g.getName();
            if (siblings.contains(name)) {
                throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " +
                                                   "with name '" + name + "' in the same subgroup. Group sibling names must be unique.");
            }
            siblings.add(name);
        }
    }

    /** Recursively validates that sibling group names are unique at every level of the group tree. */
    private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) {
        if (group == null) {
            return; // no group structure to validate
        }
        validateGroupSiblings(cluster, group);
        for (StorageGroup g : group.getSubgroups()) {
            validateThatGroupSiblingsAreUnique(cluster, g);
        }
    }

    /**
     * Attaches cluster controllers to the content cluster. In order of preference:
     * reuse the controllers of an overlapping content cluster; in multitenant systems
     * create a standalone controller cluster on dedicated or drawn hosts; otherwise
     * reuse (or lazily create) the admin-owned shared controller cluster.
     */
    private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context,
                                       StorageGroup rootGroup, ModelElement contentElement,
                                       String contentClusterName, ContentCluster contentCluster) {
        if (admin == null) return; // only in tests (see field doc)
        if (contentCluster.getPersistence() == null) return;

        ClusterControllerContainerCluster clusterControllers;
        ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster);
        if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) {
            // Colocated clusters share controllers
            clusterControllers = overlappingCluster.getClusterControllers();
        } else if (admin.multitenant()) {
            String clusterName = contentClusterName + "-controllers";
            NodesSpecification nodesSpecification =
                    NodesSpecification.optionalDedicatedFromParent(contentElement.child("controllers"), context)
                                      .orElse(NodesSpecification.nonDedicated(3, context));
            Collection<HostResource> hosts = nodesSpecification.isDedicated() ?
                                             getControllerHosts(nodesSpecification, admin, clusterName, context) :
                                             drawControllerHosts(nodesSpecification.minResources().nodes(), rootGroup, containers);
            clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"),
                                                          hosts, clusterName, true, context.getDeployState());
            contentCluster.clusterControllers = clusterControllers;
        } else {
            clusterControllers = admin.getClusterControllers();
            if (clusterControllers == null) {
                // Lazily create the shared controller cluster on the admin's controller hosts
                List<HostResource> hosts = admin.getClusterControllerHosts();
                if (hosts.size() > 1) {
                    context.getDeployState().getDeployLogger().log(Level.INFO,
                            "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly.");
                }
                clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false, context.getDeployState());
                admin.setClusterControllers(clusterControllers);
            }
        }

        addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster);
        ReindexingContext reindexingContext = clusterControllers.reindexingContext();
        contentCluster.documentDefinitions.values()
                      .forEach(type -> reindexingContext.addDocumentType(contentCluster.clusterId, type));
    }

    /** Returns any other content cluster which shares nodes with this, or null if none are built */
    private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) {
        for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) {
            if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster))
                return otherContentCluster;
        }
        return null;
    }

    /** Returns whether the two clusters have at least one host in common. */
    private boolean overlaps(ContentCluster c1, ContentCluster c2) {
        Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet());
        Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet());
        return ! Sets.intersection(c1Hosts, c2Hosts).isEmpty();
    }

    /** Provisions dedicated admin hosts for the cluster controllers. */
    private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) {
        return nodesSpecification.provision(admin.hostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger(), false).keySet();
    }

    /** Draws controller hosts from the content nodes: active hosts first, then retired ones. */
    private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) {
        List<HostResource> hosts = drawControllerHosts(count, false, rootGroup, containers);
        List<HostResource> retiredHosts = drawControllerHosts(count, true, rootGroup, containers);
        List<HostResource> all = new ArrayList<>(hosts);
        all.addAll(retiredHosts);
        return all;
    }

    /** Draws up to count hosts from the content group tree, dropping one to keep an odd count. */
    private List<HostResource> drawControllerHosts(int count, boolean retired, StorageGroup rootGroup, Collection<ContainerModel> containers) {
        List<HostResource> hosts = drawContentHostsRecursively(count, retired, rootGroup);
        // Ensure an odd number of hosts (when any) — presumably for controller quorum; TODO confirm
        if (hosts.size() % 2 == 0 && ! hosts.isEmpty())
            hosts = hosts.subList(0, hosts.size()-1);
        return hosts;
    }

    /**
     * Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible
     * if less than <code>count</code> are available.
     *
     * This will draw the same nodes each time it is
     * invoked if cluster names and node indexes are unchanged.
     */
    private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters,
                                                  Set<HostResource> usedHosts) {
        if (containerClusters.isEmpty()) return Collections.emptyList();

        // Deterministic ordering: clusters by name, hosts by container index
        List<HostResource> allHosts = new ArrayList<>();
        for (ApplicationContainerCluster cluster : clustersSortedByName(containerClusters))
            allHosts.addAll(hostResourcesSortedByIndex(cluster));

        List<HostResource> uniqueHostsWithoutClusterController = allHosts.stream()
                .filter(h -> ! usedHosts.contains(h))
                .filter(h -> ! hostHasClusterController(h.getHostname(), allHosts))
                .distinct()
                .collect(Collectors.toList());

        return uniqueHostsWithoutClusterController.subList(0, Math.min(uniqueHostsWithoutClusterController.size(), count));
    }

    /** Returns the application container clusters among the given models, sorted by cluster name. */
    private List<ApplicationContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) {
        return containerModels.stream()
                .map(ContainerModel::getCluster)
                .filter(cluster -> cluster instanceof ApplicationContainerCluster)
                .map(cluster -> (ApplicationContainerCluster) cluster)
                .sorted(Comparator.comparing(ContainerCluster::getName))
                .collect(Collectors.toList());
    }

    /** Returns the hosts of the given cluster's containers, sorted by container index. */
    private List<HostResource> hostResourcesSortedByIndex(ApplicationContainerCluster cluster) {
        return cluster.getContainers().stream()
                .sorted(Comparator.comparing(Container::index))
                .map(Container::getHostResource)
                .collect(Collectors.toList());
    }

    /** Returns whether any host having the given hostname has a cluster controller */
    private boolean hostHasClusterController(String hostname, List<HostResource> hosts) {
        for (HostResource host : hosts) {
            if ( ! host.getHostname().equals(hostname)) continue;

            if (hasClusterController(host))
                return true;
        }
        return false;
    }

    /** Returns whether a cluster controller container runs on the given host. */
    private boolean hasClusterController(HostResource host) {
        for (Service service : host.getServices())
            if (service instanceof ClusterControllerContainer) return true;
        return false;
    }

    /**
     * Draw <code>count</code> nodes from as many different content groups below this as possible.
     * This will only achieve maximum spread in the case where the groups are balanced and never on the same
     * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy.
     */
    private List<HostResource> drawContentHostsRecursively(int count, boolean retired, StorageGroup group) {
        Set<HostResource> hosts = new HashSet<>();
        if (group.getNodes().isEmpty()) {
            int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size());
            for (StorageGroup subgroup : group.getSubgroups())
                hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, retired, subgroup));
        } else {
            hosts.addAll(group.getNodes().stream()
                              .filter(node -> node.isRetired() == retired)
                              .map(StorageNode::getHostResource).collect(Collectors.toList()));
        }

        List<HostResource> sortedHosts = new ArrayList<>(hosts);
        sortedHosts.sort((a, b) -> (a.comparePrimarilyByIndexTo(b)));
        sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size()));
        return sortedHosts;
    }

    /**
     * Creates a cluster controller container cluster on the given hosts, one container per host,
     * with indices assigned in host iteration order. Containers are only created when the
     * cluster has none already.
     */
    private ClusterControllerContainerCluster createClusterControllers(AbstractConfigProducer parent,
                                                                       Collection<HostResource> hosts,
                                                                       String name,
                                                                       boolean multitenant,
                                                                       DeployState deployState) {
        var clusterControllers = new ClusterControllerContainerCluster(parent, name, name, deployState);
        List<ClusterControllerContainer> containers = new ArrayList<>();
        if (clusterControllers.getContainers().isEmpty()) {
            int index = 0;
            for (HostResource host : hosts) {
                var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant, deployState);
                clusterControllerContainer.setHostResource(host);
                clusterControllerContainer.initService(deployState.getDeployLogger());
                clusterControllerContainer.setProp("clustertype", "admin")
                                          .setProp("clustername", clusterControllers.getName())
                                          .setProp("index", String.valueOf(index));
                containers.add(clusterControllerContainer);
                ++index;
            }
        }
        clusterControllers.addContainers(containers);
        return clusterControllers;
    }

    /** Adds the controller component (if missing) and this cluster's configurer to each controller container. */
    private void addClusterControllerComponentsForThisCluster(ClusterControllerContainerCluster clusterControllers,
                                                              ContentCluster contentCluster) {
        int index = 0;
        for (var container : clusterControllers.getContainers()) {
            if ( ! hasClusterControllerComponent(container))
                container.addComponent(new ClusterControllerComponent());
            container.addComponent(new ClusterControllerConfigurer(contentCluster, index++, clusterControllers.getContainers().size()));
        }
    }

    /** Returns whether the container already has a ClusterControllerComponent. */
    private boolean hasClusterControllerComponent(Container container) {
        for (Object o : container.getComponents().getComponents())
            if (o instanceof ClusterControllerComponent) return true;
        return false;
    }

}
class Builder { /** The admin model of this system or null if none (which only happens in tests) */ private final Admin admin; public Builder(Admin admin) { this.admin = admin; } public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) { ModelElement contentElement = new ModelElement(w3cContentElement); DeployState deployState = context.getDeployState(); ModelElement documentsElement = contentElement.child("documents"); Map<String, NewDocumentType> documentDefinitions = new SearchDefinitionBuilder().build(deployState.getDocumentModel().getDocumentManager(), documentsElement); String routingSelection = new DocumentSelectionBuilder().build(documentsElement); RedundancyBuilder redundancyBuilder = new RedundancyBuilder(contentElement); Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement); ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterId(contentElement), documentDefinitions, globallyDistributedDocuments, routingSelection, deployState.zone(), deployState.isHosted()); c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterId(contentElement), contentElement).build(deployState, c, contentElement.getXml()); c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments, isCombined(getClusterId(contentElement), containers)) .build(deployState, c, contentElement.getXml()); c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c); c.storageNodes = new StorageCluster.Builder().build(deployState, c, w3cContentElement); c.distributorNodes = new DistributorCluster.Builder(c).build(deployState, c, w3cContentElement); c.rootGroup = new StorageGroup.Builder(contentElement, context).buildRootGroup(deployState, redundancyBuilder, c); validateThatGroupSiblingsAreUnique(c.clusterId, c.rootGroup); c.search.handleRedundancy(c.redundancy); 
setupSearchCluster(c.search, contentElement, deployState.getDeployLogger()); if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) throw new RuntimeException("Indexed search requires proton as engine"); if (documentsElement != null) { ModelElement e = documentsElement.child("document-processing"); if (e != null) setupDocumentProcessing(c, e); } else if (c.persistenceFactory != null) { throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified."); } ModelElement tuning = contentElement.child("tuning"); if (tuning != null) setupTuning(c, tuning); ModelElement experimental = contentElement.child("experimental"); if (experimental != null) setupExperimental(c, experimental); if (context.getParentProducer().getRoot() == null) return c; addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterId, c); return c; } private void setupSearchCluster(ContentSearchCluster csc, ModelElement element, DeployLogger logger) { ContentSearch search = DomContentSearchBuilder.build(element); Double visibilityDelay = search.getVisibilityDelay(); if (visibilityDelay != null) { csc.setVisibilityDelay(visibilityDelay); } if (csc.hasIndexedCluster()) { setupIndexedCluster(csc.getIndexed(), search, element, logger); } } private void setupIndexedCluster(IndexedSearchCluster index, ContentSearch search, ModelElement element, DeployLogger logger) { Double queryTimeout = search.getQueryTimeout(); if (queryTimeout != null) { Preconditions.checkState(index.getQueryTimeout() == null, "In " + index + ": You may not specify query-timeout in both proton and content."); index.setQueryTimeout(queryTimeout); } index.setSearchCoverage(DomSearchCoverageBuilder.build(element)); index.setDispatchSpec(DomDispatchBuilder.build(element)); if (index.getTuning() == null) index.setTuning(new Tuning(index)); index.getTuning().dispatch = DomTuningDispatchBuilder.build(element, logger); } private void 
setupDocumentProcessing(ContentCluster c, ModelElement e) { String docprocCluster = e.stringAttribute("cluster"); if (docprocCluster != null) { docprocCluster = docprocCluster.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocCluster != null && !docprocCluster.isEmpty()) { c.getSearch().getIndexed().setIndexingClusterName(docprocCluster); } } String docprocChain = e.stringAttribute("chain"); if (docprocChain != null) { docprocChain = docprocChain.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocChain != null && !docprocChain.isEmpty()) { c.getSearch().getIndexed().setIndexingChainName(docprocChain); } } } private void setupTuning(ContentCluster c, ModelElement tuning) { ModelElement distribution = tuning.child("distribution"); if (distribution != null) { String attr = distribution.stringAttribute("type"); if (attr != null) { if (attr.toLowerCase().equals("strict")) { c.distributionMode = DistributionMode.STRICT; } else if (attr.toLowerCase().equals("loose")) { c.distributionMode = DistributionMode.LOOSE; } else if (attr.toLowerCase().equals("legacy")) { c.distributionMode = DistributionMode.LEGACY; } else { throw new IllegalStateException("Distribution type " + attr + " not supported."); } } } ModelElement merges = tuning.child("merges"); if (merges != null) { Integer attr = merges.integerAttribute("max-nodes-per-merge"); if (attr != null) { c.maxNodesPerMerge = attr; } } } /** Returns whether this hosts one of the given container clusters */ private boolean isCombined(String clusterId, Collection<ContainerModel> containers) { return containers.stream() .map(model -> model.getCluster().getHostClusterId()) .filter(Optional::isPresent) .anyMatch(id -> id.get().equals(clusterId)); } private void setupExperimental(ContentCluster cluster, ModelElement experimental) { } private void validateGroupSiblings(String cluster, StorageGroup group) { Set<String> siblings = new HashSet<>(); for (StorageGroup g : group.getSubgroups()) { String name = 
g.getName(); if (siblings.contains(name)) { throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " + "with name '" + name + "' in the same subgroup. Group sibling names must be unique."); } siblings.add(name); } } private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) { if (group == null) { return; } validateGroupSiblings(cluster, group); for (StorageGroup g : group.getSubgroups()) { validateThatGroupSiblingsAreUnique(cluster, g); } } private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context, StorageGroup rootGroup, ModelElement contentElement, String contentClusterName, ContentCluster contentCluster) { if (admin == null) return; if (contentCluster.getPersistence() == null) return; ClusterControllerContainerCluster clusterControllers; ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster); if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) { clusterControllers = overlappingCluster.getClusterControllers(); } else if (admin.multitenant()) { String clusterName = contentClusterName + "-controllers"; NodesSpecification nodesSpecification = NodesSpecification.optionalDedicatedFromParent(contentElement.child("controllers"), context) .orElse(NodesSpecification.nonDedicated(3, context)); Collection<HostResource> hosts = nodesSpecification.isDedicated() ? 
getControllerHosts(nodesSpecification, admin, clusterName, context) : drawControllerHosts(nodesSpecification.minResources().nodes(), rootGroup, containers); clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true, context.getDeployState()); contentCluster.clusterControllers = clusterControllers; } else { clusterControllers = admin.getClusterControllers(); if (clusterControllers == null) { List<HostResource> hosts = admin.getClusterControllerHosts(); if (hosts.size() > 1) { context.getDeployState().getDeployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly."); } clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false, context.getDeployState()); admin.setClusterControllers(clusterControllers); } } addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster); ReindexingContext reindexingContext = clusterControllers.reindexingContext(); contentCluster.documentDefinitions.values() .forEach(type -> reindexingContext.addDocumentType(contentCluster.clusterId, type)); } /** Returns any other content cluster which shares nodes with this, or null if none are built */ private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) { for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) { if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster)) return otherContentCluster; } return null; } private boolean overlaps(ContentCluster c1, ContentCluster c2) { Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); return ! 
Sets.intersection(c1Hosts, c2Hosts).isEmpty(); } private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) { return nodesSpecification.provision(admin.hostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger(), false).keySet(); } private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawControllerHosts(count, false, rootGroup, containers); List<HostResource> retiredHosts = drawControllerHosts(count, true, rootGroup, containers); List<HostResource> all = new ArrayList<>(hosts); all.addAll(retiredHosts); return all; } private List<HostResource> drawControllerHosts(int count, boolean retired, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawContentHostsRecursively(count, retired, rootGroup); if (hosts.size() % 2 == 0 && ! hosts.isEmpty()) hosts = hosts.subList(0, hosts.size()-1); return hosts; } /** * Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible * if less than <code>count</code> are available. * * This will draw the same nodes each time it is * invoked if cluster names and node indexes are unchanged. */ private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters, Set<HostResource> usedHosts) { if (containerClusters.isEmpty()) return Collections.emptyList(); List<HostResource> allHosts = new ArrayList<>(); for (ApplicationContainerCluster cluster : clustersSortedByName(containerClusters)) allHosts.addAll(hostResourcesSortedByIndex(cluster)); List<HostResource> uniqueHostsWithoutClusterController = allHosts.stream() .filter(h -> ! usedHosts.contains(h)) .filter(h -> ! 
hostHasClusterController(h.getHostname(), allHosts)) .distinct() .collect(Collectors.toList()); return uniqueHostsWithoutClusterController.subList(0, Math.min(uniqueHostsWithoutClusterController.size(), count)); } private List<ApplicationContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) { return containerModels.stream() .map(ContainerModel::getCluster) .filter(cluster -> cluster instanceof ApplicationContainerCluster) .map(cluster -> (ApplicationContainerCluster) cluster) .sorted(Comparator.comparing(ContainerCluster::getName)) .collect(Collectors.toList()); } private List<HostResource> hostResourcesSortedByIndex(ApplicationContainerCluster cluster) { return cluster.getContainers().stream() .sorted(Comparator.comparing(Container::index)) .map(Container::getHostResource) .collect(Collectors.toList()); } /** Returns whether any host having the given hostname has a cluster controller */ private boolean hostHasClusterController(String hostname, List<HostResource> hosts) { for (HostResource host : hosts) { if ( ! host.getHostname().equals(hostname)) continue; if (hasClusterController(host)) return true; } return false; } private boolean hasClusterController(HostResource host) { for (Service service : host.getServices()) if (service instanceof ClusterControllerContainer) return true; return false; } /** * Draw <code>count</code> nodes from as many different content groups below this as possible. * This will only achieve maximum spread in the case where the groups are balanced and never on the same * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy. 
*/ private List<HostResource> drawContentHostsRecursively(int count, boolean retired, StorageGroup group) { Set<HostResource> hosts = new HashSet<>(); if (group.getNodes().isEmpty()) { int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size()); for (StorageGroup subgroup : group.getSubgroups()) hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, retired, subgroup)); } else { hosts.addAll(group.getNodes().stream() .filter(node -> node.isRetired() == retired) .map(StorageNode::getHostResource).collect(Collectors.toList())); } List<HostResource> sortedHosts = new ArrayList<>(hosts); sortedHosts.sort((a, b) -> (a.comparePrimarilyByIndexTo(b))); sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size())); return sortedHosts; } private ClusterControllerContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant, DeployState deployState) { var clusterControllers = new ClusterControllerContainerCluster(parent, name, name, deployState); List<ClusterControllerContainer> containers = new ArrayList<>(); if (clusterControllers.getContainers().isEmpty()) { int index = 0; for (HostResource host : hosts) { var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant, deployState); clusterControllerContainer.setHostResource(host); clusterControllerContainer.initService(deployState.getDeployLogger()); clusterControllerContainer.setProp("clustertype", "admin") .setProp("clustername", clusterControllers.getName()) .setProp("index", String.valueOf(index)); containers.add(clusterControllerContainer); ++index; } } clusterControllers.addContainers(containers); return clusterControllers; } private void addClusterControllerComponentsForThisCluster(ClusterControllerContainerCluster clusterControllers, ContentCluster contentCluster) { int index = 0; for (var container : clusterControllers.getContainers()) { if ( ! 
hasClusterControllerComponent(container)) container.addComponent(new ClusterControllerComponent()); container.addComponent(new ClusterControllerConfigurer(contentCluster, index++, clusterControllers.getContainers().size())); } } private boolean hasClusterControllerComponent(Container container) { for (Object o : container.getComponents().getComponents()) if (o instanceof ClusterControllerComponent) return true; return false; } }
Yes. If that changes, lots of things break.
public void getConfig(DistributionConfig.Builder builder) { DistributionConfig.Cluster.Builder clusterBuilder = new DistributionConfig.Cluster.Builder(); if (redundancy() != null) clusterBuilder.redundancy(redundancy().effectiveFinalRedundancy()); if (getRootGroup() != null) clusterBuilder.group(getRootGroup().getGroupStructureConfig().stream() .map(StorDistributionConfig.Group.Builder::build) .map(config -> new DistributionConfig.Cluster.Group.Builder() .index(config.index()) .name(config.name()) .capacity(config.capacity()) .partitions(config.partitions()) .nodes(config.nodes().stream() .map(node -> new DistributionConfig.Cluster.Group.Nodes.Builder() .index(node.index()) .retired(node.retired())) .collect(toList()))) .collect(toList())); builder.cluster(getConfigId(), clusterBuilder); }
builder.cluster(getConfigId(), clusterBuilder);
public void getConfig(DistributionConfig.Builder builder) { DistributionConfig.Cluster.Builder clusterBuilder = new DistributionConfig.Cluster.Builder(); StorDistributionConfig.Builder storDistributionBuilder = new StorDistributionConfig.Builder(); getConfig(storDistributionBuilder); StorDistributionConfig config = storDistributionBuilder.build(); clusterBuilder.active_per_leaf_group(config.active_per_leaf_group()); clusterBuilder.ready_copies(config.ready_copies()); clusterBuilder.redundancy(config.redundancy()); clusterBuilder.initial_redundancy(config.initial_redundancy()); for (StorDistributionConfig.Group group : config.group()) { DistributionConfig.Cluster.Group.Builder groupBuilder = new DistributionConfig.Cluster.Group.Builder(); groupBuilder.index(group.index()) .name(group.name()) .capacity(group.capacity()) .partitions(group.partitions()); for (var node : group.nodes()) { DistributionConfig.Cluster.Group.Nodes.Builder nodesBuilder = new DistributionConfig.Cluster.Group.Nodes.Builder(); nodesBuilder.index(node.index()) .retired(node.retired()); groupBuilder.nodes(nodesBuilder); } clusterBuilder.group(groupBuilder); } builder.cluster(getConfigId(), clusterBuilder); }
class Builder { /** The admin model of this system or null if none (which only happens in tests) */ private final Admin admin; public Builder(Admin admin) { this.admin = admin; } public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) { ModelElement contentElement = new ModelElement(w3cContentElement); DeployState deployState = context.getDeployState(); ModelElement documentsElement = contentElement.child("documents"); Map<String, NewDocumentType> documentDefinitions = new SearchDefinitionBuilder().build(deployState.getDocumentModel().getDocumentManager(), documentsElement); String routingSelection = new DocumentSelectionBuilder().build(documentsElement); RedundancyBuilder redundancyBuilder = new RedundancyBuilder(contentElement); Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement); ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterId(contentElement), documentDefinitions, globallyDistributedDocuments, routingSelection, deployState.zone(), deployState.isHosted()); c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterId(contentElement), contentElement).build(deployState, c, contentElement.getXml()); c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments, isCombined(getClusterId(contentElement), containers)) .build(deployState, c, contentElement.getXml()); c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c); c.storageNodes = new StorageCluster.Builder().build(deployState, c, w3cContentElement); c.distributorNodes = new DistributorCluster.Builder(c).build(deployState, c, w3cContentElement); c.rootGroup = new StorageGroup.Builder(contentElement, context).buildRootGroup(deployState, redundancyBuilder, c); validateThatGroupSiblingsAreUnique(c.clusterId, c.rootGroup); c.search.handleRedundancy(c.redundancy); 
setupSearchCluster(c.search, contentElement, deployState.getDeployLogger()); if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) throw new RuntimeException("Indexed search requires proton as engine"); if (documentsElement != null) { ModelElement e = documentsElement.child("document-processing"); if (e != null) setupDocumentProcessing(c, e); } else if (c.persistenceFactory != null) { throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified."); } ModelElement tuning = contentElement.child("tuning"); if (tuning != null) setupTuning(c, tuning); ModelElement experimental = contentElement.child("experimental"); if (experimental != null) setupExperimental(c, experimental); if (context.getParentProducer().getRoot() == null) return c; addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterId, c); return c; } private void setupSearchCluster(ContentSearchCluster csc, ModelElement element, DeployLogger logger) { ContentSearch search = DomContentSearchBuilder.build(element); Double visibilityDelay = search.getVisibilityDelay(); if (visibilityDelay != null) { csc.setVisibilityDelay(visibilityDelay); } if (csc.hasIndexedCluster()) { setupIndexedCluster(csc.getIndexed(), search, element, logger); } } private void setupIndexedCluster(IndexedSearchCluster index, ContentSearch search, ModelElement element, DeployLogger logger) { Double queryTimeout = search.getQueryTimeout(); if (queryTimeout != null) { Preconditions.checkState(index.getQueryTimeout() == null, "In " + index + ": You may not specify query-timeout in both proton and content."); index.setQueryTimeout(queryTimeout); } index.setSearchCoverage(DomSearchCoverageBuilder.build(element)); index.setDispatchSpec(DomDispatchBuilder.build(element)); if (index.getTuning() == null) index.setTuning(new Tuning(index)); index.getTuning().dispatch = DomTuningDispatchBuilder.build(element, logger); } private void 
setupDocumentProcessing(ContentCluster c, ModelElement e) { String docprocCluster = e.stringAttribute("cluster"); if (docprocCluster != null) { docprocCluster = docprocCluster.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocCluster != null && !docprocCluster.isEmpty()) { c.getSearch().getIndexed().setIndexingClusterName(docprocCluster); } } String docprocChain = e.stringAttribute("chain"); if (docprocChain != null) { docprocChain = docprocChain.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocChain != null && !docprocChain.isEmpty()) { c.getSearch().getIndexed().setIndexingChainName(docprocChain); } } } private void setupTuning(ContentCluster c, ModelElement tuning) { ModelElement distribution = tuning.child("distribution"); if (distribution != null) { String attr = distribution.stringAttribute("type"); if (attr != null) { if (attr.toLowerCase().equals("strict")) { c.distributionMode = DistributionMode.STRICT; } else if (attr.toLowerCase().equals("loose")) { c.distributionMode = DistributionMode.LOOSE; } else if (attr.toLowerCase().equals("legacy")) { c.distributionMode = DistributionMode.LEGACY; } else { throw new IllegalStateException("Distribution type " + attr + " not supported."); } } } ModelElement merges = tuning.child("merges"); if (merges != null) { Integer attr = merges.integerAttribute("max-nodes-per-merge"); if (attr != null) { c.maxNodesPerMerge = attr; } } } /** Returns whether this hosts one of the given container clusters */ private boolean isCombined(String clusterId, Collection<ContainerModel> containers) { return containers.stream() .map(model -> model.getCluster().getHostClusterId()) .filter(Optional::isPresent) .anyMatch(id -> id.get().equals(clusterId)); } private void setupExperimental(ContentCluster cluster, ModelElement experimental) { } private void validateGroupSiblings(String cluster, StorageGroup group) { Set<String> siblings = new HashSet<>(); for (StorageGroup g : group.getSubgroups()) { String name = 
g.getName(); if (siblings.contains(name)) { throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " + "with name '" + name + "' in the same subgroup. Group sibling names must be unique."); } siblings.add(name); } } private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) { if (group == null) { return; } validateGroupSiblings(cluster, group); for (StorageGroup g : group.getSubgroups()) { validateThatGroupSiblingsAreUnique(cluster, g); } } private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context, StorageGroup rootGroup, ModelElement contentElement, String contentClusterName, ContentCluster contentCluster) { if (admin == null) return; if (contentCluster.getPersistence() == null) return; ClusterControllerContainerCluster clusterControllers; ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster); if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) { clusterControllers = overlappingCluster.getClusterControllers(); } else if (admin.multitenant()) { String clusterName = contentClusterName + "-controllers"; NodesSpecification nodesSpecification = NodesSpecification.optionalDedicatedFromParent(contentElement.child("controllers"), context) .orElse(NodesSpecification.nonDedicated(3, context)); Collection<HostResource> hosts = nodesSpecification.isDedicated() ? 
getControllerHosts(nodesSpecification, admin, clusterName, context) : drawControllerHosts(nodesSpecification.minResources().nodes(), rootGroup, containers); clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true, context.getDeployState()); contentCluster.clusterControllers = clusterControllers; } else { clusterControllers = admin.getClusterControllers(); if (clusterControllers == null) { List<HostResource> hosts = admin.getClusterControllerHosts(); if (hosts.size() > 1) { context.getDeployState().getDeployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly."); } clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false, context.getDeployState()); admin.setClusterControllers(clusterControllers); } } addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster); ReindexingContext reindexingContext = clusterControllers.reindexingContext(); contentCluster.documentDefinitions.values() .forEach(type -> reindexingContext.addDocumentType(contentCluster.clusterId, type)); } /** Returns any other content cluster which shares nodes with this, or null if none are built */ private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) { for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) { if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster)) return otherContentCluster; } return null; } private boolean overlaps(ContentCluster c1, ContentCluster c2) { Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); return ! 
Sets.intersection(c1Hosts, c2Hosts).isEmpty(); } private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) { return nodesSpecification.provision(admin.hostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger(), false).keySet(); } private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawControllerHosts(count, false, rootGroup, containers); List<HostResource> retiredHosts = drawControllerHosts(count, true, rootGroup, containers); List<HostResource> all = new ArrayList<>(hosts); all.addAll(retiredHosts); return all; } private List<HostResource> drawControllerHosts(int count, boolean retired, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawContentHostsRecursively(count, retired, rootGroup); if (hosts.size() % 2 == 0 && ! hosts.isEmpty()) hosts = hosts.subList(0, hosts.size()-1); return hosts; } /** * Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible * if less than <code>count</code> are available. * * This will draw the same nodes each time it is * invoked if cluster names and node indexes are unchanged. */ private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters, Set<HostResource> usedHosts) { if (containerClusters.isEmpty()) return Collections.emptyList(); List<HostResource> allHosts = new ArrayList<>(); for (ApplicationContainerCluster cluster : clustersSortedByName(containerClusters)) allHosts.addAll(hostResourcesSortedByIndex(cluster)); List<HostResource> uniqueHostsWithoutClusterController = allHosts.stream() .filter(h -> ! usedHosts.contains(h)) .filter(h -> ! 
hostHasClusterController(h.getHostname(), allHosts)) .distinct() .collect(Collectors.toList()); return uniqueHostsWithoutClusterController.subList(0, Math.min(uniqueHostsWithoutClusterController.size(), count)); } private List<ApplicationContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) { return containerModels.stream() .map(ContainerModel::getCluster) .filter(cluster -> cluster instanceof ApplicationContainerCluster) .map(cluster -> (ApplicationContainerCluster) cluster) .sorted(Comparator.comparing(ContainerCluster::getName)) .collect(Collectors.toList()); } private List<HostResource> hostResourcesSortedByIndex(ApplicationContainerCluster cluster) { return cluster.getContainers().stream() .sorted(Comparator.comparing(Container::index)) .map(Container::getHostResource) .collect(Collectors.toList()); } /** Returns whether any host having the given hostname has a cluster controller */ private boolean hostHasClusterController(String hostname, List<HostResource> hosts) { for (HostResource host : hosts) { if ( ! host.getHostname().equals(hostname)) continue; if (hasClusterController(host)) return true; } return false; } private boolean hasClusterController(HostResource host) { for (Service service : host.getServices()) if (service instanceof ClusterControllerContainer) return true; return false; } /** * Draw <code>count</code> nodes from as many different content groups below this as possible. * This will only achieve maximum spread in the case where the groups are balanced and never on the same * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy. 
*/ private List<HostResource> drawContentHostsRecursively(int count, boolean retired, StorageGroup group) { Set<HostResource> hosts = new HashSet<>(); if (group.getNodes().isEmpty()) { int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size()); for (StorageGroup subgroup : group.getSubgroups()) hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, retired, subgroup)); } else { hosts.addAll(group.getNodes().stream() .filter(node -> node.isRetired() == retired) .map(StorageNode::getHostResource).collect(Collectors.toList())); } List<HostResource> sortedHosts = new ArrayList<>(hosts); sortedHosts.sort((a, b) -> (a.comparePrimarilyByIndexTo(b))); sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size())); return sortedHosts; } private ClusterControllerContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant, DeployState deployState) { var clusterControllers = new ClusterControllerContainerCluster(parent, name, name, deployState); List<ClusterControllerContainer> containers = new ArrayList<>(); if (clusterControllers.getContainers().isEmpty()) { int index = 0; for (HostResource host : hosts) { var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant, deployState); clusterControllerContainer.setHostResource(host); clusterControllerContainer.initService(deployState.getDeployLogger()); clusterControllerContainer.setProp("clustertype", "admin") .setProp("clustername", clusterControllers.getName()) .setProp("index", String.valueOf(index)); containers.add(clusterControllerContainer); ++index; } } clusterControllers.addContainers(containers); return clusterControllers; } private void addClusterControllerComponentsForThisCluster(ClusterControllerContainerCluster clusterControllers, ContentCluster contentCluster) { int index = 0; for (var container : clusterControllers.getContainers()) { if ( ! 
hasClusterControllerComponent(container)) container.addComponent(new ClusterControllerComponent()); container.addComponent(new ClusterControllerConfigurer(contentCluster, index++, clusterControllers.getContainers().size())); } } private boolean hasClusterControllerComponent(Container container) { for (Object o : container.getComponents().getComponents()) if (o instanceof ClusterControllerComponent) return true; return false; } }
class Builder { /** The admin model of this system or null if none (which only happens in tests) */ private final Admin admin; public Builder(Admin admin) { this.admin = admin; } public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) { ModelElement contentElement = new ModelElement(w3cContentElement); DeployState deployState = context.getDeployState(); ModelElement documentsElement = contentElement.child("documents"); Map<String, NewDocumentType> documentDefinitions = new SearchDefinitionBuilder().build(deployState.getDocumentModel().getDocumentManager(), documentsElement); String routingSelection = new DocumentSelectionBuilder().build(documentsElement); RedundancyBuilder redundancyBuilder = new RedundancyBuilder(contentElement); Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement); ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterId(contentElement), documentDefinitions, globallyDistributedDocuments, routingSelection, deployState.zone(), deployState.isHosted()); c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterId(contentElement), contentElement).build(deployState, c, contentElement.getXml()); c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments, isCombined(getClusterId(contentElement), containers)) .build(deployState, c, contentElement.getXml()); c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c); c.storageNodes = new StorageCluster.Builder().build(deployState, c, w3cContentElement); c.distributorNodes = new DistributorCluster.Builder(c).build(deployState, c, w3cContentElement); c.rootGroup = new StorageGroup.Builder(contentElement, context).buildRootGroup(deployState, redundancyBuilder, c); validateThatGroupSiblingsAreUnique(c.clusterId, c.rootGroup); c.search.handleRedundancy(c.redundancy); 
setupSearchCluster(c.search, contentElement, deployState.getDeployLogger()); if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) throw new RuntimeException("Indexed search requires proton as engine"); if (documentsElement != null) { ModelElement e = documentsElement.child("document-processing"); if (e != null) setupDocumentProcessing(c, e); } else if (c.persistenceFactory != null) { throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified."); } ModelElement tuning = contentElement.child("tuning"); if (tuning != null) setupTuning(c, tuning); ModelElement experimental = contentElement.child("experimental"); if (experimental != null) setupExperimental(c, experimental); if (context.getParentProducer().getRoot() == null) return c; addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterId, c); return c; } private void setupSearchCluster(ContentSearchCluster csc, ModelElement element, DeployLogger logger) { ContentSearch search = DomContentSearchBuilder.build(element); Double visibilityDelay = search.getVisibilityDelay(); if (visibilityDelay != null) { csc.setVisibilityDelay(visibilityDelay); } if (csc.hasIndexedCluster()) { setupIndexedCluster(csc.getIndexed(), search, element, logger); } } private void setupIndexedCluster(IndexedSearchCluster index, ContentSearch search, ModelElement element, DeployLogger logger) { Double queryTimeout = search.getQueryTimeout(); if (queryTimeout != null) { Preconditions.checkState(index.getQueryTimeout() == null, "In " + index + ": You may not specify query-timeout in both proton and content."); index.setQueryTimeout(queryTimeout); } index.setSearchCoverage(DomSearchCoverageBuilder.build(element)); index.setDispatchSpec(DomDispatchBuilder.build(element)); if (index.getTuning() == null) index.setTuning(new Tuning(index)); index.getTuning().dispatch = DomTuningDispatchBuilder.build(element, logger); } private void 
setupDocumentProcessing(ContentCluster c, ModelElement e) { String docprocCluster = e.stringAttribute("cluster"); if (docprocCluster != null) { docprocCluster = docprocCluster.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocCluster != null && !docprocCluster.isEmpty()) { c.getSearch().getIndexed().setIndexingClusterName(docprocCluster); } } String docprocChain = e.stringAttribute("chain"); if (docprocChain != null) { docprocChain = docprocChain.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocChain != null && !docprocChain.isEmpty()) { c.getSearch().getIndexed().setIndexingChainName(docprocChain); } } } private void setupTuning(ContentCluster c, ModelElement tuning) { ModelElement distribution = tuning.child("distribution"); if (distribution != null) { String attr = distribution.stringAttribute("type"); if (attr != null) { if (attr.toLowerCase().equals("strict")) { c.distributionMode = DistributionMode.STRICT; } else if (attr.toLowerCase().equals("loose")) { c.distributionMode = DistributionMode.LOOSE; } else if (attr.toLowerCase().equals("legacy")) { c.distributionMode = DistributionMode.LEGACY; } else { throw new IllegalStateException("Distribution type " + attr + " not supported."); } } } ModelElement merges = tuning.child("merges"); if (merges != null) { Integer attr = merges.integerAttribute("max-nodes-per-merge"); if (attr != null) { c.maxNodesPerMerge = attr; } } } /** Returns whether this hosts one of the given container clusters */ private boolean isCombined(String clusterId, Collection<ContainerModel> containers) { return containers.stream() .map(model -> model.getCluster().getHostClusterId()) .filter(Optional::isPresent) .anyMatch(id -> id.get().equals(clusterId)); } private void setupExperimental(ContentCluster cluster, ModelElement experimental) { } private void validateGroupSiblings(String cluster, StorageGroup group) { Set<String> siblings = new HashSet<>(); for (StorageGroup g : group.getSubgroups()) { String name = 
g.getName(); if (siblings.contains(name)) { throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " + "with name '" + name + "' in the same subgroup. Group sibling names must be unique."); } siblings.add(name); } } private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) { if (group == null) { return; } validateGroupSiblings(cluster, group); for (StorageGroup g : group.getSubgroups()) { validateThatGroupSiblingsAreUnique(cluster, g); } } private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context, StorageGroup rootGroup, ModelElement contentElement, String contentClusterName, ContentCluster contentCluster) { if (admin == null) return; if (contentCluster.getPersistence() == null) return; ClusterControllerContainerCluster clusterControllers; ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster); if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) { clusterControllers = overlappingCluster.getClusterControllers(); } else if (admin.multitenant()) { String clusterName = contentClusterName + "-controllers"; NodesSpecification nodesSpecification = NodesSpecification.optionalDedicatedFromParent(contentElement.child("controllers"), context) .orElse(NodesSpecification.nonDedicated(3, context)); Collection<HostResource> hosts = nodesSpecification.isDedicated() ? 
getControllerHosts(nodesSpecification, admin, clusterName, context) : drawControllerHosts(nodesSpecification.minResources().nodes(), rootGroup, containers); clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true, context.getDeployState()); contentCluster.clusterControllers = clusterControllers; } else { clusterControllers = admin.getClusterControllers(); if (clusterControllers == null) { List<HostResource> hosts = admin.getClusterControllerHosts(); if (hosts.size() > 1) { context.getDeployState().getDeployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly."); } clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false, context.getDeployState()); admin.setClusterControllers(clusterControllers); } } addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster); ReindexingContext reindexingContext = clusterControllers.reindexingContext(); contentCluster.documentDefinitions.values() .forEach(type -> reindexingContext.addDocumentType(contentCluster.clusterId, type)); } /** Returns any other content cluster which shares nodes with this, or null if none are built */ private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) { for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) { if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster)) return otherContentCluster; } return null; } private boolean overlaps(ContentCluster c1, ContentCluster c2) { Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); return ! 
Sets.intersection(c1Hosts, c2Hosts).isEmpty(); } private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) { return nodesSpecification.provision(admin.hostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger(), false).keySet(); } private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawControllerHosts(count, false, rootGroup, containers); List<HostResource> retiredHosts = drawControllerHosts(count, true, rootGroup, containers); List<HostResource> all = new ArrayList<>(hosts); all.addAll(retiredHosts); return all; } private List<HostResource> drawControllerHosts(int count, boolean retired, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawContentHostsRecursively(count, retired, rootGroup); if (hosts.size() % 2 == 0 && ! hosts.isEmpty()) hosts = hosts.subList(0, hosts.size()-1); return hosts; } /** * Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible * if less than <code>count</code> are available. * * This will draw the same nodes each time it is * invoked if cluster names and node indexes are unchanged. */ private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters, Set<HostResource> usedHosts) { if (containerClusters.isEmpty()) return Collections.emptyList(); List<HostResource> allHosts = new ArrayList<>(); for (ApplicationContainerCluster cluster : clustersSortedByName(containerClusters)) allHosts.addAll(hostResourcesSortedByIndex(cluster)); List<HostResource> uniqueHostsWithoutClusterController = allHosts.stream() .filter(h -> ! usedHosts.contains(h)) .filter(h -> ! 
hostHasClusterController(h.getHostname(), allHosts)) .distinct() .collect(Collectors.toList()); return uniqueHostsWithoutClusterController.subList(0, Math.min(uniqueHostsWithoutClusterController.size(), count)); } private List<ApplicationContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) { return containerModels.stream() .map(ContainerModel::getCluster) .filter(cluster -> cluster instanceof ApplicationContainerCluster) .map(cluster -> (ApplicationContainerCluster) cluster) .sorted(Comparator.comparing(ContainerCluster::getName)) .collect(Collectors.toList()); } private List<HostResource> hostResourcesSortedByIndex(ApplicationContainerCluster cluster) { return cluster.getContainers().stream() .sorted(Comparator.comparing(Container::index)) .map(Container::getHostResource) .collect(Collectors.toList()); } /** Returns whether any host having the given hostname has a cluster controller */ private boolean hostHasClusterController(String hostname, List<HostResource> hosts) { for (HostResource host : hosts) { if ( ! host.getHostname().equals(hostname)) continue; if (hasClusterController(host)) return true; } return false; } private boolean hasClusterController(HostResource host) { for (Service service : host.getServices()) if (service instanceof ClusterControllerContainer) return true; return false; } /** * Draw <code>count</code> nodes from as many different content groups below this as possible. * This will only achieve maximum spread in the case where the groups are balanced and never on the same * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy. 
*/ private List<HostResource> drawContentHostsRecursively(int count, boolean retired, StorageGroup group) { Set<HostResource> hosts = new HashSet<>(); if (group.getNodes().isEmpty()) { int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size()); for (StorageGroup subgroup : group.getSubgroups()) hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, retired, subgroup)); } else { hosts.addAll(group.getNodes().stream() .filter(node -> node.isRetired() == retired) .map(StorageNode::getHostResource).collect(Collectors.toList())); } List<HostResource> sortedHosts = new ArrayList<>(hosts); sortedHosts.sort((a, b) -> (a.comparePrimarilyByIndexTo(b))); sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size())); return sortedHosts; } private ClusterControllerContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant, DeployState deployState) { var clusterControllers = new ClusterControllerContainerCluster(parent, name, name, deployState); List<ClusterControllerContainer> containers = new ArrayList<>(); if (clusterControllers.getContainers().isEmpty()) { int index = 0; for (HostResource host : hosts) { var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant, deployState); clusterControllerContainer.setHostResource(host); clusterControllerContainer.initService(deployState.getDeployLogger()); clusterControllerContainer.setProp("clustertype", "admin") .setProp("clustername", clusterControllers.getName()) .setProp("index", String.valueOf(index)); containers.add(clusterControllerContainer); ++index; } } clusterControllers.addContainers(containers); return clusterControllers; } private void addClusterControllerComponentsForThisCluster(ClusterControllerContainerCluster clusterControllers, ContentCluster contentCluster) { int index = 0; for (var container : clusterControllers.getContainers()) { if ( ! 
hasClusterControllerComponent(container)) container.addComponent(new ClusterControllerComponent()); container.addComponent(new ClusterControllerConfigurer(contentCluster, index++, clusterControllers.getContainers().size())); } } private boolean hasClusterControllerComponent(Container container) { for (Object o : container.getComponents().getComponents()) if (o instanceof ClusterControllerComponent) return true; return false; } }
Add the sessionId (and the caught exception) to the warning message, so failed session loads can be diagnosed?
/**
 * Loads all local sessions found on disk, in parallel, on the given executor.
 * Each subdirectory of the sessions path whose name is all digits (see
 * sessionApplicationsFilter) is treated as a session id and loaded via
 * createSessionFromId. Failures to load one session are logged and do not
 * prevent the remaining sessions from loading.
 *
 * @param executor executor used to load sessions concurrently
 */
private void loadLocalSessions(ExecutorService executor) {
    File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    if (sessions == null) return; // sessions path does not exist (or is not a directory)

    // Keep the session id alongside its future so a failure can be attributed to a session.
    Map<Long, Future<?>> futures = new HashMap<>();
    for (File session : sessions) {
        long sessionId = Long.parseLong(session.getName()); // filter guarantees digits only
        futures.put(sessionId, executor.submit(() -> createSessionFromId(sessionId)));
    }
    futures.forEach((sessionId, future) -> {
        try {
            future.get();
            log.log(Level.INFO, () -> "Local session " + sessionId + " loaded");
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status for callers
            log.log(Level.WARNING, "Could not load session " + sessionId, e);
        } catch (ExecutionException e) {
            // Include the session id and the cause; a bare "Could not load session" is undiagnosable.
            log.log(Level.WARNING, "Could not load session " + sessionId, e);
        }
    });
}
log.log(Level.WARNING, "Could not load session");
/** Loads every local session found on disk, one load task per session directory, on the given executor. */
private void loadLocalSessions(ExecutorService executor) {
    File[] sessionDirs = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
    if (sessionDirs == null) return; // nothing on disk yet

    // Submit one load task per session, remembering which id each future belongs to.
    Map<Long, Future<?>> pending = new HashMap<>();
    for (File dir : sessionDirs) {
        long id = Long.parseLong(dir.getName()); // directory names are all digits by the filter
        pending.put(id, executor.submit(() -> createSessionFromId(id)));
    }
    // Wait for each load to finish and report its outcome, tagged with the session id.
    for (Map.Entry<Long, Future<?>> entry : pending.entrySet()) {
        long id = entry.getKey();
        try {
            entry.getValue().get();
            log.log(Level.INFO, () -> "Local session " + id + " loaded");
        } catch (ExecutionException | InterruptedException e) {
            log.log(Level.WARNING, "Could not load session " + id, e);
        }
    }
}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Object monitor = new Object(); private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>()); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final PermanentApplicationPackage permanentApplicationPackage; private final FlagSource flagSource; private final TenantFileSystemDirs tenantFileSystemDirs; private final Metrics metrics; private final MetricUpdater metricUpdater; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final ConfigCurator configCurator; private final SessionCounter sessionCounter; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final TenantListener tenantListener; public SessionRepository(TenantName tenantName, TenantApplications applicationRepo, SessionPreparer sessionPreparer, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, PermanentApplicationPackage permanentApplicationPackage, FlagSource flagSource, 
ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, TenantListener tenantListener) { this.tenantName = tenantName; this.configCurator = ConfigCurator.create(curator); sessionCounter = new SessionCounter(configCurator, tenantName); this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = clock; this.curator = curator; this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime()); this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command); this.permanentApplicationPackage = permanentApplicationPackage; this.flagSource = flagSource; this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.metrics = metrics; this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configserverConfig = configserverConfig; this.configServerDB = configServerDB; this.zone = zone; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.tenantListener = tenantListener; loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("load-sessions-")); loadLocalSessions(executor); loadRemoteSessions(executor); try { executor.shutdown(); if ( ! 
executor.awaitTermination(1, TimeUnit.MINUTES)) log.log(Level.INFO, "Executor did not terminate"); } catch (InterruptedException e) { e.printStackTrace(); } } public void addLocalSession(LocalSession session) { long sessionId = session.getSessionId(); localSessionCache.put(sessionId, session); remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); } public Collection<LocalSession> getLocalSessions() { return localSessionCache.values(); } public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter(); Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) { ApplicationId existingApplicationId = existingSession.getApplicationId(); File existingApp = getSessionAppDir(existingSession.getSessionId()); LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } /** * Creates a local session based on a remote session and the distributed application package. * Does not wait for session being created on other servers. 
*/ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.values()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); remoteSessionCache.put(sessionId, session); loadSessionIfActive(session); updateSessionStateWatcher(sessionId, session); return session; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessionsFromZooKeeper()) { Session session = remoteSessionCache.get(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { 
log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException { List<Future<Long>> futures = new ArrayList<>(); for (long sessionId : getRemoteSessionsFromZooKeeper()) { futures.add(executor.submit(() -> sessionAdded(sessionId))); } futures.forEach(f -> { try { long sessionId = f.get(); log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session"); } }); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. 
* * @param sessionId session id for the new session */ public long sessionAdded(long sessionId) { if (hasStatusDeleted(sessionId)) return sessionId; log.log(Level.FINE, () -> "Adding remote session " + sessionId); Session session = createRemoteSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } createLocalSessionFromDistributedApplicationPackage(sessionId); return sessionId; } private boolean hasStatusDeleted(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); return session.getStatus() == Session.Status.DELETE; } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId); applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void delete(Session remoteSession) { long sessionId = remoteSession.getSessionId(); log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId); createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit(); deleteRemoteSessionFromZooKeeper(remoteSession); remoteSessionCache.remove(sessionId); LocalSession localSession = getLocalSession(sessionId); if (localSession != null) { log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId); deleteLocalSession(localSession); } } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) 
{ if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo, tenantListener); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (Session session : remoteSessionCache.values()) { sessionMetrics.add(session.getStatus()); } metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); 
metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; default: break; } }); } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); Set<LocalSession> toDelete = new HashSet<>(); try { for (LocalSession candidate : localSessionCache.values()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { toDelete.add(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { toDelete.add(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } toDelete.forEach(this::deleteLocalSession); } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } private void 
ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (configCurator.exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); addLocalSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { 
synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { RemoteSession session = getRemoteSession(sessionId); applicationSet = Optional.ofNullable(ensureApplicationLoaded(session)); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. 
*/ long createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); createLocalSession(sessionId, applicationPackage); return sessionId; } void createLocalSession(long sessionId, ApplicationPackage applicationPackage) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); addLocalSession(session); } /** * Returns a new local session for the given session id if it does not already exist. * Will also add the session to the local session cache if necessary */ public void createLocalSessionFromDistributedApplicationPackage(long sessionId) { if (applicationRepo.sessionExistsInFileSystem(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); createSessionFromId(sessionId); return; } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return; } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); 
createLocalSession(sessionDir, applicationId, sessionId); } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return sessionCounter.nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = configserverConfig.serverId(); return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(remoteSession); } } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public void close() { 
deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> existingSessions) { for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) { long sessionId = it.next().sessionId; if (existingSessions.contains(sessionId)) continue; SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); it.remove(); metricUpdater.incRemovedSessions(); } } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.get(sessionId) == null) sessionAdded(sessionId); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } public Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for 
file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
/**
 * Keeps track of the sessions (local and remote) belonging to one tenant.
 * Local sessions live on this config server's file system; remote sessions are
 * mirrored from ZooKeeper via a directory cache, so all config servers agree on
 * session state. All caches are synchronized maps, and ZooKeeper watcher events
 * are funneled through a per-tenant striped executor to serialize handling.
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session application dirs are named by their numeric session id.
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    // Sentinel written to deploy metadata when there is no currently active session.
    private static final long nonExistingActiveSessionId = 0;

    // Guards application package creation (see createApplicationPackage).
    private final Object monitor = new Object();
    private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>());
    private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>());
    // One watcher per remote session, watching its ZooKeeper session-state node.
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>());
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final PermanentApplicationPackage permanentApplicationPackage;
    private final FlagSource flagSource;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final Metrics metrics;
    private final MetricUpdater metricUpdater;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final ConfigCurator configCurator;
    private final SessionCounter sessionCounter;
    private final SecretStore secretStore;
    private final HostProvisionerProvider hostProvisionerProvider;
    private final ConfigserverConfig configserverConfig;
    private final ConfigServerDB configServerDB;
    private final Zone zone;
    private final ModelFactoryRegistry modelFactoryRegistry;
    private final ConfigDefinitionRepo configDefinitionRepo;
    private final TenantListener tenantListener;

    public SessionRepository(TenantName tenantName,
                             TenantApplications applicationRepo,
                             SessionPreparer sessionPreparer,
                             Curator curator,
                             Metrics metrics,
                             StripedExecutor<TenantName> zkWatcherExecutor,
                             PermanentApplicationPackage permanentApplicationPackage,
                             FlagSource flagSource,
                             ExecutorService zkCacheExecutor,
                             SecretStore secretStore,
                             HostProvisionerProvider hostProvisionerProvider,
                             ConfigserverConfig configserverConfig,
                             ConfigServerDB configServerDB,
                             Zone zone,
                             Clock clock,
                             ModelFactoryRegistry modelFactoryRegistry,
                             ConfigDefinitionRepo configDefinitionRepo,
                             TenantListener tenantListener) {
        this.tenantName = tenantName;
        this.configCurator = ConfigCurator.create(curator);
        sessionCounter = new SessionCounter(configCurator, tenantName);
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = clock;
        this.curator = curator;
        this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime());
        // All ZooKeeper watcher callbacks for this tenant run serialized on its stripe.
        this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command);
        this.permanentApplicationPackage = permanentApplicationPackage;
        this.flagSource = flagSource;
        this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.metrics = metrics;
        this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        this.secretStore = secretStore;
        this.hostProvisionerProvider = hostProvisionerProvider;
        this.configserverConfig = configserverConfig;
        this.configServerDB = configServerDB;
        this.zone = zone;
        this.modelFactoryRegistry = modelFactoryRegistry;
        this.configDefinitionRepo = configDefinitionRepo;
        this.tenantListener = tenantListener;

        // Load all existing sessions before starting the directory cache, so the
        // initial cache events see a populated repository.
        loadSessions();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    /** Loads local and remote sessions in parallel at construction time, bounded to one minute. */
    private void loadSessions() {
        ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
                                                                new DaemonThreadFactory("load-sessions-"));
        loadLocalSessions(executor);
        loadRemoteSessions(executor);
        try {
            executor.shutdown();
            if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
                log.log(Level.INFO, "Executor did not terminate");
        } catch (InterruptedException e) {
            // NOTE(review): the interrupt flag is not restored here — possibly intentional
            // during startup, but worth confirming.
            log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e));
        }
    }

    /** Caches the local session, and makes sure a matching remote session exists. */
    public void addLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        localSessionCache.put(sessionId, session);
        remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId));
    }

    public LocalSession getLocalSession(long sessionId) {
        return localSessionCache.get(sessionId);
    }

    public Collection<LocalSession> getLocalSessions() {
        return localSessionCache.values();
    }

    /**
     * Prepares a local session: creates the application, runs the session preparer,
     * marks the session PREPARE and waits for all servers to acknowledge.
     *
     * @return the config change actions resulting from preparing
     */
    public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter();
        Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              activeApplicationSet, now, getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                .getConfigChangeActions();
        setPrepared(session);
        waiter.awaitCompletion(params.getTimeoutBudget().timeLeft());
        return actions;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget);
        // Carry deployment settings over from the base session.
        session.setApplicationId(existingApplicationId);
        session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) {
        applicationRepo.createApplication(applicationId);
        return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget);
    }

    /**
     * Creates a local session based on a remote session and the distributed application package.
     * Does not wait for session being created on other servers.
     */
    private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) {
        try {
            ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false);
            createLocalSession(sessionId, applicationPackage);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /** Removes the session from caches, closes its watcher and deletes its files on disk. */
    public void deleteLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        log.log(Level.FINE, () -> "Deleting local session " + sessionId);
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        localSessionCache.remove(sessionId);
        NestedTransaction transaction = new NestedTransaction();
        transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
        transaction.commit();
    }

    private void deleteAllSessions() {
        // Copy to avoid mutating the cache while iterating over its values.
        List<LocalSession> sessions = new ArrayList<>(localSessionCache.values());
        for (LocalSession session : sessions) {
            deleteLocalSession(session);
        }
    }

    public RemoteSession getRemoteSession(long sessionId) {
        return remoteSessionCache.get(sessionId);
    }

    /** Returns the session ids that currently exist under this tenant's sessions path in ZooKeeper. */
    public List<Long> getRemoteSessionsFromZooKeeper() {
        return getSessionList(curator.getChildren(sessionsPath));
    }

    /** Creates and caches a remote session, loading its application if it is the active one. */
    public RemoteSession createRemoteSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        remoteSessionCache.put(sessionId, session);
        loadSessionIfActive(session);
        updateSessionStateWatcher(sessionId, session);
        return session;
    }

    /**
     * Deletes expired, non-active remote sessions from ZooKeeper.
     *
     * @return the number of sessions deleted
     */
    public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) {
        int deleted = 0;
        for (long sessionId : getRemoteSessionsFromZooKeeper()) {
            Session session = remoteSessionCache.get(sessionId);
            if (session == null) continue;
            if (session.getStatus() == Session.Status.ACTIVATE) continue; // never expire the active session
            if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
                log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
                deleteRemoteSessionFromZooKeeper(session);
                deleted++;
            }
        }
        return deleted;
    }

    /** Replaces the cached remote session with a deactivated copy of it. */
    public void deactivateAndUpdateCache(RemoteSession remoteSession) {
        RemoteSession session = remoteSession.deactivated();
        remoteSessionCache.put(session.getSessionId(), session);
    }

    public void deleteRemoteSessionFromZooKeeper(Session session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Transaction transaction = sessionZooKeeperClient.deleteTransaction();
        transaction.commit();
        transaction.close();
    }

    private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
        return (created.plus(expiryTime).isBefore(clock.instant()));
    }

    private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
        return getSessionList(children.stream()
                                      .map(child -> Path.fromString(child.getPath()).getName())
                                      .collect(Collectors.toList()));
    }

    // Throws NumberFormatException if a child name is not a numeric session id.
    private List<Long> getSessionList(List<String> children) {
        return children.stream().map(Long::parseLong).collect(Collectors.toList());
    }

    /** Loads all remote sessions in parallel on the given executor, logging per-session outcome. */
    private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException {
        Map<Long, Future<?>> futures = new HashMap<>();
        for (long sessionId : getRemoteSessionsFromZooKeeper()) {
            futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId)));
        }
        futures.forEach((sessionId, future) -> {
            try {
                future.get();
                log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded");
            } catch (ExecutionException | InterruptedException e) {
                // Best effort: a failed session load is logged, not fatal for startup.
                log.log(Level.WARNING, "Could not load session " + sessionId, e);
            }
        });
    }

    /**
     * A session for which we don't have a watcher, i.e. hitherto unknown to us.
     *
     * @param sessionId session id for the new session
     */
    public void sessionAdded(long sessionId) {
        if (hasStatusDeleted(sessionId)) return;

        log.log(Level.FINE, () -> "Adding remote session " + sessionId);
        Session session = createRemoteSession(sessionId);
        if (session.getStatus() == Session.Status.NEW) {
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
        createLocalSessionFromDistributedApplicationPackage(sessionId);
    }

    private boolean hasStatusDeleted(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        return session.getStatus() == Session.Status.DELETE;
    }

    /** Activates the session's application and notifies the activation waiter. */
    void activate(RemoteSession session) {
        long sessionId = session.getSessionId();
        Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId);
        applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter, session);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    /** Marks the session DELETE, removes it from ZooKeeper and deletes the local counterpart, if any. */
    public void delete(Session remoteSession) {
        long sessionId = remoteSession.getSessionId();
        log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId);
        createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit();
        deleteRemoteSessionFromZooKeeper(remoteSession);
        remoteSessionCache.remove(sessionId);
        LocalSession localSession = getLocalSession(sessionId);
        if (localSession != null) {
            log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId);
            deleteLocalSession(localSession);
        }
    }

    /** If this session is the active one for some application, load and activate its models. */
    private void loadSessionIfActive(RemoteSession session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications()) {
            if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) {
                log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
                applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId());
                log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId +
                                    " (generation " + session.getSessionId() + ")");
                return;
            }
        }
    }

    void prepareRemoteSession(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded(session);
        notifyCompletion(waiter, session);
    }

    /**
     * Returns the session's application set, building it (and caching the resulting
     * activated session) if it is not already loaded.
     */
    public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
        if (session.applicationSet().isPresent()) {
            return session.applicationSet().get();
        }
        // Reuse the currently active application set as a base only if this session is newer.
        Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId());
        Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan)
                                                                         .flatMap(this::getApplicationSet);
        ApplicationSet applicationSet = loadApplication(session, previousApplicationSet);
        RemoteSession activated = session.activated(applicationSet);
        long sessionId = activated.getSessionId();
        remoteSessionCache.put(sessionId, activated);
        updateSessionStateWatcher(sessionId, activated);

        return applicationSet;
    }

    /** Notifies the upload waiter so the uploading server knows this server has seen the session. */
    void confirmUpload(Session session) {
        Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
        long sessionId = session.getSessionId();
        log.log(Level.FINE, "Notifying upload waiter for session " + sessionId);
        notifyCompletion(waiter, session);
        log.log(Level.FINE, "Done notifying upload for session " + sessionId);
    }

    void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) {
        try {
            completionWaiter.notifyCompletion();
        } catch (RuntimeException e) {
            // Benign races: the waiter node may already be gone (session deleted) or
            // already created by another server. Anything else is rethrown.
            Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                              KeeperException.NodeExistsException.class);
            Class<? extends Throwable> exceptionClass = e.getCause().getClass();
            if (acceptedExceptions.contains(exceptionClass))
                log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() +
                                    " (" + completionWaiter + ")," +
                                    " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                                                ? "has been deleted"
                                                : "already exists"));
            else
                throw e;
        }
    }

    /** Builds the application models for a session from its package stored in ZooKeeper. */
    private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) {
        log.log(Level.FINE, () -> "Loading application for " + session);
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
        ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
                                                                    session.getSessionId(),
                                                                    sessionZooKeeperClient,
                                                                    previousApplicationSet,
                                                                    curator,
                                                                    metrics,
                                                                    permanentApplicationPackage,
                                                                    flagSource,
                                                                    secretStore,
                                                                    hostProvisionerProvider,
                                                                    configserverConfig,
                                                                    zone,
                                                                    modelFactoryRegistry,
                                                                    configDefinitionRepo,
                                                                    tenantListener);
        SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts());
        return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                           sessionZooKeeperClient.readDockerImageRepository(),
                                                           sessionZooKeeperClient.readVespaVersion(),
                                                           applicationPackage,
                                                           allocatedHosts,
                                                           clock.instant()));
    }

    /** Recomputes the per-status session count metrics on session state change. */
    private void nodeChanged() {
        zkWatcherExecutor.execute(() -> {
            Multiset<Session.Status> sessionMetrics = HashMultiset.create();
            for (Session session : remoteSessionCache.values()) {
                sessionMetrics.add(session.getStatus());
            }
            metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW));
            metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
            metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    /** Directory cache callback: re-syncs the session caches when session nodes come and go. */
    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
                default:
                    break;
            }
        });
    }

    /**
     * Deletes local sessions that have expired, or that are older than a day and are
     * not the active session for their application.
     *
     * @param activeSessions map of application id to its currently active session id
     */
    public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
        log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
        Set<LocalSession> toDelete = new HashSet<>();
        try {
            for (LocalSession candidate : localSessionCache.values()) {
                Instant createTime = candidate.getCreateTime();
                log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);

                if (hasExpired(candidate) && !isActiveSession(candidate)) {
                    toDelete.add(candidate);
                } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                    Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                    if (applicationId.isEmpty()) continue;
                    Long activeSession = activeSessions.get(applicationId.get());
                    if (activeSession == null || activeSession != candidate.getSessionId()) {
                        toDelete.add(candidate);
                        log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                            createTime + " for '" + applicationId + "'");
                    }
                }
            }

            toDelete.forEach(this::deleteLocalSession);
        } catch (Throwable e) {
            // Best effort: purging must never take down the caller.
            log.log(Level.WARNING, "Error when purging old sessions ", e);
        }
        log.log(Level.FINE, () -> "Done purging old sessions");
    }

    private boolean hasExpired(LocalSession candidate) {
        return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant());
    }

    private boolean isActiveSession(LocalSession candidate) {
        return candidate.getStatus() == Session.Status.ACTIVATE;
    }

    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (configCurator.exists(sessionPath.getAbsolute())) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    /** Builds the application package plus deploy metadata for a new session. */
    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) {
            user = "unknown";
        }
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    /** Allocates a new session id, creates the session in ZooKeeper and waits for upload completion. */
    private LocalSession createSessionFromApplication(File applicationFile,
                                                      ApplicationId applicationId,
                                                      boolean internalRedeploy,
                                                      TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy);
            log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
            SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
            sessionZKClient.createNewSession(clock.instant());
            Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
            LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient);
            waiter.awaitCompletion(timeoutBudget.timeLeft());
            addLocalSession(session);
            return session;
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    private ApplicationPackage createApplicationPackage(File applicationFile,
                                                        ApplicationId applicationId,
                                                        long sessionId,
                                                        boolean internalRedeploy) throws IOException {
        // Synchronized so the active session id read and the package copy happen atomically.
        synchronized (monitor) {
            Optional<Long> activeSessionId = getActiveSessionId(applicationId);
            File userApplicationDir = getSessionAppDir(sessionId);
            copyApp(applicationFile, userApplicationDir);
            ApplicationPackage applicationPackage = createApplication(applicationFile,
                                                                      userApplicationDir,
                                                                      applicationId,
                                                                      sessionId,
                                                                      activeSessionId,
                                                                      internalRedeploy);
            applicationPackage.writeMetaData();
            return applicationPackage;
        }
    }

    public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
        return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet);
    }

    private Optional<ApplicationSet> getApplicationSet(long sessionId) {
        Optional<ApplicationSet> applicationSet = Optional.empty();
        try {
            RemoteSession session = getRemoteSession(sessionId);
            applicationSet = Optional.ofNullable(ensureApplicationLoaded(session));
        } catch (IllegalArgumentException e) {
            // Session may no longer exist or be loadable — treat as absent.
        }
        return applicationSet;
    }

    /** Copies via a temp dir and an atomic move so a partial copy is never visible. */
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists()) {
            log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied");
            return;
        }
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");

        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            // In the success case the temp dir is already gone (moved), so this is a no-op.
            if (tempDestinationDir != null)
                IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    /**
     * Returns a new session instance for the given session id.
     */
    void createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        createLocalSession(sessionId, applicationPackage);
    }

    void createLocalSession(long sessionId, ApplicationPackage applicationPackage) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
        addLocalSession(session);
    }

    /**
     * Returns a new local session for the given session id if it does not already exist.
     * Will also add the session to the local session cache if necessary
     */
    public void createLocalSessionFromDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
            log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
            createSessionFromId(sessionId);
            return;
        }

        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // File distribution may not have delivered the package to this server yet.
                log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return;
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
            createLocalSession(sessionDir, applicationId, sessionId);
        }
    }

    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    private long getNextSessionId() {
        return sessionCounter.nextSessionId();
    }

    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
    }

    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        String serverId = configserverConfig.serverId();
        return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId);
    }

    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId);
    }

    /** Creates a state watcher for the session if none exists, else points the existing one at this session instance. */
    private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) {
        SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId);
        if (sessionStateWatcher == null) {
            Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
            fileCache.addListener(this::nodeChanged);
            sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this));
        } else {
            sessionStateWatcher.updateRemoteSession(remoteSession);
        }
    }

    @Override
    public String toString() {
        return getLocalSessions().toString();
    }

    public Clock clock() { return clock; }

    /** Deletes all sessions and shuts down the ZooKeeper directory cache. */
    public void close() {
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            // Empty list => every cached remote session counts as removed.
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    private void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    /** Evicts cached remote sessions (and their watchers) that no longer exist in ZooKeeper. */
    private void checkForRemovedSessions(List<Long> existingSessions) {
        for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) {
            long sessionId = it.next().sessionId;
            if (existingSessions.contains(sessionId)) continue;

            SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
            if (watcher != null) watcher.close();
            it.remove();
            metricUpdater.incRemovedSessions();
        }
    }

    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (remoteSessionCache.get(sessionId) == null)
                sessionAdded(sessionId);
    }

    /** Returns a transaction that both sets the session ACTIVATE and records it as the application's active session. */
    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    public Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) {
        session.setStatus(Session.Status.PREPARE);
    }

    /** A transaction over file operations: commit runs each operation; there is no rollback. */
    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }

    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }

    }

    private interface FileOperation extends Transaction.Operation {

        void commit();

    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }

    }

}
🌬️ Barely made it! 😓 The idea was simple: take a builder for the similar config and do a deep conversion of it into this config's builder type. Starting from `clusterBuilder.group(...)`, everything inside is one group builder, or a list of them — and I thought streams would make that clearer. I see now that the "simple conversion" wasn't obvious at all, though, since the first thing the reader meets is `getRootGroup().getGroupStructureConfig()...` 😖 Fine, fine — I offer this next commit in penitence.
/**
 * Fills in this cluster's distribution config by first building the complete
 * StorDistributionConfig through the existing getConfig overload and then
 * copying every field over. Delegating keeps the two configs in lockstep.
 *
 * The previous stream-based conversion dropped active_per_leaf_group,
 * ready_copies and initial_redundancy, and derived redundancy from
 * redundancy().effectiveFinalRedundancy() instead of the authoritative
 * StorDistributionConfig, so the two configs could disagree.
 *
 * @param builder the distribution config builder this cluster's config is added to
 */
public void getConfig(DistributionConfig.Builder builder) {
    DistributionConfig.Cluster.Builder clusterBuilder = new DistributionConfig.Cluster.Builder();
    StorDistributionConfig.Builder storDistributionBuilder = new StorDistributionConfig.Builder();
    getConfig(storDistributionBuilder);
    StorDistributionConfig config = storDistributionBuilder.build();

    // Scalar fields — copied one-to-one from the stor-distribution config.
    clusterBuilder.active_per_leaf_group(config.active_per_leaf_group());
    clusterBuilder.ready_copies(config.ready_copies());
    clusterBuilder.redundancy(config.redundancy());
    clusterBuilder.initial_redundancy(config.initial_redundancy());

    // Group structure, including each group's nodes.
    for (StorDistributionConfig.Group group : config.group()) {
        DistributionConfig.Cluster.Group.Builder groupBuilder = new DistributionConfig.Cluster.Group.Builder();
        groupBuilder.index(group.index())
                    .name(group.name())
                    .capacity(group.capacity())
                    .partitions(group.partitions());

        for (var node : group.nodes()) {
            DistributionConfig.Cluster.Group.Nodes.Builder nodesBuilder = new DistributionConfig.Cluster.Group.Nodes.Builder();
            nodesBuilder.index(node.index())
                        .retired(node.retired());
            groupBuilder.nodes(nodesBuilder);
        }

        clusterBuilder.group(groupBuilder);
    }

    builder.cluster(getConfigId(), clusterBuilder);
}
.collect(toList()));
/**
 * Fills in this cluster's distribution config. Rather than converting field by
 * field from scratch, it builds the full StorDistributionConfig via the
 * existing getConfig overload and copies every value over, so the two config
 * types cannot drift apart.
 *
 * @param builder the distribution config builder this cluster's config is added to
 */
public void getConfig(DistributionConfig.Builder builder) {
    DistributionConfig.Cluster.Builder clusterBuilder = new DistributionConfig.Cluster.Builder();
    // Build the authoritative stor-distribution config first, then mirror it.
    StorDistributionConfig.Builder storDistributionBuilder = new StorDistributionConfig.Builder();
    getConfig(storDistributionBuilder);
    StorDistributionConfig config = storDistributionBuilder.build();

    // Scalar fields, copied one-to-one.
    clusterBuilder.active_per_leaf_group(config.active_per_leaf_group());
    clusterBuilder.ready_copies(config.ready_copies());
    clusterBuilder.redundancy(config.redundancy());
    clusterBuilder.initial_redundancy(config.initial_redundancy());

    // Group structure, including each group's nodes.
    for (StorDistributionConfig.Group group : config.group()) {
        DistributionConfig.Cluster.Group.Builder groupBuilder = new DistributionConfig.Cluster.Group.Builder();
        groupBuilder.index(group.index())
                    .name(group.name())
                    .capacity(group.capacity())
                    .partitions(group.partitions());

        for (var node : group.nodes()) {
            DistributionConfig.Cluster.Group.Nodes.Builder nodesBuilder = new DistributionConfig.Cluster.Group.Nodes.Builder();
            nodesBuilder.index(node.index())
                        .retired(node.retired());
            groupBuilder.nodes(nodesBuilder);
        }

        clusterBuilder.group(groupBuilder);
    }

    builder.cluster(getConfigId(), clusterBuilder);
}
class Builder { /** The admin model of this system or null if none (which only happens in tests) */ private final Admin admin; public Builder(Admin admin) { this.admin = admin; } public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) { ModelElement contentElement = new ModelElement(w3cContentElement); DeployState deployState = context.getDeployState(); ModelElement documentsElement = contentElement.child("documents"); Map<String, NewDocumentType> documentDefinitions = new SearchDefinitionBuilder().build(deployState.getDocumentModel().getDocumentManager(), documentsElement); String routingSelection = new DocumentSelectionBuilder().build(documentsElement); RedundancyBuilder redundancyBuilder = new RedundancyBuilder(contentElement); Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement); ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterId(contentElement), documentDefinitions, globallyDistributedDocuments, routingSelection, deployState.zone(), deployState.isHosted()); c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterId(contentElement), contentElement).build(deployState, c, contentElement.getXml()); c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments, isCombined(getClusterId(contentElement), containers)) .build(deployState, c, contentElement.getXml()); c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c); c.storageNodes = new StorageCluster.Builder().build(deployState, c, w3cContentElement); c.distributorNodes = new DistributorCluster.Builder(c).build(deployState, c, w3cContentElement); c.rootGroup = new StorageGroup.Builder(contentElement, context).buildRootGroup(deployState, redundancyBuilder, c); validateThatGroupSiblingsAreUnique(c.clusterId, c.rootGroup); c.search.handleRedundancy(c.redundancy); 
setupSearchCluster(c.search, contentElement, deployState.getDeployLogger()); if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) throw new RuntimeException("Indexed search requires proton as engine"); if (documentsElement != null) { ModelElement e = documentsElement.child("document-processing"); if (e != null) setupDocumentProcessing(c, e); } else if (c.persistenceFactory != null) { throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified."); } ModelElement tuning = contentElement.child("tuning"); if (tuning != null) setupTuning(c, tuning); ModelElement experimental = contentElement.child("experimental"); if (experimental != null) setupExperimental(c, experimental); if (context.getParentProducer().getRoot() == null) return c; addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterId, c); return c; } private void setupSearchCluster(ContentSearchCluster csc, ModelElement element, DeployLogger logger) { ContentSearch search = DomContentSearchBuilder.build(element); Double visibilityDelay = search.getVisibilityDelay(); if (visibilityDelay != null) { csc.setVisibilityDelay(visibilityDelay); } if (csc.hasIndexedCluster()) { setupIndexedCluster(csc.getIndexed(), search, element, logger); } } private void setupIndexedCluster(IndexedSearchCluster index, ContentSearch search, ModelElement element, DeployLogger logger) { Double queryTimeout = search.getQueryTimeout(); if (queryTimeout != null) { Preconditions.checkState(index.getQueryTimeout() == null, "In " + index + ": You may not specify query-timeout in both proton and content."); index.setQueryTimeout(queryTimeout); } index.setSearchCoverage(DomSearchCoverageBuilder.build(element)); index.setDispatchSpec(DomDispatchBuilder.build(element)); if (index.getTuning() == null) index.setTuning(new Tuning(index)); index.getTuning().dispatch = DomTuningDispatchBuilder.build(element, logger); } private void 
setupDocumentProcessing(ContentCluster c, ModelElement e) { String docprocCluster = e.stringAttribute("cluster"); if (docprocCluster != null) { docprocCluster = docprocCluster.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocCluster != null && !docprocCluster.isEmpty()) { c.getSearch().getIndexed().setIndexingClusterName(docprocCluster); } } String docprocChain = e.stringAttribute("chain"); if (docprocChain != null) { docprocChain = docprocChain.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocChain != null && !docprocChain.isEmpty()) { c.getSearch().getIndexed().setIndexingChainName(docprocChain); } } } private void setupTuning(ContentCluster c, ModelElement tuning) { ModelElement distribution = tuning.child("distribution"); if (distribution != null) { String attr = distribution.stringAttribute("type"); if (attr != null) { if (attr.toLowerCase().equals("strict")) { c.distributionMode = DistributionMode.STRICT; } else if (attr.toLowerCase().equals("loose")) { c.distributionMode = DistributionMode.LOOSE; } else if (attr.toLowerCase().equals("legacy")) { c.distributionMode = DistributionMode.LEGACY; } else { throw new IllegalStateException("Distribution type " + attr + " not supported."); } } } ModelElement merges = tuning.child("merges"); if (merges != null) { Integer attr = merges.integerAttribute("max-nodes-per-merge"); if (attr != null) { c.maxNodesPerMerge = attr; } } } /** Returns whether this hosts one of the given container clusters */ private boolean isCombined(String clusterId, Collection<ContainerModel> containers) { return containers.stream() .map(model -> model.getCluster().getHostClusterId()) .filter(Optional::isPresent) .anyMatch(id -> id.get().equals(clusterId)); } private void setupExperimental(ContentCluster cluster, ModelElement experimental) { } private void validateGroupSiblings(String cluster, StorageGroup group) { Set<String> siblings = new HashSet<>(); for (StorageGroup g : group.getSubgroups()) { String name = 
g.getName(); if (siblings.contains(name)) { throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " + "with name '" + name + "' in the same subgroup. Group sibling names must be unique."); } siblings.add(name); } } private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) { if (group == null) { return; } validateGroupSiblings(cluster, group); for (StorageGroup g : group.getSubgroups()) { validateThatGroupSiblingsAreUnique(cluster, g); } } private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context, StorageGroup rootGroup, ModelElement contentElement, String contentClusterName, ContentCluster contentCluster) { if (admin == null) return; if (contentCluster.getPersistence() == null) return; ClusterControllerContainerCluster clusterControllers; ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster); if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) { clusterControllers = overlappingCluster.getClusterControllers(); } else if (admin.multitenant()) { String clusterName = contentClusterName + "-controllers"; NodesSpecification nodesSpecification = NodesSpecification.optionalDedicatedFromParent(contentElement.child("controllers"), context) .orElse(NodesSpecification.nonDedicated(3, context)); Collection<HostResource> hosts = nodesSpecification.isDedicated() ? 
getControllerHosts(nodesSpecification, admin, clusterName, context) : drawControllerHosts(nodesSpecification.minResources().nodes(), rootGroup, containers); clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true, context.getDeployState()); contentCluster.clusterControllers = clusterControllers; } else { clusterControllers = admin.getClusterControllers(); if (clusterControllers == null) { List<HostResource> hosts = admin.getClusterControllerHosts(); if (hosts.size() > 1) { context.getDeployState().getDeployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly."); } clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false, context.getDeployState()); admin.setClusterControllers(clusterControllers); } } addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster); ReindexingContext reindexingContext = clusterControllers.reindexingContext(); contentCluster.documentDefinitions.values() .forEach(type -> reindexingContext.addDocumentType(contentCluster.clusterId, type)); } /** Returns any other content cluster which shares nodes with this, or null if none are built */ private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) { for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) { if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster)) return otherContentCluster; } return null; } private boolean overlaps(ContentCluster c1, ContentCluster c2) { Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); return ! 
Sets.intersection(c1Hosts, c2Hosts).isEmpty(); } private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) { return nodesSpecification.provision(admin.hostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger(), false).keySet(); } private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawControllerHosts(count, false, rootGroup, containers); List<HostResource> retiredHosts = drawControllerHosts(count, true, rootGroup, containers); List<HostResource> all = new ArrayList<>(hosts); all.addAll(retiredHosts); return all; } private List<HostResource> drawControllerHosts(int count, boolean retired, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawContentHostsRecursively(count, retired, rootGroup); if (hosts.size() % 2 == 0 && ! hosts.isEmpty()) hosts = hosts.subList(0, hosts.size()-1); return hosts; } /** * Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible * if less than <code>count</code> are available. * * This will draw the same nodes each time it is * invoked if cluster names and node indexes are unchanged. */ private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters, Set<HostResource> usedHosts) { if (containerClusters.isEmpty()) return Collections.emptyList(); List<HostResource> allHosts = new ArrayList<>(); for (ApplicationContainerCluster cluster : clustersSortedByName(containerClusters)) allHosts.addAll(hostResourcesSortedByIndex(cluster)); List<HostResource> uniqueHostsWithoutClusterController = allHosts.stream() .filter(h -> ! usedHosts.contains(h)) .filter(h -> ! 
hostHasClusterController(h.getHostname(), allHosts)) .distinct() .collect(Collectors.toList()); return uniqueHostsWithoutClusterController.subList(0, Math.min(uniqueHostsWithoutClusterController.size(), count)); } private List<ApplicationContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) { return containerModels.stream() .map(ContainerModel::getCluster) .filter(cluster -> cluster instanceof ApplicationContainerCluster) .map(cluster -> (ApplicationContainerCluster) cluster) .sorted(Comparator.comparing(ContainerCluster::getName)) .collect(Collectors.toList()); } private List<HostResource> hostResourcesSortedByIndex(ApplicationContainerCluster cluster) { return cluster.getContainers().stream() .sorted(Comparator.comparing(Container::index)) .map(Container::getHostResource) .collect(Collectors.toList()); } /** Returns whether any host having the given hostname has a cluster controller */ private boolean hostHasClusterController(String hostname, List<HostResource> hosts) { for (HostResource host : hosts) { if ( ! host.getHostname().equals(hostname)) continue; if (hasClusterController(host)) return true; } return false; } private boolean hasClusterController(HostResource host) { for (Service service : host.getServices()) if (service instanceof ClusterControllerContainer) return true; return false; } /** * Draw <code>count</code> nodes from as many different content groups below this as possible. * This will only achieve maximum spread in the case where the groups are balanced and never on the same * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy. 
*/ private List<HostResource> drawContentHostsRecursively(int count, boolean retired, StorageGroup group) { Set<HostResource> hosts = new HashSet<>(); if (group.getNodes().isEmpty()) { int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size()); for (StorageGroup subgroup : group.getSubgroups()) hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, retired, subgroup)); } else { hosts.addAll(group.getNodes().stream() .filter(node -> node.isRetired() == retired) .map(StorageNode::getHostResource).collect(Collectors.toList())); } List<HostResource> sortedHosts = new ArrayList<>(hosts); sortedHosts.sort((a, b) -> (a.comparePrimarilyByIndexTo(b))); sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size())); return sortedHosts; } private ClusterControllerContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant, DeployState deployState) { var clusterControllers = new ClusterControllerContainerCluster(parent, name, name, deployState); List<ClusterControllerContainer> containers = new ArrayList<>(); if (clusterControllers.getContainers().isEmpty()) { int index = 0; for (HostResource host : hosts) { var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant, deployState); clusterControllerContainer.setHostResource(host); clusterControllerContainer.initService(deployState.getDeployLogger()); clusterControllerContainer.setProp("clustertype", "admin") .setProp("clustername", clusterControllers.getName()) .setProp("index", String.valueOf(index)); containers.add(clusterControllerContainer); ++index; } } clusterControllers.addContainers(containers); return clusterControllers; } private void addClusterControllerComponentsForThisCluster(ClusterControllerContainerCluster clusterControllers, ContentCluster contentCluster) { int index = 0; for (var container : clusterControllers.getContainers()) { if ( ! 
hasClusterControllerComponent(container)) container.addComponent(new ClusterControllerComponent()); container.addComponent(new ClusterControllerConfigurer(contentCluster, index++, clusterControllers.getContainers().size())); } } private boolean hasClusterControllerComponent(Container container) { for (Object o : container.getComponents().getComponents()) if (o instanceof ClusterControllerComponent) return true; return false; } }
class Builder { /** The admin model of this system or null if none (which only happens in tests) */ private final Admin admin; public Builder(Admin admin) { this.admin = admin; } public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) { ModelElement contentElement = new ModelElement(w3cContentElement); DeployState deployState = context.getDeployState(); ModelElement documentsElement = contentElement.child("documents"); Map<String, NewDocumentType> documentDefinitions = new SearchDefinitionBuilder().build(deployState.getDocumentModel().getDocumentManager(), documentsElement); String routingSelection = new DocumentSelectionBuilder().build(documentsElement); RedundancyBuilder redundancyBuilder = new RedundancyBuilder(contentElement); Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement); ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterId(contentElement), documentDefinitions, globallyDistributedDocuments, routingSelection, deployState.zone(), deployState.isHosted()); c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterId(contentElement), contentElement).build(deployState, c, contentElement.getXml()); c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments, isCombined(getClusterId(contentElement), containers)) .build(deployState, c, contentElement.getXml()); c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c); c.storageNodes = new StorageCluster.Builder().build(deployState, c, w3cContentElement); c.distributorNodes = new DistributorCluster.Builder(c).build(deployState, c, w3cContentElement); c.rootGroup = new StorageGroup.Builder(contentElement, context).buildRootGroup(deployState, redundancyBuilder, c); validateThatGroupSiblingsAreUnique(c.clusterId, c.rootGroup); c.search.handleRedundancy(c.redundancy); 
setupSearchCluster(c.search, contentElement, deployState.getDeployLogger()); if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) throw new RuntimeException("Indexed search requires proton as engine"); if (documentsElement != null) { ModelElement e = documentsElement.child("document-processing"); if (e != null) setupDocumentProcessing(c, e); } else if (c.persistenceFactory != null) { throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified."); } ModelElement tuning = contentElement.child("tuning"); if (tuning != null) setupTuning(c, tuning); ModelElement experimental = contentElement.child("experimental"); if (experimental != null) setupExperimental(c, experimental); if (context.getParentProducer().getRoot() == null) return c; addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterId, c); return c; } private void setupSearchCluster(ContentSearchCluster csc, ModelElement element, DeployLogger logger) { ContentSearch search = DomContentSearchBuilder.build(element); Double visibilityDelay = search.getVisibilityDelay(); if (visibilityDelay != null) { csc.setVisibilityDelay(visibilityDelay); } if (csc.hasIndexedCluster()) { setupIndexedCluster(csc.getIndexed(), search, element, logger); } } private void setupIndexedCluster(IndexedSearchCluster index, ContentSearch search, ModelElement element, DeployLogger logger) { Double queryTimeout = search.getQueryTimeout(); if (queryTimeout != null) { Preconditions.checkState(index.getQueryTimeout() == null, "In " + index + ": You may not specify query-timeout in both proton and content."); index.setQueryTimeout(queryTimeout); } index.setSearchCoverage(DomSearchCoverageBuilder.build(element)); index.setDispatchSpec(DomDispatchBuilder.build(element)); if (index.getTuning() == null) index.setTuning(new Tuning(index)); index.getTuning().dispatch = DomTuningDispatchBuilder.build(element, logger); } private void 
setupDocumentProcessing(ContentCluster c, ModelElement e) { String docprocCluster = e.stringAttribute("cluster"); if (docprocCluster != null) { docprocCluster = docprocCluster.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocCluster != null && !docprocCluster.isEmpty()) { c.getSearch().getIndexed().setIndexingClusterName(docprocCluster); } } String docprocChain = e.stringAttribute("chain"); if (docprocChain != null) { docprocChain = docprocChain.trim(); } if (c.getSearch().hasIndexedCluster()) { if (docprocChain != null && !docprocChain.isEmpty()) { c.getSearch().getIndexed().setIndexingChainName(docprocChain); } } } private void setupTuning(ContentCluster c, ModelElement tuning) { ModelElement distribution = tuning.child("distribution"); if (distribution != null) { String attr = distribution.stringAttribute("type"); if (attr != null) { if (attr.toLowerCase().equals("strict")) { c.distributionMode = DistributionMode.STRICT; } else if (attr.toLowerCase().equals("loose")) { c.distributionMode = DistributionMode.LOOSE; } else if (attr.toLowerCase().equals("legacy")) { c.distributionMode = DistributionMode.LEGACY; } else { throw new IllegalStateException("Distribution type " + attr + " not supported."); } } } ModelElement merges = tuning.child("merges"); if (merges != null) { Integer attr = merges.integerAttribute("max-nodes-per-merge"); if (attr != null) { c.maxNodesPerMerge = attr; } } } /** Returns whether this hosts one of the given container clusters */ private boolean isCombined(String clusterId, Collection<ContainerModel> containers) { return containers.stream() .map(model -> model.getCluster().getHostClusterId()) .filter(Optional::isPresent) .anyMatch(id -> id.get().equals(clusterId)); } private void setupExperimental(ContentCluster cluster, ModelElement experimental) { } private void validateGroupSiblings(String cluster, StorageGroup group) { Set<String> siblings = new HashSet<>(); for (StorageGroup g : group.getSubgroups()) { String name = 
g.getName(); if (siblings.contains(name)) { throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " + "with name '" + name + "' in the same subgroup. Group sibling names must be unique."); } siblings.add(name); } } private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) { if (group == null) { return; } validateGroupSiblings(cluster, group); for (StorageGroup g : group.getSubgroups()) { validateThatGroupSiblingsAreUnique(cluster, g); } } private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context, StorageGroup rootGroup, ModelElement contentElement, String contentClusterName, ContentCluster contentCluster) { if (admin == null) return; if (contentCluster.getPersistence() == null) return; ClusterControllerContainerCluster clusterControllers; ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster); if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) { clusterControllers = overlappingCluster.getClusterControllers(); } else if (admin.multitenant()) { String clusterName = contentClusterName + "-controllers"; NodesSpecification nodesSpecification = NodesSpecification.optionalDedicatedFromParent(contentElement.child("controllers"), context) .orElse(NodesSpecification.nonDedicated(3, context)); Collection<HostResource> hosts = nodesSpecification.isDedicated() ? 
getControllerHosts(nodesSpecification, admin, clusterName, context) : drawControllerHosts(nodesSpecification.minResources().nodes(), rootGroup, containers); clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true, context.getDeployState()); contentCluster.clusterControllers = clusterControllers; } else { clusterControllers = admin.getClusterControllers(); if (clusterControllers == null) { List<HostResource> hosts = admin.getClusterControllerHosts(); if (hosts.size() > 1) { context.getDeployState().getDeployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly."); } clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false, context.getDeployState()); admin.setClusterControllers(clusterControllers); } } addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster); ReindexingContext reindexingContext = clusterControllers.reindexingContext(); contentCluster.documentDefinitions.values() .forEach(type -> reindexingContext.addDocumentType(contentCluster.clusterId, type)); } /** Returns any other content cluster which shares nodes with this, or null if none are built */ private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) { for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) { if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster)) return otherContentCluster; } return null; } private boolean overlaps(ContentCluster c1, ContentCluster c2) { Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet()); return ! 
Sets.intersection(c1Hosts, c2Hosts).isEmpty(); } private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) { return nodesSpecification.provision(admin.hostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger(), false).keySet(); } private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawControllerHosts(count, false, rootGroup, containers); List<HostResource> retiredHosts = drawControllerHosts(count, true, rootGroup, containers); List<HostResource> all = new ArrayList<>(hosts); all.addAll(retiredHosts); return all; } private List<HostResource> drawControllerHosts(int count, boolean retired, StorageGroup rootGroup, Collection<ContainerModel> containers) { List<HostResource> hosts = drawContentHostsRecursively(count, retired, rootGroup); if (hosts.size() % 2 == 0 && ! hosts.isEmpty()) hosts = hosts.subList(0, hosts.size()-1); return hosts; } /** * Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible * if less than <code>count</code> are available. * * This will draw the same nodes each time it is * invoked if cluster names and node indexes are unchanged. */ private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters, Set<HostResource> usedHosts) { if (containerClusters.isEmpty()) return Collections.emptyList(); List<HostResource> allHosts = new ArrayList<>(); for (ApplicationContainerCluster cluster : clustersSortedByName(containerClusters)) allHosts.addAll(hostResourcesSortedByIndex(cluster)); List<HostResource> uniqueHostsWithoutClusterController = allHosts.stream() .filter(h -> ! usedHosts.contains(h)) .filter(h -> ! 
hostHasClusterController(h.getHostname(), allHosts)) .distinct() .collect(Collectors.toList()); return uniqueHostsWithoutClusterController.subList(0, Math.min(uniqueHostsWithoutClusterController.size(), count)); } private List<ApplicationContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) { return containerModels.stream() .map(ContainerModel::getCluster) .filter(cluster -> cluster instanceof ApplicationContainerCluster) .map(cluster -> (ApplicationContainerCluster) cluster) .sorted(Comparator.comparing(ContainerCluster::getName)) .collect(Collectors.toList()); } private List<HostResource> hostResourcesSortedByIndex(ApplicationContainerCluster cluster) { return cluster.getContainers().stream() .sorted(Comparator.comparing(Container::index)) .map(Container::getHostResource) .collect(Collectors.toList()); } /** Returns whether any host having the given hostname has a cluster controller */ private boolean hostHasClusterController(String hostname, List<HostResource> hosts) { for (HostResource host : hosts) { if ( ! host.getHostname().equals(hostname)) continue; if (hasClusterController(host)) return true; } return false; } private boolean hasClusterController(HostResource host) { for (Service service : host.getServices()) if (service instanceof ClusterControllerContainer) return true; return false; } /** * Draw <code>count</code> nodes from as many different content groups below this as possible. * This will only achieve maximum spread in the case where the groups are balanced and never on the same * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy. 
*/ private List<HostResource> drawContentHostsRecursively(int count, boolean retired, StorageGroup group) { Set<HostResource> hosts = new HashSet<>(); if (group.getNodes().isEmpty()) { int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size()); for (StorageGroup subgroup : group.getSubgroups()) hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, retired, subgroup)); } else { hosts.addAll(group.getNodes().stream() .filter(node -> node.isRetired() == retired) .map(StorageNode::getHostResource).collect(Collectors.toList())); } List<HostResource> sortedHosts = new ArrayList<>(hosts); sortedHosts.sort((a, b) -> (a.comparePrimarilyByIndexTo(b))); sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size())); return sortedHosts; } private ClusterControllerContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant, DeployState deployState) { var clusterControllers = new ClusterControllerContainerCluster(parent, name, name, deployState); List<ClusterControllerContainer> containers = new ArrayList<>(); if (clusterControllers.getContainers().isEmpty()) { int index = 0; for (HostResource host : hosts) { var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant, deployState); clusterControllerContainer.setHostResource(host); clusterControllerContainer.initService(deployState.getDeployLogger()); clusterControllerContainer.setProp("clustertype", "admin") .setProp("clustername", clusterControllers.getName()) .setProp("index", String.valueOf(index)); containers.add(clusterControllerContainer); ++index; } } clusterControllers.addContainers(containers); return clusterControllers; } private void addClusterControllerComponentsForThisCluster(ClusterControllerContainerCluster clusterControllers, ContentCluster contentCluster) { int index = 0; for (var container : clusterControllers.getContainers()) { if ( ! 
hasClusterControllerComponent(container)) container.addComponent(new ClusterControllerComponent()); container.addComponent(new ClusterControllerConfigurer(contentCluster, index++, clusterControllers.getContainers().size())); } } private boolean hasClusterControllerComponent(Container container) { for (Object o : container.getComponents().getComponents()) if (o instanceof ClusterControllerComponent) return true; return false; } }
`logFileHandler` always non-null?
public void shutdown() { logFileHandler.close(); connection.removeHandler(logFileHandler); if (logFileHandler!=null) logFileHandler.shutdown(); }
if (logFileHandler!=null)
public void shutdown() { logFileHandler.close(); connection.removeHandler(logFileHandler); logFileHandler.shutdown(); }
class ConnectionLogHandler { public Logger connection = Logger.getAnonymousLogger(); private final LogFileHandler logFileHandler; public ConnectionLogHandler(String clusterName) { connection.setUseParentHandlers(false); logFileHandler = new LogFileHandler(true); logFileHandler.setFilePattern(String.format("logs/vespa/qrs/connection.%s.%s", clusterName, "%Y%m%d%H%M%S")); LogFormatter lf = new LogFormatter(); lf.messageOnly(true); this.logFileHandler.setFormatter(lf); connection.addHandler(this.logFileHandler); } }
class ConnectionLogHandler {

    /** Logger dedicated to connection logging; never reassigned. */
    public final Logger connection = Logger.getAnonymousLogger();

    private final LogFileHandler logFileHandler;

    /** Creates a handler that writes raw message text to a per-cluster, timestamped file. */
    public ConnectionLogHandler(String clusterName) {
        this.connection.setUseParentHandlers(false);
        this.logFileHandler = new LogFileHandler(true);
        String pattern = String.format("logs/vespa/qrs/connection.%s.%s", clusterName, "%Y%m%d%H%M%S");
        this.logFileHandler.setFilePattern(pattern);
        LogFormatter formatter = new LogFormatter();
        formatter.messageOnly(true);
        this.logFileHandler.setFormatter(formatter);
        this.connection.addHandler(this.logFileHandler);
    }
}
```suggestion return new Error(ErrorCode.FATAL_ERROR, ResultType.CONDITION_NOT_MET_ERROR.name()); ```
public static Error toError(ResultType result) { switch (result) { case TRANSIENT_ERROR: return new Error(ErrorCode.TRANSIENT_ERROR, ResultType.TRANSIENT_ERROR.name()); case CONDITION_NOT_MET_ERROR: return new Error(ErrorCode.TRANSIENT_ERROR, ResultType.CONDITION_NOT_MET_ERROR.name()); case FATAL_ERROR: return new Error(ErrorCode.FATAL_ERROR, ResultType.FATAL_ERROR.name()); } return new Error(ErrorCode.NONE, "SUCCESS"); }
return new Error(ErrorCode.TRANSIENT_ERROR, ResultType.CONDITION_NOT_MET_ERROR.name());
public static Error toError(ResultType result) { switch (result) { case TRANSIENT_ERROR: return new Error(ErrorCode.TRANSIENT_ERROR, ResultType.TRANSIENT_ERROR.name()); case CONDITION_NOT_MET_ERROR: return new Error(ErrorCode.FATAL_ERROR, ResultType.CONDITION_NOT_MET_ERROR.name()); case FATAL_ERROR: return new Error(ErrorCode.FATAL_ERROR, ResultType.FATAL_ERROR.name()); } return new Error(ErrorCode.NONE, "SUCCESS"); }
class Result { /** Null if this is a success, set to the error occurring if this is a failure */ private final Error error; /** The id of this operation */ private final long requestId; private final ResultType type; /** Creates a successful result with requestId zero */ public Result() { this(0); } /** * Creates a successful result * * @param requestId the ID of the request */ public Result(long requestId) { this.error = null; this.requestId = requestId; type = ResultType.SUCCESS; } /** * Creates a unsuccessful result * * @deprecated Will be removed on Vespa 8 due to incorrect java.lang.Error */ @Deprecated(forRemoval = true, since="7") public Result(ResultType type, java.lang.Error error) { this.type = type; this.error = new Error(0, error.getMessage()); this.requestId = 0; } /** * Creates a unsuccessful result * * @param type the type of failure * @param error the error to encapsulate in this Result * @see com.yahoo.documentapi.Result.ResultType */ public Result(ResultType type, Error error) { this.type = type; this.error = error; this.requestId = 0; } /** * Returns whether this operation is a success. * If it is a success, the operation is accepted and one or more responses are guaranteed * to arrive within this sessions timeout limit. * If this is not a success, this operation has no further consequences. * * @return true if success */ public boolean isSuccess() { return type == ResultType.SUCCESS; } /** * Returns the error causes by this. If this was not a success, this method always returns an error * If this was a success, this method returns null. * * @return the Error, or null * @deprecated Will be removed on Vespa 8 */ @Deprecated(forRemoval = true, since="7") public java.lang.Error getError() { return new java.lang.Error(error.getMessage()); } public Error error() { return error; } /** * Returns the id of this operation. 
The asynchronous response to this operation * will contain the same id to allow clients who desire to, to match operations to responses. * * @return the id of this operation */ public long getRequestId() { return requestId; } /** * Returns the type of result. * * @return the type of result, typically if this is an error or a success, and what kind of error. * @see com.yahoo.documentapi.Result.ResultType */ public ResultType type() { return type;} /** The types that a Result can have. */ public enum ResultType { /** The request was successful, no error information is attached. */ SUCCESS, /** The request failed, but may be successful if retried at a later time. */ TRANSIENT_ERROR, /** The request failed, and retrying is pointless. */ FATAL_ERROR, /** Condition specified in operation not met error */ @Deprecated(since = "7", forRemoval = true) CONDITION_NOT_MET_ERROR } }
class Result {

    /** Null if this is a success, set to the error occurring if this is a failure */
    private final Error error;

    /** The id of this operation */
    private final long requestId;

    // The overall outcome category of the operation.
    private final ResultType type;

    /** Creates a successful result with requestId zero */
    public Result() {
        this(0);
    }

    /**
     * Creates a successful result
     *
     * @param requestId the ID of the request
     */
    public Result(long requestId) {
        this.error = null;
        this.requestId = requestId;
        type = ResultType.SUCCESS;
    }

    /**
     * Creates a unsuccessful result
     *
     * @deprecated Will be removed on Vespa 8 due to incorrect java.lang.Error
     */
    @Deprecated(forRemoval = true, since="7")
    public Result(ResultType type, java.lang.Error error) {
        this.type = type;
        this.error = new Error(0, error.getMessage());
        this.requestId = 0;
    }

    /**
     * Creates a unsuccessful result
     *
     * @param type the type of failure
     * @param error the error to encapsulate in this Result
     * @see com.yahoo.documentapi.Result.ResultType
     */
    public Result(ResultType type, Error error) {
        this.type = type;
        this.error = error;
        this.requestId = 0;
    }

    /**
     * Returns whether this operation is a success.
     * If it is a success, the operation is accepted and one or more responses are guaranteed
     * to arrive within this sessions timeout limit.
     * If this is not a success, this operation has no further consequences.
     *
     * @return true if success
     */
    public boolean isSuccess() { return type == ResultType.SUCCESS; }

    /**
     * Returns the error causes by this. If this was not a success, this method always returns an error
     * If this was a success, this method returns null.
     *
     * @return the Error, or null
     * @deprecated Will be removed on Vespa 8
     */
    @Deprecated(forRemoval = true, since="7")
    public java.lang.Error getError() {
        // NOTE(review): on success 'error' is null, so this dereference throws
        // NullPointerException rather than returning null as the javadoc states — confirm
        // whether callers rely on the documented null return before changing.
        return new java.lang.Error(error.getMessage());
    }

    public Error error() { return error; }

    /**
     * Returns the id of this operation. The asynchronous response to this operation
     * will contain the same id to allow clients who desire to, to match operations to responses.
     *
     * @return the id of this operation
     */
    public long getRequestId() { return requestId; }

    /**
     * Returns the type of result.
     *
     * @return the type of result, typically if this is an error or a success, and what kind of error.
     * @see com.yahoo.documentapi.Result.ResultType
     */
    public ResultType type() { return type;}

    /** The types that a Result can have. */
    public enum ResultType {
        /** The request was successful, no error information is attached. */
        SUCCESS,
        /** The request failed, but may be successful if retried at a later time. */
        TRANSIENT_ERROR,
        /** The request failed, and retrying is pointless. */
        FATAL_ERROR,
        /** Condition specified in operation not met error */
        @Deprecated(since = "7", forRemoval = true)
        CONDITION_NOT_MET_ERROR
    }

}
```suggestion if (run.start().isBefore(refreshTime) && job.isProduction() && job.isDeployment()) { ``` lest you trigger production tests needlessly.
/**
 * Re-triggers production deployment jobs for applications whose endpoint certificate was
 * refreshed more than a week ago but whose last deployment predates the refresh, so the
 * refreshed certificate actually gets rolled out.
 */
private void deployRefreshedCertificates() {
    var now = clock.instant();
    curator.readAllEndpointCertificateMetadata().forEach((applicationId, endpointCertificateMetadata) ->
            endpointCertificateMetadata.lastRefreshed().ifPresent(lastRefreshTime -> {
                Instant refreshTime = Instant.ofEpochSecond(lastRefreshTime);
                if (now.isAfter(refreshTime.plus(7, ChronoUnit.DAYS))) {
                    controller().jobController().jobs(applicationId).forEach(job ->
                            controller().jobController().jobStatus(new JobId(applicationId, JobType.fromJobName(job.jobName()))).lastTriggered().ifPresent(run -> {
                                // Only production *deployment* jobs need re-triggering; re-running
                                // production tests would not roll out the certificate.
                                if (run.start().isBefore(refreshTime) && job.isProduction() && job.isDeployment()) {
                                    deploymentTrigger.reTrigger(applicationId, job);
                                    log.info("Re-triggering deployment job " + job.jobName() + " for instance " + applicationId.serializedForm() + " to roll out refreshed endpoint certificate");
                                }
                            }));
                }
            }));
}
if (run.start().isBefore(refreshTime) && job.isProduction()) {
/**
 * Re-triggers production deployment jobs for applications whose endpoint certificate was
 * refreshed more than a week ago but whose last deployment predates the refresh.
 */
private void deployRefreshedCertificates() {
    var now = clock.instant();
    curator.readAllEndpointCertificateMetadata().forEach((applicationId, endpointCertificateMetadata) ->
            endpointCertificateMetadata.lastRefreshed().ifPresent(lastRefreshTime -> {
                Instant refreshTime = Instant.ofEpochSecond(lastRefreshTime);
                // Give deployments a week to pick up the refreshed certificate on their own.
                if (now.isAfter(refreshTime.plus(7, ChronoUnit.DAYS))) {
                    controller().jobController().jobs(applicationId).forEach(job ->
                            controller().jobController().jobStatus(new JobId(applicationId, JobType.fromJobName(job.jobName()))).lastTriggered().ifPresent(run -> {
                                // Only production deployment jobs whose last run predates the
                                // refresh are re-triggered; production tests are skipped.
                                if (run.start().isBefore(refreshTime) && job.isProduction() && job.isDeployment()) {
                                    deploymentTrigger.reTrigger(applicationId, job);
                                    log.info("Re-triggering deployment job " + job.jobName() + " for instance " + applicationId.serializedForm() + " to roll out refreshed endpoint certificate");
                                }
                            }));
                }
            }));
}
// Maintains endpoint certificates: rolls out refreshed certificates, updates stored
// metadata to the latest secret-store version, and deletes unused certificates.
class EndpointCertificateMaintainer extends ControllerMaintainer {

    private static final Logger log = Logger.getLogger(EndpointCertificateMaintainer.class.getName());

    private final DeploymentTrigger deploymentTrigger;
    private final Clock clock;
    private final CuratorDb curator;
    private final SecretStore secretStore;
    private final EndpointCertificateProvider endpointCertificateProvider;
    private final BooleanFlag useEndpointCertificateMaintainer;

    public EndpointCertificateMaintainer(Controller controller, Duration interval) {
        // Runs in all non-public systems.
        super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
        this.deploymentTrigger = controller.applications().deploymentTrigger();
        this.clock = controller.clock();
        this.secretStore = controller.secretStore();
        this.curator = controller().curator();
        this.endpointCertificateProvider = controller.serviceRegistry().endpointCertificateProvider();
        this.useEndpointCertificateMaintainer = Flags.USE_ENDPOINT_CERTIFICATE_MAINTAINER.bindTo(controller().flagSource());
    }

    @Override
    protected boolean maintain() {
        if (!useEndpointCertificateMaintainer.value()) return true; // feature-flagged off

        try {
            deployRefreshedCertificates();
            updateRefreshedCertificates();
            deleteUnusedCertificates();
        } catch (Exception e) {
            log.log(LogLevel.ERROR, "Exception caught while maintaining endpoint certificates", e);
            return false;
        }

        return true;
    }

    // Bumps stored certificate metadata to the newest version available in the secret store.
    private void updateRefreshedCertificates() {
        curator.readAllEndpointCertificateMetadata().forEach(((applicationId, endpointCertificateMetadata) -> {
            var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
            if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
                var refreshedCertificateMetadata = endpointCertificateMetadata
                        .withVersion(latestAvailableVersion.getAsInt())
                        .withLastRefreshed(clock.instant().getEpochSecond());
                try (Lock lock = lock(applicationId)) {
                    // Compare-and-swap: only write if the stored metadata is unchanged.
                    if (Optional.of(endpointCertificateMetadata).equals(curator.readEndpointCertificateMetadata(applicationId))) {
                        curator.writeEndpointCertificateMetadata(applicationId, refreshedCertificateMetadata);
                    }
                }
            }
        }));
    }

    /**
     * Returns the highest version present for both the key and the cert in the secret
     * store, or empty if either is missing.
     *
     * (NOTE(review): the javadoc previously here — "If it's been a week since the cert
     * has been refreshed, re-trigger all prod deployment jobs." — describes
     * deployRefreshedCertificates(), not this method.)
     */
    private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
        try {
            var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
            var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
            return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
        } catch (SecretNotFoundException s) {
            return OptionalInt.empty(); // cert or key not found in secret store
        }
    }

    // Deletes certificates not requested for a month when the application has no deployments.
    private void deleteUnusedCertificates() {
        var oneMonthAgo = clock.instant().minus(30, ChronoUnit.DAYS);
        curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> {
            var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested());
            if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) {
                try (Lock lock = lock(applicationId)) {
                    // Compare-and-swap: only delete if the stored metadata is unchanged.
                    if (Optional.of(storedMetaData).equals(curator.readEndpointCertificateMetadata(applicationId))) {
                        log.log(Level.INFO, "Cert for app " + applicationId.serializedForm()
                                + " has not been requested in a month and app has no deployments, deleting from provider and ZK");
                        endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData);
                        curator.deleteEndpointCertificateMetadata(applicationId);
                    }
                }
            }
        });
    }

    // Locks on the tenant-and-application level.
    private Lock lock(ApplicationId applicationId) {
        return curator.lock(TenantAndApplicationId.from(applicationId));
    }

    // True if the instance is unknown or has no deployments.
    private boolean hasNoDeployments(ApplicationId applicationId) {
        var deployments = curator.readApplication(TenantAndApplicationId.from(applicationId))
                .flatMap(app -> app.get(applicationId.instance()))
                .map(Instance::deployments);
        return deployments.isEmpty() || deployments.get().size() == 0;
    }

}
// Maintains endpoint certificates: rolls out refreshed certificates, updates stored
// metadata to the latest secret-store version, and deletes unused certificates.
class EndpointCertificateMaintainer extends ControllerMaintainer {

    private static final Logger log = Logger.getLogger(EndpointCertificateMaintainer.class.getName());

    private final DeploymentTrigger deploymentTrigger;
    private final Clock clock;
    private final CuratorDb curator;
    private final SecretStore secretStore;
    private final EndpointCertificateProvider endpointCertificateProvider;
    private final BooleanFlag useEndpointCertificateMaintainer;

    public EndpointCertificateMaintainer(Controller controller, Duration interval) {
        // Runs in all non-public systems.
        super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
        this.deploymentTrigger = controller.applications().deploymentTrigger();
        this.clock = controller.clock();
        this.secretStore = controller.secretStore();
        this.curator = controller().curator();
        this.endpointCertificateProvider = controller.serviceRegistry().endpointCertificateProvider();
        this.useEndpointCertificateMaintainer = Flags.USE_ENDPOINT_CERTIFICATE_MAINTAINER.bindTo(controller().flagSource());
    }

    @Override
    protected boolean maintain() {
        if (!useEndpointCertificateMaintainer.value()) return true; // feature-flagged off

        try {
            deployRefreshedCertificates();
            updateRefreshedCertificates();
            deleteUnusedCertificates();
        } catch (Exception e) {
            log.log(LogLevel.ERROR, "Exception caught while maintaining endpoint certificates", e);
            return false;
        }

        return true;
    }

    // Bumps stored certificate metadata to the newest version available in the secret store.
    private void updateRefreshedCertificates() {
        curator.readAllEndpointCertificateMetadata().forEach(((applicationId, endpointCertificateMetadata) -> {
            var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata);
            if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) {
                var refreshedCertificateMetadata = endpointCertificateMetadata
                        .withVersion(latestAvailableVersion.getAsInt())
                        .withLastRefreshed(clock.instant().getEpochSecond());
                try (Lock lock = lock(applicationId)) {
                    // Only write if the stored metadata is unchanged since we read it.
                    if (Optional.of(endpointCertificateMetadata).equals(curator.readEndpointCertificateMetadata(applicationId))) {
                        curator.writeEndpointCertificateMetadata(applicationId, refreshedCertificateMetadata);
                    }
                }
            }
        }));
    }

    /**
     * Returns the highest version present for both the key and the cert in the secret
     * store, or empty if either is missing.
     *
     * (NOTE(review): the javadoc previously here described deployRefreshedCertificates(),
     * not this method.)
     */
    private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) {
        try {
            var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName()));
            var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName()));
            return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max();
        } catch (SecretNotFoundException s) {
            return OptionalInt.empty(); // cert or key not found in secret store
        }
    }

    // Deletes certificates not requested for a month when the application has no deployments.
    private void deleteUnusedCertificates() {
        var oneMonthAgo = clock.instant().minus(30, ChronoUnit.DAYS);
        curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> {
            var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested());
            if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) {
                try (Lock lock = lock(applicationId)) {
                    // Only delete if the stored metadata is unchanged since we read it.
                    if (Optional.of(storedMetaData).equals(curator.readEndpointCertificateMetadata(applicationId))) {
                        log.log(Level.INFO, "Cert for app " + applicationId.serializedForm()
                                + " has not been requested in a month and app has no deployments, deleting from provider and ZK");
                        endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData);
                        curator.deleteEndpointCertificateMetadata(applicationId);
                    }
                }
            }
        });
    }

    // Locks on the tenant-and-application level.
    private Lock lock(ApplicationId applicationId) {
        return curator.lock(TenantAndApplicationId.from(applicationId));
    }

    // True if the instance is unknown or has no deployments.
    private boolean hasNoDeployments(ApplicationId applicationId) {
        var deployments = curator.readApplication(TenantAndApplicationId.from(applicationId))
                .flatMap(app -> app.get(applicationId.instance()))
                .map(Instance::deployments);
        return deployments.isEmpty() || deployments.get().size() == 0;
    }

}
👍
// Verifies that a refreshed certificate triggers a production re-deployment, but only
// after the one-week grace period has elapsed.
public void refreshed_certificate_is_deployed_after_one_week() {
    var appId = ApplicationId.from("tenant", "application", "default");
    DeploymentTester deploymentTester = new DeploymentTester(tester);

    var applicationPackage = new ApplicationPackageBuilder()
            .region("us-west-1")
            .build();

    DeploymentContext deploymentContext = deploymentTester.newDeploymentContext("tenant", "application", "default");
    deploymentContext.submit(applicationPackage).runJob(systemTest).runJob(stagingTest).runJob(productionUsWest1);

    tester.curator().writeEndpointCertificateMetadata(appId, exampleMetadata);

    assertTrue(maintainer.maintain());
    assertTrue(tester.curator().readEndpointCertificateMetadata(appId).isPresent());

    // Simulate the certificate being refreshed to version 1 three days in.
    tester.clock().advance(Duration.ofDays(3));
    secretStore.setSecret(exampleMetadata.keyName(), "foo", 1);
    secretStore.setSecret(exampleMetadata.certName(), "bar", 1);
    maintainer.maintain();

    // After a further 8 days (past the one-week grace period) the prod job is re-triggered.
    tester.clock().advance(Duration.ofDays(8));
    deploymentContext.assertNotRunning(productionUsWest1);
    maintainer.maintain();
    deploymentContext.assertRunning(productionUsWest1);
}
}
// Verifies that a refreshed certificate triggers a production re-deployment, but only
// after the one-week grace period has elapsed.
public void refreshed_certificate_is_deployed_after_one_week() {
    var appId = ApplicationId.from("tenant", "application", "default");
    DeploymentTester deploymentTester = new DeploymentTester(tester);
    var pkg = new ApplicationPackageBuilder().region("us-west-1").build();
    DeploymentContext context = deploymentTester.newDeploymentContext("tenant", "application", "default");
    context.submit(pkg).runJob(systemTest).runJob(stagingTest).runJob(productionUsWest1);

    tester.curator().writeEndpointCertificateMetadata(appId, exampleMetadata);
    assertTrue(maintainer.maintain());
    assertTrue(tester.curator().readEndpointCertificateMetadata(appId).isPresent());

    // Certificate refreshed to version 1 three days in.
    tester.clock().advance(Duration.ofDays(3));
    secretStore.setSecret(exampleMetadata.keyName(), "foo", 1);
    secretStore.setSecret(exampleMetadata.certName(), "bar", 1);
    maintainer.maintain();

    // Past the one-week grace period, the prod deployment job must be re-triggered.
    tester.clock().advance(Duration.ofDays(8));
    context.assertNotRunning(productionUsWest1);
    maintainer.maintain();
    context.assertRunning(productionUsWest1);
}
// Unit tests for EndpointCertificateMaintainer, driven by a mock controller and secret store.
class EndpointCertificateMaintainerTest {

    private final ControllerTester tester = new ControllerTester();
    private final SecretStoreMock secretStore = (SecretStoreMock) tester.controller().secretStore();
    private final EndpointCertificateMaintainer maintainer = new EndpointCertificateMaintainer(tester.controller(), Duration.ofHours(1));
    // Baseline metadata fixture; tests derive variants via the with* copy methods.
    private final EndpointCertificateMetadata exampleMetadata = new EndpointCertificateMetadata("keyName", "certName", 0, 0, "uuid", List.of(), "issuer", Optional.empty(), Optional.empty());

    @Before
    public void setUp() throws Exception {
        // The maintainer is feature-flagged; enable it for all tests.
        ((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.USE_ENDPOINT_CERTIFICATE_MAINTAINER.id(), true);
    }

    @Test
    public void old_and_unused_cert_is_deleted() {
        tester.curator().writeEndpointCertificateMetadata(ApplicationId.defaultId(), exampleMetadata);
        assertTrue(maintainer.maintain());
        assertTrue(tester.curator().readEndpointCertificateMetadata(ApplicationId.defaultId()).isEmpty());
    }

    @Test
    public void unused_but_recently_used_cert_is_not_deleted() {
        EndpointCertificateMetadata recentlyRequestedCert = exampleMetadata.withLastRequested(tester.clock().instant().minusSeconds(3600).getEpochSecond());
        tester.curator().writeEndpointCertificateMetadata(ApplicationId.defaultId(), recentlyRequestedCert);
        assertTrue(maintainer.maintain());
        assertEquals(Optional.of(recentlyRequestedCert), tester.curator().readEndpointCertificateMetadata(ApplicationId.defaultId()));
    }

    @Test
    public void refreshed_certificate_is_updated() {
        EndpointCertificateMetadata recentlyRequestedCert = exampleMetadata.withLastRequested(tester.clock().instant().minusSeconds(3600).getEpochSecond());
        tester.curator().writeEndpointCertificateMetadata(ApplicationId.defaultId(), recentlyRequestedCert);
        // A newer version (1) of both key and cert appears in the secret store.
        secretStore.setSecret(exampleMetadata.keyName(), "foo", 1);
        secretStore.setSecret(exampleMetadata.certName(), "bar", 1);
        assertTrue(maintainer.maintain());
        var updatedCert = Optional.of(recentlyRequestedCert.withLastRefreshed(tester.clock().instant().getEpochSecond()).withVersion(1));
        assertEquals(updatedCert, tester.curator().readEndpointCertificateMetadata(ApplicationId.defaultId()));
    }

    @Test
    public void certificate_in_use_is_not_deleted() {
        var appId = ApplicationId.from("tenant", "application", "default");
        DeploymentTester deploymentTester = new DeploymentTester(tester);

        var applicationPackage = new ApplicationPackageBuilder()
                .region("us-west-1")
                .build();

        DeploymentContext deploymentContext = deploymentTester.newDeploymentContext("tenant", "application", "default");
        deploymentContext.submit(applicationPackage).runJob(systemTest).runJob(stagingTest).runJob(productionUsWest1);

        tester.curator().writeEndpointCertificateMetadata(appId, exampleMetadata);
        assertTrue(maintainer.maintain());
        assertTrue(tester.curator().readEndpointCertificateMetadata(appId).isPresent());
    }

    // NOTE(review): dangling @Test annotation before the closing brace — looks like a
    // truncated method; confirm against the full file.
    @Test
}
// Unit tests for EndpointCertificateMaintainer, driven by a mock controller and secret store.
class EndpointCertificateMaintainerTest {

    private final ControllerTester tester = new ControllerTester();
    private final SecretStoreMock secretStore = (SecretStoreMock) tester.controller().secretStore();
    private final EndpointCertificateMaintainer maintainer = new EndpointCertificateMaintainer(tester.controller(), Duration.ofHours(1));
    // Baseline metadata fixture; tests derive variants via the with* copy methods.
    private final EndpointCertificateMetadata exampleMetadata = new EndpointCertificateMetadata("keyName", "certName", 0, 0, "uuid", List.of(), "issuer", Optional.empty(), Optional.empty());

    @Before
    public void setUp() throws Exception {
        // The maintainer is feature-flagged; enable it for all tests.
        ((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.USE_ENDPOINT_CERTIFICATE_MAINTAINER.id(), true);
    }

    @Test
    public void old_and_unused_cert_is_deleted() {
        tester.curator().writeEndpointCertificateMetadata(ApplicationId.defaultId(), exampleMetadata);
        assertTrue(maintainer.maintain());
        assertTrue(tester.curator().readEndpointCertificateMetadata(ApplicationId.defaultId()).isEmpty());
    }

    @Test
    public void unused_but_recently_used_cert_is_not_deleted() {
        EndpointCertificateMetadata recentlyRequestedCert = exampleMetadata.withLastRequested(tester.clock().instant().minusSeconds(3600).getEpochSecond());
        tester.curator().writeEndpointCertificateMetadata(ApplicationId.defaultId(), recentlyRequestedCert);
        assertTrue(maintainer.maintain());
        assertEquals(Optional.of(recentlyRequestedCert), tester.curator().readEndpointCertificateMetadata(ApplicationId.defaultId()));
    }

    @Test
    public void refreshed_certificate_is_updated() {
        EndpointCertificateMetadata recentlyRequestedCert = exampleMetadata.withLastRequested(tester.clock().instant().minusSeconds(3600).getEpochSecond());
        tester.curator().writeEndpointCertificateMetadata(ApplicationId.defaultId(), recentlyRequestedCert);
        // A newer version (1) of both key and cert appears in the secret store.
        secretStore.setSecret(exampleMetadata.keyName(), "foo", 1);
        secretStore.setSecret(exampleMetadata.certName(), "bar", 1);
        assertTrue(maintainer.maintain());
        var updatedCert = Optional.of(recentlyRequestedCert.withLastRefreshed(tester.clock().instant().getEpochSecond()).withVersion(1));
        assertEquals(updatedCert, tester.curator().readEndpointCertificateMetadata(ApplicationId.defaultId()));
    }

    @Test
    public void certificate_in_use_is_not_deleted() {
        var appId = ApplicationId.from("tenant", "application", "default");
        DeploymentTester deploymentTester = new DeploymentTester(tester);

        var applicationPackage = new ApplicationPackageBuilder()
                .region("us-west-1")
                .build();

        DeploymentContext deploymentContext = deploymentTester.newDeploymentContext("tenant", "application", "default");
        deploymentContext.submit(applicationPackage).runJob(systemTest).runJob(stagingTest).runJob(productionUsWest1);

        tester.curator().writeEndpointCertificateMetadata(appId, exampleMetadata);
        assertTrue(maintainer.maintain());
        assertTrue(tester.curator().readEndpointCertificateMetadata(appId).isPresent());
    }

    // NOTE(review): dangling @Test annotation before the closing brace — looks like a
    // truncated method; confirm against the full file.
    @Test
}
You should also verify that the data has actually been compressed, not just that it round-trips. The simplest check is asserting that the compressed output is shorter than the input (use an input long or repetitive enough to be compressible).
// Round-trip test: bytes written through ZstdOuputStream must decompress back to the
// original input.
void output_stream_compresses_input() throws IOException {
    byte[] inputData = "The quick brown fox jumps over the lazy dog".getBytes();
    ByteArrayOutputStream arrayOut = new ByteArrayOutputStream();
    // Exercise both the single-byte and the (array, offset, length) write paths.
    try (ZstdOuputStream zstdOut = new ZstdOuputStream(arrayOut, 12)) {
        zstdOut.write(inputData[0]);
        zstdOut.write(inputData, 1, inputData.length - 1);
    }
    byte[] compressedData = arrayOut.toByteArray();
    ZstdCompressor compressor = new ZstdCompressor();
    byte[] decompressedData = new byte[inputData.length];
    compressor.decompress(compressedData, 0, compressedData.length, decompressedData, 0, decompressedData.length);
    assertArrayEquals(inputData, decompressedData);
}
ZstdCompressor compressor = new ZstdCompressor();
// Round-trip test: bytes written through ZstdOuputStream must decompress back to the
// original input. Exercises both the single-byte and the ranged write paths.
void output_stream_compresses_input() throws IOException {
    byte[] original = "The quick brown fox jumps over the lazy dog".getBytes();
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    try (ZstdOuputStream out = new ZstdOuputStream(sink, 12)) {
        out.write(original[0]);
        out.write(original, 1, original.length - 1);
    }
    byte[] compressed = sink.toByteArray();
    ZstdCompressor compressor = new ZstdCompressor();
    byte[] roundTripped = new byte[original.length];
    compressor.decompress(compressed, 0, compressed.length, roundTripped, 0, roundTripped.length);
    assertArrayEquals(original, roundTripped);
}
class ZstdOuputStreamTest { @Test }
// Tests for ZstdOuputStream.
class ZstdOuputStreamTest {

    @Test

    @Test
    // Verifies real compression: a repetitive input must shrink when zstd-compressed.
    void compressed_size_is_less_than_uncompressed() throws IOException {
        StringBuilder builder = new StringBuilder();
        // Repeat the sentence 100 times so the input is long and repetitive enough to compress.
        for (int i = 0; i < 100; i++) {
            builder.append("The quick brown fox jumps over the lazy dog").append('\n');
        }
        byte[] inputData = builder.toString().getBytes();
        ByteArrayOutputStream arrayOut = new ByteArrayOutputStream();
        try (ZstdOuputStream zstdOut = new ZstdOuputStream(arrayOut)) {
            zstdOut.write(inputData);
        }
        int compressedSize = arrayOut.toByteArray().length;
        assertTrue(
                compressedSize < inputData.length,
                () -> "Compressed size is " + compressedSize + " while uncompressed size is " + inputData.length);
    }
}
I have added a new test case for that. The compressed size is actually larger for this test string.
// Round-trip test: bytes written through ZstdOuputStream must decompress back to the
// original input. Exercises both the single-byte and the ranged write paths.
void output_stream_compresses_input() throws IOException {
    byte[] original = "The quick brown fox jumps over the lazy dog".getBytes();
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    try (ZstdOuputStream out = new ZstdOuputStream(sink, 12)) {
        out.write(original[0]);
        out.write(original, 1, original.length - 1);
    }
    byte[] compressed = sink.toByteArray();
    ZstdCompressor compressor = new ZstdCompressor();
    byte[] roundTripped = new byte[original.length];
    compressor.decompress(compressed, 0, compressed.length, roundTripped, 0, roundTripped.length);
    assertArrayEquals(original, roundTripped);
}
ZstdCompressor compressor = new ZstdCompressor();
// Round-trip test: bytes written through ZstdOuputStream must decompress back to the
// original input. (Compression ratio is covered by a separate test; this short string
// may actually grow when compressed.)
void output_stream_compresses_input() throws IOException {
    byte[] inputData = "The quick brown fox jumps over the lazy dog".getBytes();
    ByteArrayOutputStream arrayOut = new ByteArrayOutputStream();
    // Exercise both the single-byte and the (array, offset, length) write paths.
    try (ZstdOuputStream zstdOut = new ZstdOuputStream(arrayOut, 12)) {
        zstdOut.write(inputData[0]);
        zstdOut.write(inputData, 1, inputData.length - 1);
    }
    byte[] compressedData = arrayOut.toByteArray();
    ZstdCompressor compressor = new ZstdCompressor();
    byte[] decompressedData = new byte[inputData.length];
    compressor.decompress(compressedData, 0, compressedData.length, decompressedData, 0, decompressedData.length);
    assertArrayEquals(inputData, decompressedData);
}
class ZstdOuputStreamTest { @Test }
// Tests for ZstdOuputStream.
class ZstdOuputStreamTest {

    @Test

    @Test
    // Verifies real compression: a repetitive input must shrink when zstd-compressed.
    void compressed_size_is_less_than_uncompressed() throws IOException {
        StringBuilder builder = new StringBuilder();
        // Repeat the sentence 100 times so the input is long and repetitive enough to compress.
        for (int i = 0; i < 100; i++) {
            builder.append("The quick brown fox jumps over the lazy dog").append('\n');
        }
        byte[] inputData = builder.toString().getBytes();
        ByteArrayOutputStream arrayOut = new ByteArrayOutputStream();
        try (ZstdOuputStream zstdOut = new ZstdOuputStream(arrayOut)) {
            zstdOut.write(inputData);
        }
        int compressedSize = arrayOut.toByteArray().length;
        assertTrue(
                compressedSize < inputData.length,
                () -> "Compressed size is " + compressedSize + " while uncompressed size is " + inputData.length);
    }
}
Is calling `flush()` immediately before `flushAndClose()` redundant? If `flushAndClose()` already flushes before closing (as its name suggests), the explicit `flush()` can be dropped.
/**
 * Rotates to a new, date-named log file: closes the current file, opens the new one,
 * updates the "current" symlink, and hands the old file off for compression or
 * cache-dropping.
 */
private void internalRotateNow() {
    String oldFileName = fileName;
    long now = System.currentTimeMillis();
    fileName = LogFormatter.insertDate(filePattern, now);
    // flushAndClose() flushes pending output before closing (as its name states), so the
    // separate flush() call that preceded it was redundant and has been removed.
    flushAndClose();
    try {
        checkAndCreateDir(fileName);
        FileOutputStream os = new FileOutputStream(fileName, true); // append mode
        setOutputStream(os);
        currentOutputStream = os;
        lastDropPosition = 0;
        LogFileDb.nowLoggingTo(fileName);
    } catch (IOException e) {
        throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
    }
    createSymlinkToCurrentFile();
    nextRotationTime = 0; // recomputed lazily on next use
    if ((oldFileName != null)) {
        File oldFile = new File(oldFileName);
        if (oldFile.exists()) {
            if (compression != Compression.NONE) {
                // Compress the rotated file off the logging thread.
                executor.execute(() -> runCompression(oldFile, compression));
            } else {
                nativeIO.dropFileFromCache(oldFile);
            }
        }
    }
}
flush();
/**
 * Rotates to a new, date-named log file: closes the current file, opens the new one,
 * updates the "current" symlink, and hands the old file off for compression or
 * cache-dropping.
 */
private void internalRotateNow() {
    String oldFileName = fileName;
    long now = System.currentTimeMillis();
    fileName = LogFormatter.insertDate(filePattern, now);
    // NOTE(review): flush() directly before flushAndClose() looks redundant if
    // flushAndClose() flushes before closing as its name suggests — confirm and drop one.
    flush();
    flushAndClose();
    try {
        checkAndCreateDir(fileName);
        FileOutputStream os = new FileOutputStream(fileName, true); // append mode
        setOutputStream(os);
        currentOutputStream = os;
        lastDropPosition = 0;
        LogFileDb.nowLoggingTo(fileName);
    } catch (IOException e) {
        throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
    }
    createSymlinkToCurrentFile();
    nextRotationTime = 0; // recomputed lazily on next use
    if ((oldFileName != null)) {
        File oldFile = new File(oldFileName);
        if (oldFile.exists()) {
            if (compression != Compression.NONE) {
                // Compress the rotated file off the logging thread.
                executor.execute(() -> runCompression(oldFile, compression));
            } else {
                nativeIO.dropFileFromCache(oldFile);
            }
        }
    }
}
/** Daemon thread that drains the LogFileHandler queue, rotating and flushing as needed. */
class LogThread extends Thread {

    LogFileHandler logFileHandler;
    long lastFlush = 0; // nanoTime of the last flush

    LogThread(LogFileHandler logFile) {
        super("Logger");
        setDaemon(true);
        logFileHandler = logFile;
    }

    @Override
    public void run() {
        try {
            storeLogRecords();
        } catch (InterruptedException e) {
            // Interruption is the normal shutdown signal for this thread. Restore the
            // interrupt flag (instead of silently swallowing it) and fall through to the
            // final flush below.
            Thread.currentThread().interrupt();
        } catch (Exception e) {
            com.yahoo.protect.Process.logAndDie("Failed storing log records", e);
        }

        logFileHandler.flush();
    }

    /** Polls the queue, handling the rotate command specially and flushing periodically. */
    private void storeLogRecords() throws InterruptedException {
        while (!isInterrupted()) {
            LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS);
            if (r != null) {
                if (r == logFileHandler.rotateCmd) {
                    // Sentinel record: rotate the log file instead of publishing.
                    logFileHandler.internalRotateNow();
                    lastFlush = System.nanoTime();
                } else {
                    logFileHandler.internalPublish(r);
                }
                flushIfOld(3, TimeUnit.SECONDS);
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);
            }
        }
    }

    /** Flushes if more than the given age has passed since the last flush. */
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            logFileHandler.flush();
            lastFlush = now;
        }
    }
}
/**
 * Background daemon thread that drains a {@link LogFileHandler}'s queue, publishing
 * records, handling the rotation sentinel, and flushing periodically.
 *
 * Fix: the original swallowed {@code InterruptedException} with an empty catch block;
 * the interrupt status is now restored so callers/joiners can still observe it.
 */
class LogThread extends Thread {
    LogFileHandler logFileHandler;
    long lastFlush = 0;

    LogThread(LogFileHandler logFile) {
        super("Logger");
        setDaemon(true);
        logFileHandler = logFile;
    }

    @Override
    public void run() {
        try {
            storeLogRecords();
        } catch (InterruptedException e) {
            // Interruption is the normal shutdown signal: restore the flag and
            // fall through to the final flush below.
            Thread.currentThread().interrupt();
        } catch (Exception e) {
            com.yahoo.protect.Process.logAndDie("Failed storing log records", e);
        }
        logFileHandler.flush();
    }

    /** Drains the queue until interrupted, flushing when records have aged enough. */
    private void storeLogRecords() throws InterruptedException {
        while (!isInterrupted()) {
            LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS);
            if (r != null) {
                if (r == logFileHandler.rotateCmd) {
                    // Sentinel record (identity comparison is intentional): rotate instead of publishing.
                    logFileHandler.internalRotateNow();
                    lastFlush = System.nanoTime();
                } else {
                    logFileHandler.internalPublish(r);
                }
                flushIfOld(3, TimeUnit.SECONDS);
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);
            }
        }
    }

    /** Flushes the handler if more than {@code age} (in {@code unit}) has passed since the last flush. */
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            logFileHandler.flush();
            lastFlush = now;
        }
    }
}
The test became much simpler now that `LogRecord` is no longer used.
/**
 * Verifies rotation-time calculation and the basic publish/rotate/flush/shutdown
 * lifecycle of LogFileHandler.
 *
 * Fix: removed the unused local {@code Formatter formatter} — the test publishes plain
 * strings and never referenced it.
 */
public void testIt() throws IOException {
    File root = temporaryFolder.newFolder("logfilehandlertest");
    // Timestamped pattern so each rotation produces a distinct file name.
    String pattern = root.getAbsolutePath() + "/logfilehandlertest.%Y%m%d%H%M%S";
    long[] rTimes = {1000, 2000, 10000};
    LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, pattern, rTimes, null, new StringLogWriter());
    long now = System.currentTimeMillis();
    long millisPerDay = 60 * 60 * 24 * 1000;
    long tomorrowDays = (now / millisPerDay) + 1;
    long tomorrowMillis = tomorrowDays * millisPerDay;
    // Next rotation is the next configured offset at or after the given time.
    assertThat(tomorrowMillis + 1000).isEqualTo(h.getNextRotationTime(tomorrowMillis));
    assertThat(tomorrowMillis + 10000).isEqualTo(h.getNextRotationTime(tomorrowMillis + 3000));
    String message = "test";
    h.publish(message);
    h.publish("another test");
    h.rotateNow();
    h.publish(message);
    h.flush();
    h.shutdown();
}
// Handler under test: no compression, timestamped file pattern, no symlink name.
LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, pattern, rTimes, null, new StringLogWriter());
/**
 * Verifies rotation-time calculation and the basic publish/rotate/flush/shutdown
 * lifecycle of LogFileHandler.
 *
 * Fix: removed the unused local {@code Formatter formatter} — the test publishes plain
 * strings and never referenced it.
 */
public void testIt() throws IOException {
    File root = temporaryFolder.newFolder("logfilehandlertest");
    // Timestamped pattern so each rotation produces a distinct file name.
    String pattern = root.getAbsolutePath() + "/logfilehandlertest.%Y%m%d%H%M%S";
    long[] rTimes = {1000, 2000, 10000};
    LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, pattern, rTimes, null, new StringLogWriter());
    long now = System.currentTimeMillis();
    long millisPerDay = 60 * 60 * 24 * 1000;
    long tomorrowDays = (now / millisPerDay) + 1;
    long tomorrowMillis = tomorrowDays * millisPerDay;
    // Next rotation is the next configured offset at or after the given time.
    assertThat(tomorrowMillis + 1000).isEqualTo(h.getNextRotationTime(tomorrowMillis));
    assertThat(tomorrowMillis + 10000).isEqualTo(h.getNextRotationTime(tomorrowMillis + 3000));
    String message = "test";
    h.publish(message);
    h.publish("another test");
    h.rotateNow();
    h.publish(message);
    h.flush();
    h.shutdown();
}
/**
 * Tests for LogFileHandler: basic logging, file deletion while logging, symlink
 * maintenance across rotation, and rotation with gzip/zstd compression.
 *
 * Fix: {@code testSimpleLogging} was annotated {@code @Test @Test}; a duplicate
 * non-repeatable annotation is a compile error, so a single {@code @Test} is kept.
 */
class LogFileHandlerTestCase {
    @Rule
    public TemporaryFolder temporaryFolder = new TemporaryFolder();

    @Test
    public void testSimpleLogging() throws IOException {
        File logFile = temporaryFolder.newFile("testLogFileG1.txt");
        LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, logFile.getAbsolutePath(), "0 5 ...", null, new StringLogWriter());
        h.publish("testDeleteFileFirst1");
        h.flush();
        h.shutdown();
    }

    @Test
    public void testDeleteFileDuringLogging() throws IOException {
        File logFile = temporaryFolder.newFile("testLogFileG2.txt");
        LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, logFile.getAbsolutePath(), "0 5 ...", null, new StringLogWriter());
        h.publish("testDeleteFileDuringLogging1");
        h.flush();
        // Deleting the file behind the handler's back must not break subsequent logging.
        logFile.delete();
        h.publish("testDeleteFileDuringLogging2");
        h.flush();
        h.shutdown();
    }

    @Test(timeout = /*5 minutes*/300_000)
    public void testSymlink() throws IOException, InterruptedException {
        File root = temporaryFolder.newFolder("testlogforsymlinkchecking");
        Formatter formatter = new Formatter() {
            public String format(LogRecord r) {
                DateFormat df = new SimpleDateFormat("yyyy.MM.dd:HH:mm:ss.SSS");
                String timeStamp = df.format(new Date(r.getMillis()));
                return ("[" + timeStamp + "]" + " " + formatMessage(r));
            }
        };
        LogFileHandler<String> handler = new LogFileHandler<>(
                Compression.NONE, root.getAbsolutePath() + "/logfilehandlertest.%Y%m%d%H%M%S%s", new long[]{0}, "symlink", new StringLogWriter());
        String message = formatter.format(new LogRecord(Level.INFO, "test"));
        handler.publish(message);
        // Busy-wait until the handler has opened its first file.
        String firstFile;
        do {
            Thread.sleep(1);
            firstFile = handler.getFileName();
        } while (firstFile == null);
        handler.rotateNow();
        // Wait until rotation has switched to a new file name.
        String secondFileName;
        do {
            Thread.sleep(1);
            secondFileName = handler.getFileName();
        } while (firstFile.equals(secondFileName));
        String longMessage = formatter.format(new LogRecord(Level.INFO, "string which is way longer than the word test"));
        handler.publish(longMessage);
        handler.waitDrained();
        assertThat(Files.size(Paths.get(firstFile))).isEqualTo(31);
        final long expectedSecondFileLength = 72;
        long secondFileLength;
        do {
            Thread.sleep(1);
            secondFileLength = Files.size(Paths.get(secondFileName));
        } while (secondFileLength != expectedSecondFileLength);
        // The symlink must point at the newest (second) file.
        long symlinkFileLength = Files.size(root.toPath().resolve("symlink"));
        assertThat(symlinkFileLength).isEqualTo(expectedSecondFileLength);
        handler.shutdown();
    }

    @Test
    public void testcompression_gzip() throws InterruptedException, IOException {
        testcompression(
                Compression.GZIP, "gz",
                (compressedFile, __) -> uncheck(() -> new String(new GZIPInputStream(Files.newInputStream(compressedFile)).readAllBytes())));
    }

    @Test
    public void testcompression_zstd() throws InterruptedException, IOException {
        testcompression(
                Compression.ZSTD, "zst",
                (compressedFile, uncompressedSize) -> uncheck(() -> {
                    ZstdCompressor zstdCompressor = new ZstdCompressor();
                    byte[] uncompressedBytes = new byte[uncompressedSize];
                    byte[] compressedBytes = Files.readAllBytes(compressedFile);
                    zstdCompressor.decompress(compressedBytes, 0, compressedBytes.length, uncompressedBytes, 0, uncompressedBytes.length);
                    return new String(uncompressedBytes);
                }));
    }

    // Publishes entries, rotates, and verifies the rotated file is compressed with the
    // given algorithm and decompresses back to the original content.
    private void testcompression(Compression compression,
                                 String fileExtension,
                                 BiFunction<Path, Integer, String> decompressor) throws IOException, InterruptedException {
        File root = temporaryFolder.newFolder("testcompression" + compression.name());
        LogFileHandler<String> h = new LogFileHandler<>(
                compression, root.getAbsolutePath() + "/logfilehandlertest.%Y%m%d%H%M%S%s", new long[]{0}, null, new StringLogWriter());
        int logEntries = 10000;
        for (int i = 0; i < logEntries; i++) {
            h.publish("test");
        }
        h.waitDrained();
        String f1 = h.getFileName();
        assertThat(f1).startsWith(root.getAbsolutePath() + "/logfilehandlertest.");
        File uncompressed = new File(f1);
        File compressed = new File(f1 + "." + fileExtension);
        assertThat(uncompressed).exists();
        assertThat(compressed).doesNotExist();
        String content = IOUtils.readFile(uncompressed);
        assertThat(content).hasLineCount(logEntries);
        h.rotateNow();
        // Compression of the rotated file happens asynchronously.
        while (uncompressed.exists()) {
            Thread.sleep(1);
        }
        assertThat(compressed).exists();
        String uncompressedContent = decompressor.apply(compressed.toPath(), content.getBytes().length);
        assertThat(uncompressedContent).isEqualTo(content);
        h.shutdown();
    }

    /** LogWriter that writes each record as a UTF-8 line. */
    static class StringLogWriter implements LogWriter<String> {
        @Override
        public void write(String record, OutputStream outputStream) throws IOException {
            outputStream.write((record + "\n").getBytes(StandardCharsets.UTF_8));
        }
    }
}
/**
 * Tests for LogFileHandler: basic logging, file deletion while logging, symlink
 * maintenance across rotation, and rotation with gzip/zstd compression.
 *
 * Fix: {@code testSimpleLogging} was annotated {@code @Test @Test}; a duplicate
 * non-repeatable annotation is a compile error, so a single {@code @Test} is kept.
 */
class LogFileHandlerTestCase {
    @Rule
    public TemporaryFolder temporaryFolder = new TemporaryFolder();

    @Test
    public void testSimpleLogging() throws IOException {
        File logFile = temporaryFolder.newFile("testLogFileG1.txt");
        LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, logFile.getAbsolutePath(), "0 5 ...", null, new StringLogWriter());
        h.publish("testDeleteFileFirst1");
        h.flush();
        h.shutdown();
    }

    @Test
    public void testDeleteFileDuringLogging() throws IOException {
        File logFile = temporaryFolder.newFile("testLogFileG2.txt");
        LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, logFile.getAbsolutePath(), "0 5 ...", null, new StringLogWriter());
        h.publish("testDeleteFileDuringLogging1");
        h.flush();
        // Deleting the file behind the handler's back must not break subsequent logging.
        logFile.delete();
        h.publish("testDeleteFileDuringLogging2");
        h.flush();
        h.shutdown();
    }

    @Test(timeout = /*5 minutes*/300_000)
    public void testSymlink() throws IOException, InterruptedException {
        File root = temporaryFolder.newFolder("testlogforsymlinkchecking");
        Formatter formatter = new Formatter() {
            public String format(LogRecord r) {
                DateFormat df = new SimpleDateFormat("yyyy.MM.dd:HH:mm:ss.SSS");
                String timeStamp = df.format(new Date(r.getMillis()));
                return ("[" + timeStamp + "]" + " " + formatMessage(r));
            }
        };
        LogFileHandler<String> handler = new LogFileHandler<>(
                Compression.NONE, root.getAbsolutePath() + "/logfilehandlertest.%Y%m%d%H%M%S%s", new long[]{0}, "symlink", new StringLogWriter());
        String message = formatter.format(new LogRecord(Level.INFO, "test"));
        handler.publish(message);
        // Busy-wait until the handler has opened its first file.
        String firstFile;
        do {
            Thread.sleep(1);
            firstFile = handler.getFileName();
        } while (firstFile == null);
        handler.rotateNow();
        // Wait until rotation has switched to a new file name.
        String secondFileName;
        do {
            Thread.sleep(1);
            secondFileName = handler.getFileName();
        } while (firstFile.equals(secondFileName));
        String longMessage = formatter.format(new LogRecord(Level.INFO, "string which is way longer than the word test"));
        handler.publish(longMessage);
        handler.waitDrained();
        assertThat(Files.size(Paths.get(firstFile))).isEqualTo(31);
        final long expectedSecondFileLength = 72;
        long secondFileLength;
        do {
            Thread.sleep(1);
            secondFileLength = Files.size(Paths.get(secondFileName));
        } while (secondFileLength != expectedSecondFileLength);
        // The symlink must point at the newest (second) file.
        long symlinkFileLength = Files.size(root.toPath().resolve("symlink"));
        assertThat(symlinkFileLength).isEqualTo(expectedSecondFileLength);
        handler.shutdown();
    }

    @Test
    public void testcompression_gzip() throws InterruptedException, IOException {
        testcompression(
                Compression.GZIP, "gz",
                (compressedFile, __) -> uncheck(() -> new String(new GZIPInputStream(Files.newInputStream(compressedFile)).readAllBytes())));
    }

    @Test
    public void testcompression_zstd() throws InterruptedException, IOException {
        testcompression(
                Compression.ZSTD, "zst",
                (compressedFile, uncompressedSize) -> uncheck(() -> {
                    ZstdCompressor zstdCompressor = new ZstdCompressor();
                    byte[] uncompressedBytes = new byte[uncompressedSize];
                    byte[] compressedBytes = Files.readAllBytes(compressedFile);
                    zstdCompressor.decompress(compressedBytes, 0, compressedBytes.length, uncompressedBytes, 0, uncompressedBytes.length);
                    return new String(uncompressedBytes);
                }));
    }

    // Publishes entries, rotates, and verifies the rotated file is compressed with the
    // given algorithm and decompresses back to the original content.
    private void testcompression(Compression compression,
                                 String fileExtension,
                                 BiFunction<Path, Integer, String> decompressor) throws IOException, InterruptedException {
        File root = temporaryFolder.newFolder("testcompression" + compression.name());
        LogFileHandler<String> h = new LogFileHandler<>(
                compression, root.getAbsolutePath() + "/logfilehandlertest.%Y%m%d%H%M%S%s", new long[]{0}, null, new StringLogWriter());
        int logEntries = 10000;
        for (int i = 0; i < logEntries; i++) {
            h.publish("test");
        }
        h.waitDrained();
        String f1 = h.getFileName();
        assertThat(f1).startsWith(root.getAbsolutePath() + "/logfilehandlertest.");
        File uncompressed = new File(f1);
        File compressed = new File(f1 + "." + fileExtension);
        assertThat(uncompressed).exists();
        assertThat(compressed).doesNotExist();
        String content = IOUtils.readFile(uncompressed);
        assertThat(content).hasLineCount(logEntries);
        h.rotateNow();
        // Compression of the rotated file happens asynchronously.
        while (uncompressed.exists()) {
            Thread.sleep(1);
        }
        assertThat(compressed).exists();
        String uncompressedContent = decompressor.apply(compressed.toPath(), content.getBytes().length);
        assertThat(uncompressedContent).isEqualTo(content);
        h.shutdown();
    }

    /** LogWriter that writes each record as a UTF-8 line. */
    static class StringLogWriter implements LogWriter<String> {
        @Override
        public void write(String record, OutputStream outputStream) throws IOException {
            outputStream.write((record + "\n").getBytes(StandardCharsets.UTF_8));
        }
    }
}
Will the non-legacy test be updated to test with different parameters? Right now it looks like `testStorageSuspensionLimit` and `testStorageSuspensionLimit_legacy` do the same thing
// Verifies that a storage cluster is limited to suspending one node at a time when the
// boolean flag (second argument) is false — the legacy path, per the method name.
public void testStorageSuspensionLimit_legacy() {
    when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
    when(clusterApi.isStorageCluster()).thenReturn(true);
    assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE,
            policy.getConcurrentSuspensionLimit(clusterApi, false));
}
// Legacy path: second argument (feature flag) is false.
policy.getConcurrentSuspensionLimit(clusterApi, false));
// Verifies that a storage cluster is limited to suspending one node at a time when the
// boolean flag (second argument) is false — the legacy path, per the method name.
public void testStorageSuspensionLimit_legacy() {
    when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
    when(clusterApi.isStorageCluster()).thenReturn(true);
    assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE,
            policy.getConcurrentSuspensionLimit(clusterApi, false));
}
/**
 * Tests for HostedVespaClusterPolicy: per-cluster concurrent-suspension limits and the
 * group-going-down check.
 *
 * Fixes:
 * - {@code testTenantHostSuspensionLimit} was annotated {@code @Test @Test}; a duplicate
 *   non-repeatable annotation is a compile error, so a single {@code @Test} is kept.
 * - {@code verifyGroupGoingDownIsFine} silently swallowed an unexpected
 *   {@code HostStateChangeDeniedException} when {@code expectSuccess} was true, letting
 *   a failing scenario pass; it now fails loudly in that case.
 */
class HostedVespaClusterPolicyTest {
    private ApplicationApi applicationApi = mock(ApplicationApi.class);
    private ClusterApi clusterApi = mock(ClusterApi.class);
    private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
    private HostedVespaClusterPolicy policy = spy(new HostedVespaClusterPolicy(flagSource));

    @Before
    public void setUp() {
        when(clusterApi.getApplication()).thenReturn(applicationApi);
    }

    @Test
    public void testSlobrokSuspensionLimit() {
        when(clusterApi.clusterId()).thenReturn(VespaModelUtil.ADMIN_CLUSTER_ID);
        when(clusterApi.serviceType()).thenReturn(ServiceType.SLOBROK);
        assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testAdminSuspensionLimit() {
        when(clusterApi.clusterId()).thenReturn(VespaModelUtil.ADMIN_CLUSTER_ID);
        when(clusterApi.serviceType()).thenReturn(new ServiceType("non-slobrok-service-type"));
        assertEquals(ConcurrentSuspensionLimitForCluster.ALL_NODES,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testStorageSuspensionLimit() {
        when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
        when(clusterApi.isStorageCluster()).thenReturn(true);
        assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testTenantHostSuspensionLimit() {
        when(applicationApi.applicationId()).thenReturn(VespaModelUtil.TENANT_HOST_APPLICATION_ID);
        when(clusterApi.isStorageCluster()).thenReturn(false);
        assertEquals(ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testDefaultSuspensionLimit() {
        when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("a:b:c"));
        when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
        when(clusterApi.isStorageCluster()).thenReturn(false);
        assertEquals(ConcurrentSuspensionLimitForCluster.TEN_PERCENT,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void verifyGroupGoingDownIsFine_noServicesOutsideGroupIsDownIsFine() {
        verifyGroupGoingDownIsFine(true, Optional.empty(), 13, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_noServicesInGroupIsUp() {
        var reasons = new SuspensionReasons().addReason(new HostName("host1"), "supension reason 1");
        verifyGroupGoingDownIsFine(false, Optional.of(reasons), 13, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_percentageIsFine() {
        verifyGroupGoingDownIsFine(false, Optional.empty(), 9, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_fails() {
        verifyGroupGoingDownIsFine(false, Optional.empty(), 13, false);
    }

    // Drives verifyGroupGoingDownIsFine with the given cluster state and asserts it
    // either succeeds (optionally with the expected suspension reasons) or is denied
    // with the expected message.
    private void verifyGroupGoingDownIsFine(boolean noServicesOutsideGroupIsDown,
                                            Optional<SuspensionReasons> noServicesInGroupIsUp,
                                            int percentageOfServicesDownIfGroupIsAllowedToBeDown,
                                            boolean expectSuccess) {
        when(clusterApi.noServicesOutsideGroupIsDown()).thenReturn(noServicesOutsideGroupIsDown);
        when(clusterApi.reasonsForNoServicesInGroupIsUp()).thenReturn(noServicesInGroupIsUp);
        when(clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown()).thenReturn(20);
        doReturn(ConcurrentSuspensionLimitForCluster.TEN_PERCENT).when(policy).getConcurrentSuspensionLimit(clusterApi, false);

        when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("a:b:c"));
        when(clusterApi.serviceType()).thenReturn(new ServiceType("service-type"));
        when(clusterApi.percentageOfServicesDown()).thenReturn(5);
        when(clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown()).thenReturn(percentageOfServicesDownIfGroupIsAllowedToBeDown);
        when(clusterApi.downDescription()).thenReturn(" Down description");

        NodeGroup nodeGroup = mock(NodeGroup.class);
        when(clusterApi.getNodeGroup()).thenReturn(nodeGroup);
        when(nodeGroup.toCommaSeparatedString()).thenReturn("node-group");

        try {
            SuspensionReasons reasons = policy.verifyGroupGoingDownIsFine(clusterApi);
            if (!expectSuccess) {
                fail();
            }
            if (noServicesInGroupIsUp.isPresent()) {
                assertEquals(noServicesInGroupIsUp.get().getMessagesInOrder(), reasons.getMessagesInOrder());
            }
        } catch (HostStateChangeDeniedException e) {
            if (expectSuccess) {
                // Previously swallowed: an unexpected denial made the test pass silently.
                throw new AssertionError("Expected the group to be allowed to go down", e);
            }
            assertEquals("Changing the state of node-group would violate enough-services-up: "
                    + "Suspension of service with type 'service-type' would increase from 5% to 13%, "
                    + "over the limit of 10%. Down description", e.getMessage());
            assertEquals("enough-services-up", e.getConstraintName());
        }
    }
}
/**
 * Tests for HostedVespaClusterPolicy: per-cluster concurrent-suspension limits and the
 * group-going-down check.
 *
 * Fixes:
 * - {@code testTenantHostSuspensionLimit} was annotated {@code @Test @Test}; a duplicate
 *   non-repeatable annotation is a compile error, so a single {@code @Test} is kept.
 * - {@code verifyGroupGoingDownIsFine} silently swallowed an unexpected
 *   {@code HostStateChangeDeniedException} when {@code expectSuccess} was true, letting
 *   a failing scenario pass; it now fails loudly in that case.
 */
class HostedVespaClusterPolicyTest {
    private ApplicationApi applicationApi = mock(ApplicationApi.class);
    private ClusterApi clusterApi = mock(ClusterApi.class);
    private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
    private HostedVespaClusterPolicy policy = spy(new HostedVespaClusterPolicy(flagSource));

    @Before
    public void setUp() {
        when(clusterApi.getApplication()).thenReturn(applicationApi);
    }

    @Test
    public void testSlobrokSuspensionLimit() {
        when(clusterApi.clusterId()).thenReturn(VespaModelUtil.ADMIN_CLUSTER_ID);
        when(clusterApi.serviceType()).thenReturn(ServiceType.SLOBROK);
        assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testAdminSuspensionLimit() {
        when(clusterApi.clusterId()).thenReturn(VespaModelUtil.ADMIN_CLUSTER_ID);
        when(clusterApi.serviceType()).thenReturn(new ServiceType("non-slobrok-service-type"));
        assertEquals(ConcurrentSuspensionLimitForCluster.ALL_NODES,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testStorageSuspensionLimit() {
        when(clusterApi.serviceType()).thenReturn(ServiceType.STORAGE);
        when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
        when(clusterApi.isStorageCluster()).thenReturn(true);
        assertEquals(ConcurrentSuspensionLimitForCluster.ALL_NODES,
                policy.getConcurrentSuspensionLimit(clusterApi, true));
    }

    @Test
    public void testTenantHostSuspensionLimit() {
        when(applicationApi.applicationId()).thenReturn(VespaModelUtil.TENANT_HOST_APPLICATION_ID);
        when(clusterApi.isStorageCluster()).thenReturn(false);
        assertEquals(ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testDefaultSuspensionLimit() {
        when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("a:b:c"));
        when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
        when(clusterApi.isStorageCluster()).thenReturn(false);
        assertEquals(ConcurrentSuspensionLimitForCluster.TEN_PERCENT,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void verifyGroupGoingDownIsFine_noServicesOutsideGroupIsDownIsFine() {
        verifyGroupGoingDownIsFine(true, Optional.empty(), 13, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_noServicesInGroupIsUp() {
        var reasons = new SuspensionReasons().addReason(new HostName("host1"), "supension reason 1");
        verifyGroupGoingDownIsFine(false, Optional.of(reasons), 13, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_percentageIsFine() {
        verifyGroupGoingDownIsFine(false, Optional.empty(), 9, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_fails() {
        verifyGroupGoingDownIsFine(false, Optional.empty(), 13, false);
    }

    // Drives verifyGroupGoingDownIsFine with the given cluster state and asserts it
    // either succeeds (optionally with the expected suspension reasons) or is denied
    // with the expected message.
    private void verifyGroupGoingDownIsFine(boolean noServicesOutsideGroupIsDown,
                                            Optional<SuspensionReasons> noServicesInGroupIsUp,
                                            int percentageOfServicesDownIfGroupIsAllowedToBeDown,
                                            boolean expectSuccess) {
        when(clusterApi.noServicesOutsideGroupIsDown()).thenReturn(noServicesOutsideGroupIsDown);
        when(clusterApi.reasonsForNoServicesInGroupIsUp()).thenReturn(noServicesInGroupIsUp);
        when(clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown()).thenReturn(20);
        doReturn(ConcurrentSuspensionLimitForCluster.TEN_PERCENT).when(policy).getConcurrentSuspensionLimit(clusterApi, false);

        when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("a:b:c"));
        when(clusterApi.serviceType()).thenReturn(new ServiceType("service-type"));
        when(clusterApi.percentageOfServicesDown()).thenReturn(5);
        when(clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown()).thenReturn(percentageOfServicesDownIfGroupIsAllowedToBeDown);
        when(clusterApi.downDescription()).thenReturn(" Down description");

        NodeGroup nodeGroup = mock(NodeGroup.class);
        when(clusterApi.getNodeGroup()).thenReturn(nodeGroup);
        when(nodeGroup.toCommaSeparatedString()).thenReturn("node-group");

        try {
            SuspensionReasons reasons = policy.verifyGroupGoingDownIsFine(clusterApi);
            if (!expectSuccess) {
                fail();
            }
            if (noServicesInGroupIsUp.isPresent()) {
                assertEquals(noServicesInGroupIsUp.get().getMessagesInOrder(), reasons.getMessagesInOrder());
            }
        } catch (HostStateChangeDeniedException e) {
            if (expectSuccess) {
                // Previously swallowed: an unexpected denial made the test pass silently.
                throw new AssertionError("Expected the group to be allowed to go down", e);
            }
            assertEquals("Changing the state of node-group would violate enough-services-up: "
                    + "Suspension of service with type 'service-type' would increase from 5% to 13%, "
                    + "over the limit of 10%. Down description", e.getMessage());
            assertEquals("enough-services-up", e.getConstraintName());
        }
    }
}
The same comment as above applies to this method as well.
/**
 * Loads all remote sessions found in ZooKeeper in parallel on the given executor and
 * waits for every load to finish.
 *
 * Fix: the warning log on failure said only "Could not load session" — it now names the
 * failing session id and includes the exception as the cause. Session ids are tracked
 * alongside their futures in a map to make that attribution possible.
 *
 * @param executor executor used to load sessions concurrently
 * @throws NumberFormatException if a session id read from ZooKeeper is not a valid number
 */
private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException {
    // Keep each session id with its future so failures can be attributed to a session.
    Map<Long, Future<?>> futures = new HashMap<>();
    for (long sessionId : getRemoteSessionsFromZooKeeper()) {
        futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId)));
    }
    futures.forEach((sessionId, future) -> {
        try {
            future.get();
            log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded");
        } catch (ExecutionException e) {
            log.log(Level.WARNING, "Could not load session " + sessionId, e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt for the caller
            log.log(Level.WARNING, "Could not load session " + sessionId, e);
        }
    });
}
// NOTE(review): this warning omits which session failed and drops the caught exception —
// include the session id and pass the exception as the log cause.
log.log(Level.WARNING, "Could not load session");
/**
 * Loads every remote session found in ZooKeeper in parallel on the supplied executor,
 * then blocks until all loads have completed, logging each outcome per session id.
 *
 * @param executor executor used to run the per-session load tasks
 * @throws NumberFormatException if a session id read from ZooKeeper is not numeric
 */
private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException {
    // One load task per session id, tracked so results can be attributed.
    Map<Long, Future<?>> pending = new HashMap<>();
    for (long id : getRemoteSessionsFromZooKeeper()) {
        pending.put(id, executor.submit(() -> sessionAdded(id)));
    }
    // Await each task; a failed or interrupted load is logged with its session id.
    for (Map.Entry<Long, Future<?>> entry : pending.entrySet()) {
        Long sessionId = entry.getKey();
        try {
            entry.getValue().get();
            log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded");
        } catch (ExecutionException | InterruptedException e) {
            log.log(Level.WARNING, "Could not load session " + sessionId, e);
        }
    }
}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Object monitor = new Object(); private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>()); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final PermanentApplicationPackage permanentApplicationPackage; private final FlagSource flagSource; private final TenantFileSystemDirs tenantFileSystemDirs; private final Metrics metrics; private final MetricUpdater metricUpdater; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final ConfigCurator configCurator; private final SessionCounter sessionCounter; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final TenantListener tenantListener; public SessionRepository(TenantName tenantName, TenantApplications applicationRepo, SessionPreparer sessionPreparer, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, PermanentApplicationPackage permanentApplicationPackage, FlagSource flagSource, 
ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, TenantListener tenantListener) { this.tenantName = tenantName; this.configCurator = ConfigCurator.create(curator); sessionCounter = new SessionCounter(configCurator, tenantName); this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = clock; this.curator = curator; this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime()); this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command); this.permanentApplicationPackage = permanentApplicationPackage; this.flagSource = flagSource; this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.metrics = metrics; this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configserverConfig = configserverConfig; this.configServerDB = configServerDB; this.zone = zone; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.tenantListener = tenantListener; loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("load-sessions-")); loadLocalSessions(executor); loadRemoteSessions(executor); try { executor.shutdown(); if ( ! 
executor.awaitTermination(1, TimeUnit.MINUTES)) log.log(Level.INFO, "Executor did not terminate"); } catch (InterruptedException e) { e.printStackTrace(); } } public void addLocalSession(LocalSession session) { long sessionId = session.getSessionId(); localSessionCache.put(sessionId, session); remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); } public Collection<LocalSession> getLocalSessions() { return localSessionCache.values(); } private void loadLocalSessions(ExecutorService executor) { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; List<Future<Long>> futures = new ArrayList<>(); for (File session : sessions) { long sessionId = Long.parseLong(session.getName()); futures.add(executor.submit(() -> createSessionFromId(sessionId))); } futures.forEach(f -> { try { long sessionId = f.get(); log.log(Level.INFO, () -> "Local session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session"); } }); } public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter(); Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); 
setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) { ApplicationId existingApplicationId = existingSession.getApplicationId(); File existingApp = getSessionAppDir(existingSession.getSessionId()); LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } /** * Creates a local session based on a remote session and the distributed application package. * Does not wait for session being created on other servers. 
*/ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.values()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); remoteSessionCache.put(sessionId, session); loadSessionIfActive(session); updateSessionStateWatcher(sessionId, session); return session; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessionsFromZooKeeper()) { Session session = remoteSessionCache.get(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { 
log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. 
     *
     * @param sessionId session id for the new session
     * @return the given session id
     */
    public long sessionAdded(long sessionId) {
        if (hasStatusDeleted(sessionId)) return sessionId;

        log.log(Level.FINE, () -> "Adding remote session " + sessionId);
        Session session = createRemoteSession(sessionId);
        if (session.getStatus() == Session.Status.NEW) {
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
        createLocalSessionFromDistributedApplicationPackage(sessionId);
        return sessionId;
    }

    /** Returns true if the session state stored in ZooKeeper is DELETE. */
    private boolean hasStatusDeleted(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        return session.getStatus() == Session.Status.DELETE;
    }

    /** Activates the application of the given session and notifies any waiters for activation. */
    void activate(RemoteSession session) {
        long sessionId = session.getSessionId();
        Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId);
        applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter, session);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    /** Deactivates and deletes the remote session, and any corresponding local session. */
    public void delete(Session remoteSession) {
        long sessionId = remoteSession.getSessionId();
        log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId);
        createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit();
        deleteRemoteSessionFromZooKeeper(remoteSession);
        remoteSessionCache.remove(sessionId);
        LocalSession localSession = getLocalSession(sessionId);
        if (localSession != null) {
            log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId);
            deleteLocalSession(localSession);
        }
    }

    // Loads the application of the given session if some active application uses this session id
    private void loadSessionIfActive(RemoteSession session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications())
{ if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo, tenantListener); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (Session session : remoteSessionCache.values()) { sessionMetrics.add(session.getStatus()); } metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); 
            metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    // Reacts to changes in the set of session nodes in ZooKeeper
    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
                default:
                    break;
            }
        });
    }

    /**
     * Deletes expired local sessions, and sessions older than one day that are not the
     * active session of their application.
     *
     * @param activeSessions map from application id to its currently active session id
     */
    public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
        log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
        Set<LocalSession> toDelete = new HashSet<>();
        try {
            for (LocalSession candidate : localSessionCache.values()) {
                Instant createTime = candidate.getCreateTime();
                log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);

                // Expired and not active
                if (hasExpired(candidate) && !isActiveSession(candidate)) {
                    toDelete.add(candidate);
                } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                    // More than one day old, and not the active session of its application
                    Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                    if (applicationId.isEmpty()) continue;
                    Long activeSession = activeSessions.get(applicationId.get());
                    if (activeSession == null || activeSession != candidate.getSessionId()) {
                        toDelete.add(candidate);
                        // NOTE(review): logged before the actual deletion (which happens in toDelete.forEach below),
                        // and applicationId here is an Optional, so the message prints "Optional[...]"
                        log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                            createTime + " for '" + applicationId + "'");
                    }
                }
            }
            toDelete.forEach(this::deleteLocalSession);
        } catch (Throwable e) {
            // Deliberately broad: expiry is best effort and must not take down the caller
            log.log(Level.WARNING, "Error when purging old sessions ", e);
        }
        log.log(Level.FINE, () -> "Done purging old sessions");
    }

    // True when the session is older than the configured session lifetime
    private boolean hasExpired(LocalSession candidate) {
        return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant());
    }

    private boolean isActiveSession(LocalSession candidate) {
        return candidate.getStatus() == Session.Status.ACTIVATE;
    }

    private void
 ensureSessionPathDoesNotExist(long sessionId) {
        // Session ids must be unique: fail if the path already exists in ZooKeeper
        Path sessionPath = getSessionPath(sessionId);
        if (configCurator.exists(sessionPath.getAbsolute())) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    /**
     * Creates a FilesApplicationPackage with deploy metadata for the given application directory.
     *
     * @param userDir the directory the user deployed from (recorded in the deploy metadata)
     * @param configApplicationDir the config server's copy of the application
     */
    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        // Best effort: record the deploying OS user if known, "unknown" otherwise
        String user = System.getenv("USER");
        if (user == null) {
            user = "unknown";
        }
        DeployData deployData = new DeployData(user,
                                               userDir.getAbsolutePath(),
                                               applicationId,
                                               deployTimestamp,
                                               internalRedeploy,
                                               sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    // Allocates a new session id, writes the session to ZooKeeper and waits for other servers to see the upload
    private LocalSession createSessionFromApplication(File applicationFile,
                                                      ApplicationId applicationId,
                                                      boolean internalRedeploy,
                                                      TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy);
            log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
            SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
            sessionZKClient.createNewSession(clock.instant());
            Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
            LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient);
            waiter.awaitCompletion(timeoutBudget.timeLeft());
            addLocalSession(session);
            return session;
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    // Copies the application package into the session dir and creates an ApplicationPackage from it.
    // Body is synchronized on 'monitor', so only one application package is created at a time.
    private ApplicationPackage createApplicationPackage(File applicationFile,
                                                        ApplicationId applicationId,
                                                        long sessionId,
                                                        boolean internalRedeploy) throws IOException {
synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { RemoteSession session = getRemoteSession(sessionId); applicationSet = Optional.ofNullable(ensureApplicationLoaded(session)); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. 
     */
    long createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        createLocalSession(sessionId, applicationPackage);
        return sessionId;
    }

    // Creates a local session from an application package already on disk and adds it to the local session cache
    void createLocalSession(long sessionId, ApplicationPackage applicationPackage) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
        addLocalSession(session);
    }

    /**
     * Returns a new local session for the given session id if it does not already exist.
     * Will also add the session to the local session cache if necessary
     */
    public void createLocalSessionFromDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
            log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
            createSessionFromId(sessionId);
            return;
        }

        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // The package has not been distributed to this server yet, so no local session can be created now
                log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return;
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
createLocalSession(sessionDir, applicationId, sessionId); } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return sessionCounter.nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = configserverConfig.serverId(); return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(remoteSession); } } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public void close() { 
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            // Close all session state watchers by pretending no sessions remain
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    // Synchronizes the remote session cache with the current set of sessions in ZooKeeper
    private void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    // Removes cached sessions (and closes their watchers) that are no longer among the existing sessions
    private void checkForRemovedSessions(List<Long> existingSessions) {
        for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) {
            long sessionId = it.next().sessionId;
            if (existingSessions.contains(sessionId)) continue;

            SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
            if (watcher != null) watcher.close();
            it.remove();
            metricUpdater.incRemovedSessions();
        }
    }

    // Adds sessions present in ZooKeeper that are not yet in the remote session cache
    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (remoteSessionCache.get(sessionId) == null)
                sessionAdded(sessionId);
    }

    // Creates a transaction that sets the session status to ACTIVATE and records it as
    // the active session of its application
    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    public Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); }

    // A transaction that applies a list of file operations when committed
    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }

    }

    /** Factory for
file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
/**
 * Contains state and methods for creating, loading, watching and deleting the local and
 * remote deployment sessions of one tenant.
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session application directories are named by their (numeric) session id
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    // Placeholder used as "currently active session id" when an application has no active session
    private static final long nonExistingActiveSessionId = 0;

    private final Object monitor = new Object();
    private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>());
    private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>());
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>());
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final PermanentApplicationPackage permanentApplicationPackage;
    private final FlagSource flagSource;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final Metrics metrics;
    private final MetricUpdater metricUpdater;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final ConfigCurator configCurator;
    private final SessionCounter sessionCounter;
    private final SecretStore secretStore;
    private final HostProvisionerProvider hostProvisionerProvider;
    private final ConfigserverConfig configserverConfig;
    private final ConfigServerDB configServerDB;
    private final Zone zone;
    private final ModelFactoryRegistry modelFactoryRegistry;
    private final ConfigDefinitionRepo configDefinitionRepo;
    private final TenantListener tenantListener;

    public SessionRepository(TenantName tenantName,
                             TenantApplications applicationRepo,
                             SessionPreparer sessionPreparer,
                             Curator curator,
                             Metrics metrics,
                             StripedExecutor<TenantName> zkWatcherExecutor,
                             PermanentApplicationPackage permanentApplicationPackage,
                             FlagSource flagSource,
                             ExecutorService zkCacheExecutor,
                             SecretStore secretStore,
                             HostProvisionerProvider hostProvisionerProvider,
                             ConfigserverConfig configserverConfig,
                             ConfigServerDB configServerDB,
                             Zone zone,
                             Clock clock,
                             ModelFactoryRegistry modelFactoryRegistry,
                             ConfigDefinitionRepo configDefinitionRepo,
                             TenantListener tenantListener) {
        this.tenantName = tenantName;
        this.configCurator = ConfigCurator.create(curator);
        sessionCounter = new SessionCounter(configCurator, tenantName);
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = clock;
        this.curator = curator;
        this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime());
        // All ZooKeeper watcher callbacks for this tenant are serialized on the striped executor
        this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command);
        this.permanentApplicationPackage = permanentApplicationPackage;
        this.flagSource = flagSource;
        this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.metrics = metrics;
        this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        this.secretStore = secretStore;
        this.hostProvisionerProvider = hostProvisionerProvider;
        this.configserverConfig = configserverConfig;
        this.configServerDB = configServerDB;
        this.zone = zone;
        this.modelFactoryRegistry = modelFactoryRegistry;
        this.configDefinitionRepo = configDefinitionRepo;
        this.tenantListener = tenantListener;

        // Load existing sessions before starting to listen for session changes in ZooKeeper
        loadSessions();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    // Loads local and remote sessions in parallel at construction time
    private void loadSessions() {
        ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
                                                                new DaemonThreadFactory("load-sessions-"));
        loadLocalSessions(executor);
        loadRemoteSessions(executor);
        try {
            executor.shutdown();
            if ( !
 executor.awaitTermination(1, TimeUnit.MINUTES))
                log.log(Level.INFO, "Executor did not terminate");
        } catch (InterruptedException e) {
            // NOTE(review): the interrupt flag is not restored here; consider Thread.currentThread().interrupt()
            log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e));
        }
    }

    // Adds the session to the local cache and ensures a corresponding remote session exists
    public void addLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        localSessionCache.put(sessionId, session);
        remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId));
    }

    public LocalSession getLocalSession(long sessionId) {
        return localSessionCache.get(sessionId);
    }

    public Collection<LocalSession> getLocalSessions() {
        return localSessionCache.values();
    }

    // Creates local sessions, in parallel, from the session directories found on disk
    private void loadLocalSessions(ExecutorService executor) {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return;

        Map<Long, Future<?>> futures = new HashMap<>();
        for (File session : sessions) {
            long sessionId = Long.parseLong(session.getName());
            futures.put(sessionId, executor.submit(() -> createSessionFromId(sessionId)));
        }
        futures.forEach((sessionId, future) -> {
            try {
                future.get();
                log.log(Level.INFO, () -> "Local session " + sessionId + " loaded");
            } catch (ExecutionException | InterruptedException e) {
                // A session that fails to load is logged and skipped; the remaining sessions are still loaded
                log.log(Level.WARNING, "Could not load session " + sessionId, e);
            }
        });
    }

    /**
     * Prepares the application of the given session and returns the resulting config change actions.
     */
    public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter();
        Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              activeApplicationSet, now, getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                                                     .getConfigChangeActions();
        setPrepared(session);
        waiter.awaitCompletion(params.getTimeoutBudget().timeLeft());
        return actions;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget);

        // Copy application level metadata from the session this is based on
        session.setApplicationId(existingApplicationId);
        session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
* @return a new session */ public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } /** * Creates a local session based on a remote session and the distributed application package. * Does not wait for session being created on other servers. */ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.values()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); 
remoteSessionCache.put(sessionId, session); loadSessionIfActive(session); updateSessionStateWatcher(sessionId, session); return session; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessionsFromZooKeeper()) { Session session = remoteSessionCache.get(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. 
* * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { if (hasStatusDeleted(sessionId)) return; log.log(Level.FINE, () -> "Adding remote session " + sessionId); Session session = createRemoteSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } createLocalSessionFromDistributedApplicationPackage(sessionId); } private boolean hasStatusDeleted(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); return session.getStatus() == Session.Status.DELETE; } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId); applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void delete(Session remoteSession) { long sessionId = remoteSession.getSessionId(); log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId); createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit(); deleteRemoteSessionFromZooKeeper(remoteSession); remoteSessionCache.remove(sessionId); LocalSession localSession = getLocalSession(sessionId); if (localSession != null) { log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId); deleteLocalSession(localSession); } } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if 
(applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo, tenantListener); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (Session session : remoteSessionCache.values()) { sessionMetrics.add(session.getStatus()); } metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); 
metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; default: break; } }); } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); Set<LocalSession> toDelete = new HashSet<>(); try { for (LocalSession candidate : localSessionCache.values()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { toDelete.add(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { toDelete.add(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } toDelete.forEach(this::deleteLocalSession); } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } private void 
ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (configCurator.exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); addLocalSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { 
synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { RemoteSession session = getRemoteSession(sessionId); applicationSet = Optional.ofNullable(ensureApplicationLoaded(session)); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. 
*/ void createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); createLocalSession(sessionId, applicationPackage); } void createLocalSession(long sessionId, ApplicationPackage applicationPackage) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); addLocalSession(session); } /** * Returns a new local session for the given session id if it does not already exist. * Will also add the session to the local session cache if necessary */ public void createLocalSessionFromDistributedApplicationPackage(long sessionId) { if (applicationRepo.sessionExistsInFileSystem(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); createSessionFromId(sessionId); return; } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return; } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); createLocalSession(sessionDir, 
applicationId, sessionId); } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return sessionCounter.nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = configserverConfig.serverId(); return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(remoteSession); } } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public void close() { deleteAllSessions(); 
tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> existingSessions) { for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) { long sessionId = it.next().sessionId; if (existingSessions.contains(sessionId)) continue; SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); it.remove(); metricUpdater.incRemovedSessions(); } } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.get(sessionId) == null) sessionAdded(sessionId); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } public Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ 
private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
Any estimates on how much the load on the cluster controller(s) will increase after this change?
ConcurrentSuspensionLimitForCluster getConcurrentSuspensionLimit(ClusterApi clusterApi, boolean enableContentGroupSuspension) { if (enableContentGroupSuspension) { if (clusterApi.serviceType().equals(ServiceType.CLUSTER_CONTROLLER)) { return ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT; } if (Set.of(ServiceType.STORAGE, ServiceType.SEARCH, ServiceType.DISTRIBUTOR, ServiceType.TRANSACTION_LOG_SERVER) .contains(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (clusterApi.serviceType().equals(ServiceType.CONTAINER)) { return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) { if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } return ConcurrentSuspensionLimitForCluster.ALL_NODES; } else if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (Set.of(ServiceType.CONFIG_SERVER, ServiceType.CONTROLLER).contains(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } if (clusterApi.serviceType().equals(ServiceType.HOST_ADMIN)) { return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT; } return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } else { if (clusterApi.isStorageCluster()) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } if (ServiceType.CLUSTER_CONTROLLER.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT; } if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) { if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if 
(clusterApi.getApplication().applicationId().equals(VespaModelUtil.TENANT_HOST_APPLICATION_ID)) { return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT; } return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } }
return ConcurrentSuspensionLimitForCluster.ALL_NODES;
ConcurrentSuspensionLimitForCluster getConcurrentSuspensionLimit(ClusterApi clusterApi, boolean enableContentGroupSuspension) { if (enableContentGroupSuspension) { if (clusterApi.serviceType().equals(ServiceType.CLUSTER_CONTROLLER)) { return ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT; } if (Set.of(ServiceType.STORAGE, ServiceType.SEARCH, ServiceType.DISTRIBUTOR, ServiceType.TRANSACTION_LOG_SERVER) .contains(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (clusterApi.serviceType().equals(ServiceType.CONTAINER)) { return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) { if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } return ConcurrentSuspensionLimitForCluster.ALL_NODES; } else if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (Set.of(ServiceType.CONFIG_SERVER, ServiceType.CONTROLLER).contains(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } if (clusterApi.serviceType().equals(ServiceType.HOST_ADMIN)) { return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT; } return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } else { if (clusterApi.isStorageCluster()) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } if (ServiceType.CLUSTER_CONTROLLER.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT; } if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) { if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if 
(clusterApi.getApplication().applicationId().equals(VespaModelUtil.TENANT_HOST_APPLICATION_ID)) { return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT; } return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } }
class HostedVespaClusterPolicy implements ClusterPolicy { private final BooleanFlag groupSuspensionFlag; public HostedVespaClusterPolicy(FlagSource flagSource) { this.groupSuspensionFlag = Flags.GROUP_SUSPENSION.bindTo(flagSource); } @Override public SuspensionReasons verifyGroupGoingDownIsFine(ClusterApi clusterApi) throws HostStateChangeDeniedException { boolean enableContentGroupSuspension = groupSuspensionFlag .with(FetchVector.Dimension.APPLICATION_ID, clusterApi.getApplication().applicationId().serializedForm()) .value(); if (clusterApi.noServicesOutsideGroupIsDown()) { return SuspensionReasons.nothingNoteworthy(); } int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi, enableContentGroupSuspension).asPercentage(); if (clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() <= percentageOfServicesAllowedToBeDown) { return SuspensionReasons.nothingNoteworthy(); } Optional<SuspensionReasons> suspensionReasons = clusterApi.reasonsForNoServicesInGroupIsUp(); if (suspensionReasons.isPresent()) { return suspensionReasons.get(); } String message = percentageOfServicesAllowedToBeDown <= 0 ? "Suspension of service with type '" + clusterApi.serviceType() + "' not allowed: " + clusterApi.percentageOfServicesDown() + "% are suspended already." + clusterApi.downDescription() : "Suspension of service with type '" + clusterApi.serviceType() + "' would increase from " + clusterApi.percentageOfServicesDown() + "% to " + clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() + "%, over the limit of " + percentageOfServicesAllowedToBeDown + "%." 
+ clusterApi.downDescription(); throw new HostStateChangeDeniedException(clusterApi.getNodeGroup(), ENOUGH_SERVICES_UP_CONSTRAINT, message); } @Override public void verifyGroupGoingDownPermanentlyIsFine(ClusterApi clusterApi) throws HostStateChangeDeniedException { if (clusterApi.noServicesOutsideGroupIsDown()) { return; } int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi, false).asPercentage(); if (clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() <= percentageOfServicesAllowedToBeDown) { return; } throw new HostStateChangeDeniedException( clusterApi.getNodeGroup(), ENOUGH_SERVICES_UP_CONSTRAINT, "Down percentage for service type " + clusterApi.serviceType() + " would increase to " + clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() + "%, over the limit of " + percentageOfServicesAllowedToBeDown + "%." + clusterApi.downDescription()); } }
class HostedVespaClusterPolicy implements ClusterPolicy { private final BooleanFlag groupSuspensionFlag; public HostedVespaClusterPolicy(FlagSource flagSource) { this.groupSuspensionFlag = Flags.GROUP_SUSPENSION.bindTo(flagSource); } @Override public SuspensionReasons verifyGroupGoingDownIsFine(ClusterApi clusterApi) throws HostStateChangeDeniedException { boolean enableContentGroupSuspension = groupSuspensionFlag .with(FetchVector.Dimension.APPLICATION_ID, clusterApi.getApplication().applicationId().serializedForm()) .value(); if (clusterApi.noServicesOutsideGroupIsDown()) { return SuspensionReasons.nothingNoteworthy(); } int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi, enableContentGroupSuspension).asPercentage(); if (clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() <= percentageOfServicesAllowedToBeDown) { return SuspensionReasons.nothingNoteworthy(); } Optional<SuspensionReasons> suspensionReasons = clusterApi.reasonsForNoServicesInGroupIsUp(); if (suspensionReasons.isPresent()) { return suspensionReasons.get(); } String message = percentageOfServicesAllowedToBeDown <= 0 ? "Suspension of service with type '" + clusterApi.serviceType() + "' not allowed: " + clusterApi.percentageOfServicesDown() + "% are suspended already." + clusterApi.downDescription() : "Suspension of service with type '" + clusterApi.serviceType() + "' would increase from " + clusterApi.percentageOfServicesDown() + "% to " + clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() + "%, over the limit of " + percentageOfServicesAllowedToBeDown + "%." 
+ clusterApi.downDescription(); throw new HostStateChangeDeniedException(clusterApi.getNodeGroup(), ENOUGH_SERVICES_UP_CONSTRAINT, message); } @Override public void verifyGroupGoingDownPermanentlyIsFine(ClusterApi clusterApi) throws HostStateChangeDeniedException { if (clusterApi.noServicesOutsideGroupIsDown()) { return; } int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi, false).asPercentage(); if (clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() <= percentageOfServicesAllowedToBeDown) { return; } throw new HostStateChangeDeniedException( clusterApi.getNodeGroup(), ENOUGH_SERVICES_UP_CONSTRAINT, "Down percentage for service type " + clusterApi.serviceType() + " would increase to " + clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() + "%, over the limit of " + percentageOfServicesAllowedToBeDown + "%." + clusterApi.downDescription()); } }
Worst-case, all Docker hosts want to reboot, in which case each content node may end up sending 2 requests (one probe and a follow-up non-probe) per 30s host-admin tick. For large applications there are, say, 50 nodes, so QPS is 3.3. This QPS would be fairly flat, i.e. not bursty. Once all hosts want to reboot we'd hit this max QPS, and it would drop linearly as hosts were rebooted. For a more realistic and common scenario, whenever a large application upgrades, they'll send 1 request/tick, i.e. a max QPS of 1.67 that will drop off linearly. Such high QPS MAY also appear today. I looked at an application with 50 content nodes being upgraded: * The first request set node 92 to maintenance, but took longer than the 10s timeout to complete, so that node retried 30s later on the next tick, at which point it got permission. * In the 20s window in between, there were 30s of accumulated queries: different nodes trying to get permission to suspend. I counted 49 requests, i.e. a QPS of 1.63, confirming the theory. * However, in the 10-13s window there was a burst of queries accumulated from 0-13s, and therefore 4x the normal high QPS. * The latencies were >10s for the queries that actually induce cluster state changes, which is expected. All others have sub-0.1s response times, and spent a cumulative 2.6s, which means the CC can handle a sustained 20 QPS, or a cluster size of 600 for upgrades, or 300 for host suspensions.
ConcurrentSuspensionLimitForCluster getConcurrentSuspensionLimit(ClusterApi clusterApi, boolean enableContentGroupSuspension) { if (enableContentGroupSuspension) { if (clusterApi.serviceType().equals(ServiceType.CLUSTER_CONTROLLER)) { return ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT; } if (Set.of(ServiceType.STORAGE, ServiceType.SEARCH, ServiceType.DISTRIBUTOR, ServiceType.TRANSACTION_LOG_SERVER) .contains(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (clusterApi.serviceType().equals(ServiceType.CONTAINER)) { return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) { if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } return ConcurrentSuspensionLimitForCluster.ALL_NODES; } else if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (Set.of(ServiceType.CONFIG_SERVER, ServiceType.CONTROLLER).contains(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } if (clusterApi.serviceType().equals(ServiceType.HOST_ADMIN)) { return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT; } return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } else { if (clusterApi.isStorageCluster()) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } if (ServiceType.CLUSTER_CONTROLLER.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT; } if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) { if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if 
(clusterApi.getApplication().applicationId().equals(VespaModelUtil.TENANT_HOST_APPLICATION_ID)) { return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT; } return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } }
return ConcurrentSuspensionLimitForCluster.ALL_NODES;
ConcurrentSuspensionLimitForCluster getConcurrentSuspensionLimit(ClusterApi clusterApi, boolean enableContentGroupSuspension) { if (enableContentGroupSuspension) { if (clusterApi.serviceType().equals(ServiceType.CLUSTER_CONTROLLER)) { return ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT; } if (Set.of(ServiceType.STORAGE, ServiceType.SEARCH, ServiceType.DISTRIBUTOR, ServiceType.TRANSACTION_LOG_SERVER) .contains(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (clusterApi.serviceType().equals(ServiceType.CONTAINER)) { return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) { if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } return ConcurrentSuspensionLimitForCluster.ALL_NODES; } else if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (Set.of(ServiceType.CONFIG_SERVER, ServiceType.CONTROLLER).contains(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } if (clusterApi.serviceType().equals(ServiceType.HOST_ADMIN)) { return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT; } return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } else { if (clusterApi.isStorageCluster()) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } if (ServiceType.CLUSTER_CONTROLLER.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT; } if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) { if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) { return ConcurrentSuspensionLimitForCluster.ONE_NODE; } return ConcurrentSuspensionLimitForCluster.ALL_NODES; } if 
(clusterApi.getApplication().applicationId().equals(VespaModelUtil.TENANT_HOST_APPLICATION_ID)) { return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT; } return ConcurrentSuspensionLimitForCluster.TEN_PERCENT; } }
/**
 * Cluster-level suspension policy for hosted Vespa: decides whether a node group may be
 * suspended (temporarily or permanently) without taking too many of a cluster's services down.
 */
class HostedVespaClusterPolicy implements ClusterPolicy {

    // Feature flag gating the group-based suspension limits, resolved per application.
    private final BooleanFlag groupSuspensionFlag;

    public HostedVespaClusterPolicy(FlagSource flagSource) {
        this.groupSuspensionFlag = Flags.GROUP_SUSPENSION.bindTo(flagSource);
    }

    /**
     * Verifies that letting the group go down keeps enough of this cluster's services up.
     *
     * @return the (possibly empty) set of noteworthy reasons allowing the suspension
     * @throws HostStateChangeDeniedException if suspension would exceed the allowed
     *         down-percentage for this cluster
     */
    @Override
    public SuspensionReasons verifyGroupGoingDownIsFine(ClusterApi clusterApi) throws HostStateChangeDeniedException {
        // Resolve the feature flag for this particular application.
        boolean enableContentGroupSuspension = groupSuspensionFlag
                .with(FetchVector.Dimension.APPLICATION_ID, clusterApi.getApplication().applicationId().serializedForm())
                .value();

        // Nothing outside the group is down: always fine.
        if (clusterApi.noServicesOutsideGroupIsDown()) {
            return SuspensionReasons.nothingNoteworthy();
        }

        int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi, enableContentGroupSuspension).asPercentage();
        if (clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() <= percentageOfServicesAllowedToBeDown) {
            return SuspensionReasons.nothingNoteworthy();
        }

        // Over the limit, but if every service in the group is already down anyway,
        // suspension is still permitted (with the reasons attached).
        Optional<SuspensionReasons> suspensionReasons = clusterApi.reasonsForNoServicesInGroupIsUp();
        if (suspensionReasons.isPresent()) {
            return suspensionReasons.get();
        }

        // Build a denial message; a limit of <= 0 means no suspension is allowed at all.
        String message = percentageOfServicesAllowedToBeDown <= 0
                ? "Suspension of service with type '" + clusterApi.serviceType() + "' not allowed: "
                  + clusterApi.percentageOfServicesDown() + "% are suspended already." + clusterApi.downDescription()
                : "Suspension of service with type '" + clusterApi.serviceType()
                  + "' would increase from " + clusterApi.percentageOfServicesDown()
                  + "% to " + clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown()
                  + "%, over the limit of " + percentageOfServicesAllowedToBeDown + "%."
                  + clusterApi.downDescription();

        throw new HostStateChangeDeniedException(clusterApi.getNodeGroup(), ENOUGH_SERVICES_UP_CONSTRAINT, message);
    }

    /**
     * Verifies that permanently removing the group keeps enough services up.
     * Note: always uses the legacy limits (flag argument is hard-coded to false)
     * — presumably intentional for permanent suspension; confirm before changing.
     */
    @Override
    public void verifyGroupGoingDownPermanentlyIsFine(ClusterApi clusterApi) throws HostStateChangeDeniedException {
        if (clusterApi.noServicesOutsideGroupIsDown()) {
            return;
        }

        int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi, false).asPercentage();
        if (clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() <= percentageOfServicesAllowedToBeDown) {
            return;
        }

        throw new HostStateChangeDeniedException(
                clusterApi.getNodeGroup(),
                ENOUGH_SERVICES_UP_CONSTRAINT,
                "Down percentage for service type " + clusterApi.serviceType()
                + " would increase to " + clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown()
                + "%, over the limit of " + percentageOfServicesAllowedToBeDown + "%."
                + clusterApi.downDescription());
    }
}
/**
 * Cluster-level suspension policy for hosted Vespa: decides whether a node group may be
 * suspended (temporarily or permanently) without taking too many of a cluster's services down.
 */
class HostedVespaClusterPolicy implements ClusterPolicy {

    // Feature flag gating the group-based suspension limits, resolved per application.
    private final BooleanFlag groupSuspensionFlag;

    public HostedVespaClusterPolicy(FlagSource flagSource) {
        this.groupSuspensionFlag = Flags.GROUP_SUSPENSION.bindTo(flagSource);
    }

    /**
     * Verifies that letting the group go down keeps enough of this cluster's services up.
     *
     * @return the (possibly empty) set of noteworthy reasons allowing the suspension
     * @throws HostStateChangeDeniedException if suspension would exceed the allowed
     *         down-percentage for this cluster
     */
    @Override
    public SuspensionReasons verifyGroupGoingDownIsFine(ClusterApi clusterApi) throws HostStateChangeDeniedException {
        // Resolve the feature flag for this particular application.
        boolean enableContentGroupSuspension = groupSuspensionFlag
                .with(FetchVector.Dimension.APPLICATION_ID, clusterApi.getApplication().applicationId().serializedForm())
                .value();

        // Nothing outside the group is down: always fine.
        if (clusterApi.noServicesOutsideGroupIsDown()) {
            return SuspensionReasons.nothingNoteworthy();
        }

        int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi, enableContentGroupSuspension).asPercentage();
        if (clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() <= percentageOfServicesAllowedToBeDown) {
            return SuspensionReasons.nothingNoteworthy();
        }

        // Over the limit, but if every service in the group is already down anyway,
        // suspension is still permitted (with the reasons attached).
        Optional<SuspensionReasons> suspensionReasons = clusterApi.reasonsForNoServicesInGroupIsUp();
        if (suspensionReasons.isPresent()) {
            return suspensionReasons.get();
        }

        // Build a denial message; a limit of <= 0 means no suspension is allowed at all.
        String message = percentageOfServicesAllowedToBeDown <= 0
                ? "Suspension of service with type '" + clusterApi.serviceType() + "' not allowed: "
                  + clusterApi.percentageOfServicesDown() + "% are suspended already." + clusterApi.downDescription()
                : "Suspension of service with type '" + clusterApi.serviceType()
                  + "' would increase from " + clusterApi.percentageOfServicesDown()
                  + "% to " + clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown()
                  + "%, over the limit of " + percentageOfServicesAllowedToBeDown + "%."
                  + clusterApi.downDescription();

        throw new HostStateChangeDeniedException(clusterApi.getNodeGroup(), ENOUGH_SERVICES_UP_CONSTRAINT, message);
    }

    /**
     * Verifies that permanently removing the group keeps enough services up.
     * Note: always uses the legacy limits (flag argument is hard-coded to false)
     * — presumably intentional for permanent suspension; confirm before changing.
     */
    @Override
    public void verifyGroupGoingDownPermanentlyIsFine(ClusterApi clusterApi) throws HostStateChangeDeniedException {
        if (clusterApi.noServicesOutsideGroupIsDown()) {
            return;
        }

        int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi, false).asPercentage();
        if (clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() <= percentageOfServicesAllowedToBeDown) {
            return;
        }

        throw new HostStateChangeDeniedException(
                clusterApi.getNodeGroup(),
                ENOUGH_SERVICES_UP_CONSTRAINT,
                "Down percentage for service type " + clusterApi.serviceType()
                + " would increase to " + clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown()
                + "%, over the limit of " + percentageOfServicesAllowedToBeDown + "%."
                + clusterApi.downDescription());
    }
}
Thanks, fixed.
public void testStorageSuspensionLimit_legacy() {
    // Stub a plain (non-admin) storage cluster.
    when(clusterApi.isStorageCluster()).thenReturn(true);
    when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));

    // With group suspension disabled (legacy rules), a storage cluster may only
    // suspend one node at a time.
    ConcurrentSuspensionLimitForCluster limit = policy.getConcurrentSuspensionLimit(clusterApi, false);
    assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE, limit);
}
policy.getConcurrentSuspensionLimit(clusterApi, false));
public void testStorageSuspensionLimit_legacy() {
    // Arrange: an ordinary storage cluster (not the admin cluster).
    when(clusterApi.isStorageCluster()).thenReturn(true);
    when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));

    // Act + assert: legacy rules (flag = false) cap storage clusters at one node.
    ConcurrentSuspensionLimitForCluster actual = policy.getConcurrentSuspensionLimit(clusterApi, false);
    assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE, actual);
}
/**
 * Unit tests for {@link HostedVespaClusterPolicy}. The policy is wrapped in a Mockito spy
 * so {@code getConcurrentSuspensionLimit} can be stubbed in the group-suspension tests.
 */
class HostedVespaClusterPolicyTest {
    private ApplicationApi applicationApi = mock(ApplicationApi.class);
    private ClusterApi clusterApi = mock(ClusterApi.class);
    private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
    private HostedVespaClusterPolicy policy = spy(new HostedVespaClusterPolicy(flagSource));

    @Before
    public void setUp() {
        when(clusterApi.getApplication()).thenReturn(applicationApi);
    }

    @Test
    public void testSlobrokSuspensionLimit() {
        when(clusterApi.clusterId()).thenReturn(VespaModelUtil.ADMIN_CLUSTER_ID);
        when(clusterApi.serviceType()).thenReturn(ServiceType.SLOBROK);
        assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testAdminSuspensionLimit() {
        when(clusterApi.clusterId()).thenReturn(VespaModelUtil.ADMIN_CLUSTER_ID);
        when(clusterApi.serviceType()).thenReturn(new ServiceType("non-slobrok-service-type"));
        assertEquals(ConcurrentSuspensionLimitForCluster.ALL_NODES,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testStorageSuspensionLimit() {
        when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
        when(clusterApi.isStorageCluster()).thenReturn(true);
        assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    // FIX: the annotation was duplicated ("@Test @Test"), which does not compile —
    // @Test is not a repeatable annotation.
    @Test
    public void testTenantHostSuspensionLimit() {
        when(applicationApi.applicationId()).thenReturn(VespaModelUtil.TENANT_HOST_APPLICATION_ID);
        when(clusterApi.isStorageCluster()).thenReturn(false);
        assertEquals(ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testDefaultSuspensionLimit() {
        when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("a:b:c"));
        when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
        when(clusterApi.isStorageCluster()).thenReturn(false);
        assertEquals(ConcurrentSuspensionLimitForCluster.TEN_PERCENT,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void verifyGroupGoingDownIsFine_noServicesOutsideGroupIsDownIsFine() {
        verifyGroupGoingDownIsFine(true, Optional.empty(), 13, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_noServicesInGroupIsUp() {
        var reasons = new SuspensionReasons().addReason(new HostName("host1"), "supension reason 1");
        verifyGroupGoingDownIsFine(false, Optional.of(reasons), 13, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_percentageIsFine() {
        verifyGroupGoingDownIsFine(false, Optional.empty(), 9, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_fails() {
        verifyGroupGoingDownIsFine(false, Optional.empty(), 13, false);
    }

    /**
     * Drives {@code verifyGroupGoingDownIsFine} with the given stubbed cluster state and
     * asserts either success (optionally with expected suspension reasons) or the exact
     * denial message.
     *
     * @param noServicesOutsideGroupIsDown stubbed value for the short-circuit check
     * @param noServicesInGroupIsUp        stubbed reasons when the whole group is already down
     * @param percentageOfServicesDownIfGroupIsAllowedToBeDown projected down-percentage
     * @param expectSuccess                whether the call is expected to succeed
     */
    private void verifyGroupGoingDownIsFine(boolean noServicesOutsideGroupIsDown,
                                            Optional<SuspensionReasons> noServicesInGroupIsUp,
                                            int percentageOfServicesDownIfGroupIsAllowedToBeDown,
                                            boolean expectSuccess) {
        when(clusterApi.noServicesOutsideGroupIsDown()).thenReturn(noServicesOutsideGroupIsDown);
        when(clusterApi.reasonsForNoServicesInGroupIsUp()).thenReturn(noServicesInGroupIsUp);
        // FIX: removed a redundant stub of percentageOfServicesDownIfGroupIsAllowedToBeDown
        // (it returned 20 but was immediately overridden by the parameterized stub below).
        doReturn(ConcurrentSuspensionLimitForCluster.TEN_PERCENT).when(policy).getConcurrentSuspensionLimit(clusterApi, false);
        when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("a:b:c"));
        when(clusterApi.serviceType()).thenReturn(new ServiceType("service-type"));
        when(clusterApi.percentageOfServicesDown()).thenReturn(5);
        when(clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown()).thenReturn(percentageOfServicesDownIfGroupIsAllowedToBeDown);
        when(clusterApi.downDescription()).thenReturn(" Down description");

        NodeGroup nodeGroup = mock(NodeGroup.class);
        when(clusterApi.getNodeGroup()).thenReturn(nodeGroup);
        when(nodeGroup.toCommaSeparatedString()).thenReturn("node-group");

        try {
            SuspensionReasons reasons = policy.verifyGroupGoingDownIsFine(clusterApi);
            if (!expectSuccess) {
                fail();
            }
            if (noServicesInGroupIsUp.isPresent()) {
                assertEquals(noServicesInGroupIsUp.get().getMessagesInOrder(), reasons.getMessagesInOrder());
            }
        } catch (HostStateChangeDeniedException e) {
            if (!expectSuccess) {
                assertEquals("Changing the state of node-group would violate enough-services-up: " +
                        "Suspension of service with type 'service-type' would increase from 5% to 13%, " +
                        "over the limit of 10%. Down description", e.getMessage());
                assertEquals("enough-services-up", e.getConstraintName());
            } else {
                // FIX: previously an unexpected denial was silently swallowed and the
                // test passed; now it fails loudly.
                fail("Expected suspension to be allowed, but it was denied: " + e.getMessage());
            }
        }
    }
}
/**
 * Unit tests for {@link HostedVespaClusterPolicy}. The policy is wrapped in a Mockito spy
 * so {@code getConcurrentSuspensionLimit} can be stubbed in the group-suspension tests.
 */
class HostedVespaClusterPolicyTest {
    private ApplicationApi applicationApi = mock(ApplicationApi.class);
    private ClusterApi clusterApi = mock(ClusterApi.class);
    private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
    private HostedVespaClusterPolicy policy = spy(new HostedVespaClusterPolicy(flagSource));

    @Before
    public void setUp() {
        when(clusterApi.getApplication()).thenReturn(applicationApi);
    }

    @Test
    public void testSlobrokSuspensionLimit() {
        when(clusterApi.clusterId()).thenReturn(VespaModelUtil.ADMIN_CLUSTER_ID);
        when(clusterApi.serviceType()).thenReturn(ServiceType.SLOBROK);
        assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testAdminSuspensionLimit() {
        when(clusterApi.clusterId()).thenReturn(VespaModelUtil.ADMIN_CLUSTER_ID);
        when(clusterApi.serviceType()).thenReturn(new ServiceType("non-slobrok-service-type"));
        assertEquals(ConcurrentSuspensionLimitForCluster.ALL_NODES,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testStorageSuspensionLimit() {
        when(clusterApi.serviceType()).thenReturn(ServiceType.STORAGE);
        when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
        when(clusterApi.isStorageCluster()).thenReturn(true);
        // Group-suspension rules (flag = true): content service types may take down a whole group.
        assertEquals(ConcurrentSuspensionLimitForCluster.ALL_NODES,
                policy.getConcurrentSuspensionLimit(clusterApi, true));
    }

    // FIX: the annotation was duplicated ("@Test @Test"), which does not compile —
    // @Test is not a repeatable annotation.
    @Test
    public void testTenantHostSuspensionLimit() {
        when(applicationApi.applicationId()).thenReturn(VespaModelUtil.TENANT_HOST_APPLICATION_ID);
        when(clusterApi.isStorageCluster()).thenReturn(false);
        assertEquals(ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void testDefaultSuspensionLimit() {
        when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("a:b:c"));
        when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
        when(clusterApi.isStorageCluster()).thenReturn(false);
        assertEquals(ConcurrentSuspensionLimitForCluster.TEN_PERCENT,
                policy.getConcurrentSuspensionLimit(clusterApi, false));
    }

    @Test
    public void verifyGroupGoingDownIsFine_noServicesOutsideGroupIsDownIsFine() {
        verifyGroupGoingDownIsFine(true, Optional.empty(), 13, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_noServicesInGroupIsUp() {
        var reasons = new SuspensionReasons().addReason(new HostName("host1"), "supension reason 1");
        verifyGroupGoingDownIsFine(false, Optional.of(reasons), 13, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_percentageIsFine() {
        verifyGroupGoingDownIsFine(false, Optional.empty(), 9, true);
    }

    @Test
    public void verifyGroupGoingDownIsFine_fails() {
        verifyGroupGoingDownIsFine(false, Optional.empty(), 13, false);
    }

    /**
     * Drives {@code verifyGroupGoingDownIsFine} with the given stubbed cluster state and
     * asserts either success (optionally with expected suspension reasons) or the exact
     * denial message.
     *
     * @param noServicesOutsideGroupIsDown stubbed value for the short-circuit check
     * @param noServicesInGroupIsUp        stubbed reasons when the whole group is already down
     * @param percentageOfServicesDownIfGroupIsAllowedToBeDown projected down-percentage
     * @param expectSuccess                whether the call is expected to succeed
     */
    private void verifyGroupGoingDownIsFine(boolean noServicesOutsideGroupIsDown,
                                            Optional<SuspensionReasons> noServicesInGroupIsUp,
                                            int percentageOfServicesDownIfGroupIsAllowedToBeDown,
                                            boolean expectSuccess) {
        when(clusterApi.noServicesOutsideGroupIsDown()).thenReturn(noServicesOutsideGroupIsDown);
        when(clusterApi.reasonsForNoServicesInGroupIsUp()).thenReturn(noServicesInGroupIsUp);
        // FIX: removed a redundant stub of percentageOfServicesDownIfGroupIsAllowedToBeDown
        // (it returned 20 but was immediately overridden by the parameterized stub below).
        doReturn(ConcurrentSuspensionLimitForCluster.TEN_PERCENT).when(policy).getConcurrentSuspensionLimit(clusterApi, false);
        when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("a:b:c"));
        when(clusterApi.serviceType()).thenReturn(new ServiceType("service-type"));
        when(clusterApi.percentageOfServicesDown()).thenReturn(5);
        when(clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown()).thenReturn(percentageOfServicesDownIfGroupIsAllowedToBeDown);
        when(clusterApi.downDescription()).thenReturn(" Down description");

        NodeGroup nodeGroup = mock(NodeGroup.class);
        when(clusterApi.getNodeGroup()).thenReturn(nodeGroup);
        when(nodeGroup.toCommaSeparatedString()).thenReturn("node-group");

        try {
            SuspensionReasons reasons = policy.verifyGroupGoingDownIsFine(clusterApi);
            if (!expectSuccess) {
                fail();
            }
            if (noServicesInGroupIsUp.isPresent()) {
                assertEquals(noServicesInGroupIsUp.get().getMessagesInOrder(), reasons.getMessagesInOrder());
            }
        } catch (HostStateChangeDeniedException e) {
            if (!expectSuccess) {
                assertEquals("Changing the state of node-group would violate enough-services-up: " +
                        "Suspension of service with type 'service-type' would increase from 5% to 13%, " +
                        "over the limit of 10%. Down description", e.getMessage());
                assertEquals("enough-services-up", e.getConstraintName());
            } else {
                // FIX: previously an unexpected denial was silently swallowed and the
                // test passed; now it fails loudly.
                fail("Expected suspension to be allowed, but it was denied: " + e.getMessage());
            }
        }
    }
}
This change must be a relic of the other PR.
/**
 * Creates (or returns an already-existing) in-memory Tenant for the given name, wiring up
 * its application repository, session preparer and session repository, persisting its
 * metadata and notifying listeners. Idempotent: an existing tenant is returned unchanged.
 *
 * @param tenantName name of the tenant to create
 * @param created    creation timestamp recorded for the tenant
 * @return the existing or newly created tenant
 */
private Tenant createTenant(TenantName tenantName, Instant created) {
    // Already created (e.g. by a concurrent bootstrap or watcher event): reuse it.
    if (tenants.containsKey(tenantName)) return getTenant(tenantName);

    TenantApplications applicationRepo =
            new TenantApplications(tenantName,
                                   curator,
                                   zkWatcherExecutor,
                                   zkCacheExecutor,
                                   metrics,
                                   reloadListener,
                                   configserverConfig,
                                   hostRegistry,
                                   new TenantFileSystemDirs(configServerDB, tenantName),
                                   clock);
    PermanentApplicationPackage permanentApplicationPackage = new PermanentApplicationPackage(configserverConfig);
    SessionPreparer sessionPreparer = new SessionPreparer(modelFactoryRegistry,
                                                          fileDistributionFactory,
                                                          hostProvisionerProvider,
                                                          permanentApplicationPackage,
                                                          configserverConfig,
                                                          configDefinitionRepo,
                                                          curator,
                                                          zone,
                                                          flagSource,
                                                          secretStore);
    SessionRepository sessionRepository = new SessionRepository(tenantName,
                                                                applicationRepo,
                                                                sessionPreparer,
                                                                curator,
                                                                metrics,
                                                                zkWatcherExecutor,
                                                                permanentApplicationPackage,
                                                                flagSource,
                                                                zkCacheExecutor,
                                                                secretStore,
                                                                hostProvisionerProvider,
                                                                configserverConfig,
                                                                configServerDB,
                                                                zone,
                                                                clock,
                                                                modelFactoryRegistry,
                                                                configDefinitionRepo,
                                                                tenantListener);
    log.log(Level.INFO, "Adding tenant '" + tenantName + "'" + ", created " + created);
    // The same applicationRepo instance serves as both application repo and request handler.
    Tenant tenant = new Tenant(tenantName, sessionRepository, applicationRepo, applicationRepo, created);
    createAndWriteTenantMetaData(tenant);
    // putIfAbsent: keep a concurrently inserted instance if one appeared meanwhile.
    tenants.putIfAbsent(tenantName, tenant);
    notifyNewTenant(tenant);
    return tenant;
}
if (tenants.containsKey(tenantName)) return getTenant(tenantName);
/**
 * Creates (or returns an already-existing) in-memory Tenant for the given name, wiring up
 * its application repository, session preparer and session repository, persisting its
 * metadata and notifying listeners. Idempotent: an existing tenant is returned unchanged.
 *
 * @param tenantName name of the tenant to create
 * @param created    creation timestamp recorded for the tenant
 * @return the existing or newly created tenant
 */
private Tenant createTenant(TenantName tenantName, Instant created) {
    // Already created (e.g. by a concurrent bootstrap or watcher event): reuse it.
    if (tenants.containsKey(tenantName)) return getTenant(tenantName);

    TenantApplications applicationRepo =
            new TenantApplications(tenantName,
                                   curator,
                                   zkWatcherExecutor,
                                   zkCacheExecutor,
                                   metrics,
                                   reloadListener,
                                   configserverConfig,
                                   hostRegistry,
                                   new TenantFileSystemDirs(configServerDB, tenantName),
                                   clock);
    PermanentApplicationPackage permanentApplicationPackage = new PermanentApplicationPackage(configserverConfig);
    SessionPreparer sessionPreparer = new SessionPreparer(modelFactoryRegistry,
                                                          fileDistributionFactory,
                                                          hostProvisionerProvider,
                                                          permanentApplicationPackage,
                                                          configserverConfig,
                                                          configDefinitionRepo,
                                                          curator,
                                                          zone,
                                                          flagSource,
                                                          secretStore);
    SessionRepository sessionRepository = new SessionRepository(tenantName,
                                                                applicationRepo,
                                                                sessionPreparer,
                                                                curator,
                                                                metrics,
                                                                zkWatcherExecutor,
                                                                permanentApplicationPackage,
                                                                flagSource,
                                                                zkCacheExecutor,
                                                                secretStore,
                                                                hostProvisionerProvider,
                                                                configserverConfig,
                                                                configServerDB,
                                                                zone,
                                                                clock,
                                                                modelFactoryRegistry,
                                                                configDefinitionRepo,
                                                                tenantListener);
    log.log(Level.INFO, "Adding tenant '" + tenantName + "'" + ", created " + created);
    // The same applicationRepo instance serves as both application repo and request handler.
    Tenant tenant = new Tenant(tenantName, sessionRepository, applicationRepo, applicationRepo, created);
    createAndWriteTenantMetaData(tenant);
    // putIfAbsent: keep a concurrently inserted instance if one appeared meanwhile.
    tenants.putIfAbsent(tenantName, tenant);
    notifyNewTenant(tenant);
    return tenant;
}
/**
 * Holds all tenants of this config server, keeps the in-memory tenant set in sync with
 * ZooKeeper ({@code /config/v2/tenants}), bootstraps all tenants at startup, and
 * periodically removes unused applications.
 *
 * <p>Thread-safety: the tenant map and listener list are synchronized collections, and the
 * mutating operations are {@code synchronized} methods. ZooKeeper watcher events are
 * serialized per tenant through {@code zkWatcherExecutor}.
 */
class TenantRepository {

    public static final TenantName HOSTED_VESPA_TENANT = TenantName.from("hosted-vespa");

    private static final TenantName DEFAULT_TENANT = TenantName.defaultName();

    // ZooKeeper layout roots.
    private static final Path tenantsPath = Path.fromString("/config/v2/tenants/");
    private static final Path locksPath = Path.fromString("/config/v2/locks/");
    private static final Path vespaPath = Path.fromString("/vespa");

    private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1);
    private static final Logger log = Logger.getLogger(TenantRepository.class.getName());

    // Insertion-ordered, synchronized view of all live tenants.
    private final Map<TenantName, Tenant> tenants = Collections.synchronizedMap(new LinkedHashMap<>());
    private final HostRegistry hostRegistry;
    private final List<TenantListener> tenantListeners = Collections.synchronizedList(new ArrayList<>());

    private final Curator curator;
    private final Metrics metrics;
    private final MetricUpdater metricUpdater;
    private final ExecutorService zkCacheExecutor;
    private final StripedExecutor<TenantName> zkWatcherExecutor;
    private final FileDistributionFactory fileDistributionFactory;
    private final FlagSource flagSource;
    private final SecretStore secretStore;
    private final HostProvisionerProvider hostProvisionerProvider;
    private final ConfigserverConfig configserverConfig;
    private final ConfigServerDB configServerDB;
    private final Zone zone;
    private final Clock clock;
    private final ModelFactoryRegistry modelFactoryRegistry;
    private final ConfigDefinitionRepo configDefinitionRepo;
    private final ReloadListener reloadListener;
    private final TenantListener tenantListener;
    private final ExecutorService bootstrapExecutor;
    // Periodic background cleanup of applications no longer in use.
    private final ScheduledExecutorService checkForRemovedApplicationsService =
            new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("check for removed applications"));
    private final Optional<Curator.DirectoryCache> directoryCache;

    /**
     * Creates a new tenant repository
     */
    @Inject
    public TenantRepository(HostRegistry hostRegistry,
                            Curator curator,
                            Metrics metrics,
                            FlagSource flagSource,
                            SecretStore secretStore,
                            HostProvisionerProvider hostProvisionerProvider,
                            ConfigserverConfig configserverConfig,
                            ConfigServerDB configServerDB,
                            Zone zone,
                            ModelFactoryRegistry modelFactoryRegistry,
                            ConfigDefinitionRepo configDefinitionRepo,
                            ReloadListener reloadListener,
                            TenantListener tenantListener) {
        // Injection constructor: supplies default executors, file distribution and clock.
        this(hostRegistry,
             curator,
             metrics,
             new StripedExecutor<>(),
             new FileDistributionFactory(configserverConfig),
             flagSource,
             Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(TenantRepository.class.getName())),
             secretStore,
             hostProvisionerProvider,
             configserverConfig,
             configServerDB,
             zone,
             Clock.systemUTC(),
             modelFactoryRegistry,
             configDefinitionRepo,
             reloadListener,
             tenantListener);
    }

    public TenantRepository(HostRegistry hostRegistry,
                            Curator curator,
                            Metrics metrics,
                            StripedExecutor<TenantName> zkWatcherExecutor,
                            FileDistributionFactory fileDistributionFactory,
                            FlagSource flagSource,
                            ExecutorService zkCacheExecutor,
                            SecretStore secretStore,
                            HostProvisionerProvider hostProvisionerProvider,
                            ConfigserverConfig configserverConfig,
                            ConfigServerDB configServerDB,
                            Zone zone,
                            Clock clock,
                            ModelFactoryRegistry modelFactoryRegistry,
                            ConfigDefinitionRepo configDefinitionRepo,
                            ReloadListener reloadListener,
                            TenantListener tenantListener) {
        this.hostRegistry = hostRegistry;
        this.configserverConfig = configserverConfig;
        this.bootstrapExecutor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(),
                                                              new DaemonThreadFactory("bootstrap tenants"));
        this.curator = curator;
        this.metrics = metrics;
        metricUpdater = metrics.getOrCreateMetricUpdater(Collections.emptyMap());
        this.tenantListeners.add(tenantListener);
        this.zkCacheExecutor = zkCacheExecutor;
        this.zkWatcherExecutor = zkWatcherExecutor;
        this.fileDistributionFactory = fileDistributionFactory;
        this.flagSource = flagSource;
        this.secretStore = secretStore;
        this.hostProvisionerProvider = hostProvisionerProvider;
        this.configServerDB = configServerDB;
        this.zone = zone;
        this.clock = clock;
        this.modelFactoryRegistry = modelFactoryRegistry;
        this.configDefinitionRepo = configDefinitionRepo;
        this.reloadListener = reloadListener;
        this.tenantListener = tenantListener;

        // Track ZooKeeper connection state for metrics.
        curator.framework().getConnectionStateListenable().addListener(this::stateChanged);

        // Ensure the base ZooKeeper paths and system tenants exist before watching/bootstrapping.
        curator.create(tenantsPath);
        curator.create(locksPath);
        createSystemTenants(configserverConfig);
        curator.create(vespaPath);

        // Watch the tenants path so tenants added/removed on other config servers are mirrored here.
        this.directoryCache = Optional.of(curator.createDirectoryCache(tenantsPath.getAbsolute(), false, false, zkCacheExecutor));
        this.directoryCache.get().addListener(this::childEvent);
        this.directoryCache.get().start();
        bootstrapTenants();
        notifyTenantsLoaded();
        checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeUnusedApplications,
                                                                  checkForRemovedApplicationsInterval.getSeconds(),
                                                                  checkForRemovedApplicationsInterval.getSeconds(),
                                                                  TimeUnit.SECONDS);
    }

    /** Tells all listeners that the initial set of tenants has been loaded. */
    private void notifyTenantsLoaded() {
        for (TenantListener tenantListener : tenantListeners) {
            tenantListener.onTenantsLoaded();
        }
    }

    /** Writes the tenant to ZooKeeper and creates it in memory. */
    public synchronized Tenant addTenant(TenantName tenantName) {
        writeTenantPath(tenantName);
        return createTenant(tenantName, clock.instant());
    }

    /** Persists fresh metadata for the given tenant to ZooKeeper. */
    public void createAndWriteTenantMetaData(Tenant tenant) {
        createWriteTenantMetaDataTransaction(createMetaData(tenant)).commit();
    }

    /** Returns a (not yet committed) transaction writing the given tenant metadata. */
    public Transaction createWriteTenantMetaDataTransaction(TenantMetaData tenantMetaData) {
        return new CuratorTransaction(curator).add(
                CuratorOperations.setData(TenantRepository.getTenantPath(tenantMetaData.tenantName()).getAbsolute(),
                                          tenantMetaData.asJsonBytes()));
    }

    /** Builds metadata with the current deploy time; falls back to deploy time when no created time exists. */
    private TenantMetaData createMetaData(Tenant tenant) {
        Instant deployTime = tenant.getSessionRepository().clock().instant();
        Instant createdTime = getTenantMetaData(tenant).createdTimestamp();
        // EPOCH is the sentinel for "no created timestamp recorded yet".
        if (createdTime.equals(Instant.EPOCH))
            createdTime = deployTime;
        return new TenantMetaData(tenant.getName(), deployTime, createdTime);
    }

    /** Reads tenant metadata from ZooKeeper, falling back to the tenant's created time on missing/bad data. */
    public TenantMetaData getTenantMetaData(Tenant tenant) {
        Optional<byte[]> data = getCurator().getData(TenantRepository.getTenantPath(tenant.getName()));
        Optional<TenantMetaData> metaData;
        try {
            metaData = data.map(bytes -> TenantMetaData.fromJsonString(tenant.getName(), Utf8.toString(bytes)));
        } catch (IllegalArgumentException e) {
            // Unparseable metadata: treat as absent rather than failing.
            metaData = Optional.empty();
        }
        return metaData.orElse(new TenantMetaData(tenant.getName(), tenant.getCreatedTime(), tenant.getCreatedTime()));
    }

    /** Returns the names of all tenants currently present in ZooKeeper. */
    private static Set<TenantName> readTenantsFromZooKeeper(Curator curator) {
        return curator.getChildren(tenantsPath).stream().map(TenantName::from).collect(Collectors.toSet());
    }

    /**
     * Loads all tenants found in ZooKeeper in parallel on the bootstrap executor,
     * then shuts the executor down. Throws if any tenant failed to load.
     */
    private void bootstrapTenants() {
        // Submit one loading task per tenant found in ZooKeeper.
        Map<TenantName, Future<?>> futures = new HashMap<>();
        readTenantsFromZooKeeper(curator).forEach(t -> futures.put(t, bootstrapExecutor.submit(() -> bootstrapTenant(t))));

        // Wait for all tasks, collecting the ones that failed.
        Set<TenantName> failed = new HashSet<>();
        for (Map.Entry<TenantName, Future<?>> f : futures.entrySet()) {
            TenantName tenantName = f.getKey();
            try {
                f.getValue().get();
            } catch (ExecutionException e) {
                log.log(Level.WARNING, "Failed to create tenant " + tenantName, e);
                failed.add(tenantName);
            } catch (InterruptedException e) {
                log.log(Level.WARNING, "Interrupted while creating tenant '" + tenantName + "'", e);
            }
        }

        if (failed.size() > 0)
            throw new RuntimeException("Could not create all tenants when bootstrapping, failed to create: " + failed);

        metricUpdater.setTenants(tenants.size());
        // The bootstrap executor is single-use: shut it down once all tenants are loaded.
        bootstrapExecutor.shutdown();
        try {
            bootstrapExecutor.awaitTermination(365, TimeUnit.DAYS);
        } catch (InterruptedException e) {
            throw new RuntimeException("Executor for creating tenants did not terminate within timeout");
        }
    }

    /** Creates a single tenant in memory, using its ZooKeeper node's creation time. */
    protected synchronized void bootstrapTenant(TenantName tenantName) {
        createTenant(tenantName, readCreatedTimeFromZooKeeper(tenantName));
    }

    /** Returns the ZooKeeper node creation time for the tenant, or "now" if the node is missing. */
    public Instant readCreatedTimeFromZooKeeper(TenantName tenantName) {
        Optional<Stat> stat = curator.getStat(getTenantPath(tenantName));
        if (stat.isPresent())
            return Instant.ofEpochMilli(stat.get().getCtime());
        else
            return clock.instant();
    }

    /**
     * Returns a default (compatibility with single tenant config requests) tenant
     *
     * @return default tenant
     */
    public synchronized Tenant defaultTenant() {
        return tenants.get(DEFAULT_TENANT);
    }

    /** Periodic task: asks every tenant to clean up applications no longer in use. */
    private void removeUnusedApplications() {
        getAllTenants().forEach(tenant -> tenant.getApplicationRepo().removeUnusedApplications());
    }

    private void notifyNewTenant(Tenant tenant) {
        for (TenantListener listener : tenantListeners) {
            listener.onTenantCreate(tenant);
        }
    }

    private void notifyRemovedTenant(TenantName name) {
        for (TenantListener listener : tenantListeners) {
            listener.onTenantDelete(name);
        }
    }

    /**
     * Creates the tenants that should always be present into ZooKeeper. Will not fail if the node
     * already exists, as this is OK and might happen when several config servers start at the
     * same time and try to call this method.
     */
    private synchronized void createSystemTenants(ConfigserverConfig configserverConfig) {
        List<TenantName> systemTenants = new ArrayList<>();
        systemTenants.add(DEFAULT_TENANT);
        if (configserverConfig.hostedVespa()) systemTenants.add(HOSTED_VESPA_TENANT);

        for (final TenantName tenantName : systemTenants) {
            try {
                writeTenantPath(tenantName);
            } catch (RuntimeException e) {
                // Another config server won the race to create the node: that is fine.
                if (e.getCause().getClass() != KeeperException.NodeExistsException.class) {
                    throw e;
                }
            }
        }
    }

    /**
     * Writes the path of the given tenant into ZooKeeper, for watchers to react on
     *
     * @param name name of the tenant
     */
    private synchronized void writeTenantPath(TenantName name) {
        curator.createAtomically(TenantRepository.getTenantPath(name),
                                 TenantRepository.getSessionsPath(name),
                                 TenantRepository.getApplicationsPath(name),
                                 TenantRepository.getLocksPath(name));
    }

    /**
     * Removes the given tenant from ZooKeeper and filesystem. Assumes that tenant exists.
     *
     * @param name name of the tenant
     * @throws IllegalArgumentException if the tenant is 'default' or does not exist
     */
    public synchronized void deleteTenant(TenantName name) {
        if (name.equals(DEFAULT_TENANT))
            throw new IllegalArgumentException("Deleting 'default' tenant is not allowed");
        if ( ! tenants.containsKey(name))
            throw new IllegalArgumentException("Deleting '" + name + "' failed, tenant does not exist");

        log.log(Level.INFO, "Deleting tenant '" + name + "'");
        // Capture the ZK path before closing removes the tenant from the map.
        Path path = tenants.get(name).getPath();
        closeTenant(name);
        curator.delete(path);
    }

    /** Removes the tenant from the in-memory map, notifies listeners and closes its resources. */
    private synchronized void closeTenant(TenantName name) {
        Tenant tenant = tenants.remove(name);
        if (tenant == null)
            throw new IllegalArgumentException("Closing '" + name + "' failed, tenant does not exist");

        log.log(Level.INFO, "Closing tenant '" + name + "'");
        notifyRemovedTenant(name);
        tenant.close();
    }

    /**
     * A helper to format a log preamble for messages with a tenant and app id
     *
     * @param app the app
     * @return the log string
     */
    public static String logPre(ApplicationId app) {
        if (DEFAULT_TENANT.equals(app.tenant())) return "";
        StringBuilder ret = new StringBuilder()
                .append(logPre(app.tenant()))
                .append("app:"+app.application().value())
                .append(":"+app.instance().value())
                .append(" ");
        return ret.toString();
    }

    /**
     * A helper to format a log preamble for messages with a tenant
     *
     * @param tenant tenant
     * @return the log string
     */
    public static String logPre(TenantName tenant) {
        if (DEFAULT_TENANT.equals(tenant)) return "";
        StringBuilder ret = new StringBuilder()
                .append("tenant:" + tenant.value())
                .append(" ");
        return ret.toString();
    }

    /** Updates ZooKeeper connection metrics on Curator connection-state transitions. */
    private void stateChanged(CuratorFramework framework, ConnectionState connectionState) {
        switch (connectionState) {
            case CONNECTED:
                metricUpdater.incZKConnected();
                break;
            case SUSPENDED:
                metricUpdater.incZKSuspended();
                break;
            case RECONNECTED:
                metricUpdater.incZKReconnected();
                break;
            case LOST:
                metricUpdater.incZKConnectionLost();
                break;
            case READ_ONLY:
                // NOTE: Should be able to keep the ZooKeeper cache in this state
                break;
        }
    }

    /**
     * Reacts to tenants being added/removed under the tenants path (possibly by another
     * config server): creates or deletes the corresponding in-memory tenant. Work is
     * serialized per tenant via zkWatcherExecutor.
     */
    private void childEvent(CuratorFramework framework, PathChildrenCacheEvent event) {
        switch (event.getType()) {
            case CHILD_ADDED:
                TenantName t1 = getTenantNameFromEvent(event);
                if ( ! tenants.containsKey(t1))
                    zkWatcherExecutor.execute(t1, () -> bootstrapTenant(t1));
                break;
            case CHILD_REMOVED:
                TenantName t2 = getTenantNameFromEvent(event);
                if (tenants.containsKey(t2))
                    zkWatcherExecutor.execute(t2, () -> deleteTenant(t2));
                break;
            default:
                break;
        }
        metricUpdater.setTenants(tenants.size());
    }

    /** Extracts the tenant name (the last path element) from a watcher event. */
    private TenantName getTenantNameFromEvent(PathChildrenCacheEvent event) {
        String path = event.getData().getPath();
        String[] pathElements = path.split("/");
        if (pathElements.length == 0)
            throw new IllegalArgumentException("Path " + path + " does not contain a tenant name");
        return TenantName.from(pathElements[pathElements.length - 1]);
    }

    /** Stops the directory cache and shuts down all executors, waiting briefly for termination. */
    public void close() {
        directoryCache.ifPresent(Curator.DirectoryCache::close);
        try {
            zkCacheExecutor.shutdown();
            checkForRemovedApplicationsService.shutdown();
            zkWatcherExecutor.shutdownAndWait();
            zkCacheExecutor.awaitTermination(50, TimeUnit.SECONDS);
            checkForRemovedApplicationsService.awaitTermination(50, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            log.log(Level.WARNING, "Interrupted while shutting down.", e);
            Thread.currentThread().interrupt();
        }
    }

    public boolean checkThatTenantExists(TenantName tenant) {
        return tenants.containsKey(tenant);
    }

    /** Returns the tenant with the given name, or {@code null} if this does not exist. */
    public Tenant getTenant(TenantName tenantName) {
        return tenants.get(tenantName);
    }

    /** Returns an immutable snapshot of all tenant names. */
    public Set<TenantName> getAllTenantNames() {
        return ImmutableSet.copyOf(tenants.keySet());
    }

    /** Returns an immutable snapshot of all tenants. */
    public Collection<Tenant> getAllTenants() {
        return ImmutableSet.copyOf(tenants.values());
    }

    /**
     * Gets zookeeper path for tenant data
     *
     * @param tenantName tenant name
     * @return a {@link com.yahoo.path.Path} to the zookeeper data for a tenant
     */
    public static Path getTenantPath(TenantName tenantName) {
        return tenantsPath.append(tenantName.value());
    }

    /**
     * Gets zookeeper path for session data for a tenant
     *
     * @param tenantName tenant name
     * @return a {@link com.yahoo.path.Path} to the zookeeper sessions data for a tenant
     */
    public static Path getSessionsPath(TenantName tenantName) {
        return getTenantPath(tenantName).append(Tenant.SESSIONS);
    }

    /**
     * Gets zookeeper path for application data for a tenant
     *
     * @param tenantName tenant name
     * @return a {@link com.yahoo.path.Path} to the zookeeper application data for a tenant
     */
    public static Path getApplicationsPath(TenantName tenantName) {
        return getTenantPath(tenantName).append(Tenant.APPLICATIONS);
    }

    /**
     * Gets zookeeper path for locks for a tenant's applications. This is never cleaned, but shouldn't be a problem.
     */
    public static Path getLocksPath(TenantName tenantName) {
        return locksPath.append(tenantName.value());
    }

    public Curator getCurator() { return curator; }

}
class TenantRepository { public static final TenantName HOSTED_VESPA_TENANT = TenantName.from("hosted-vespa"); private static final TenantName DEFAULT_TENANT = TenantName.defaultName(); private static final Path tenantsPath = Path.fromString("/config/v2/tenants/"); private static final Path locksPath = Path.fromString("/config/v2/locks/"); private static final Path vespaPath = Path.fromString("/vespa"); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private static final Logger log = Logger.getLogger(TenantRepository.class.getName()); private final Map<TenantName, Tenant> tenants = Collections.synchronizedMap(new LinkedHashMap<>()); private final HostRegistry hostRegistry; private final List<TenantListener> tenantListeners = Collections.synchronizedList(new ArrayList<>()); private final Curator curator; private final Metrics metrics; private final MetricUpdater metricUpdater; private final ExecutorService zkCacheExecutor; private final StripedExecutor<TenantName> zkWatcherExecutor; private final FileDistributionFactory fileDistributionFactory; private final FlagSource flagSource; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final Clock clock; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final ReloadListener reloadListener; private final TenantListener tenantListener; private final ExecutorService bootstrapExecutor; private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("check for removed applications")); private final Optional<Curator.DirectoryCache> directoryCache; /** * Creates a new tenant repository * */ @Inject public TenantRepository(HostRegistry hostRegistry, Curator curator, 
Metrics metrics, FlagSource flagSource, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, ReloadListener reloadListener, TenantListener tenantListener) { this(hostRegistry, curator, metrics, new StripedExecutor<>(), new FileDistributionFactory(configserverConfig), flagSource, Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(TenantRepository.class.getName())), secretStore, hostProvisionerProvider, configserverConfig, configServerDB, zone, Clock.systemUTC(), modelFactoryRegistry, configDefinitionRepo, reloadListener, tenantListener); } public TenantRepository(HostRegistry hostRegistry, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, FileDistributionFactory fileDistributionFactory, FlagSource flagSource, ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, ReloadListener reloadListener, TenantListener tenantListener) { this.hostRegistry = hostRegistry; this.configserverConfig = configserverConfig; this.bootstrapExecutor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(), new DaemonThreadFactory("bootstrap tenants")); this.curator = curator; this.metrics = metrics; metricUpdater = metrics.getOrCreateMetricUpdater(Collections.emptyMap()); this.tenantListeners.add(tenantListener); this.zkCacheExecutor = zkCacheExecutor; this.zkWatcherExecutor = zkWatcherExecutor; this.fileDistributionFactory = fileDistributionFactory; this.flagSource = flagSource; this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configServerDB = configServerDB; 
this.zone = zone; this.clock = clock; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.reloadListener = reloadListener; this.tenantListener = tenantListener; curator.framework().getConnectionStateListenable().addListener(this::stateChanged); curator.create(tenantsPath); curator.create(locksPath); createSystemTenants(configserverConfig); curator.create(vespaPath); this.directoryCache = Optional.of(curator.createDirectoryCache(tenantsPath.getAbsolute(), false, false, zkCacheExecutor)); this.directoryCache.get().addListener(this::childEvent); this.directoryCache.get().start(); bootstrapTenants(); notifyTenantsLoaded(); checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeUnusedApplications, checkForRemovedApplicationsInterval.getSeconds(), checkForRemovedApplicationsInterval.getSeconds(), TimeUnit.SECONDS); } private void notifyTenantsLoaded() { for (TenantListener tenantListener : tenantListeners) { tenantListener.onTenantsLoaded(); } } public synchronized Tenant addTenant(TenantName tenantName) { writeTenantPath(tenantName); return createTenant(tenantName, clock.instant()); } public void createAndWriteTenantMetaData(Tenant tenant) { createWriteTenantMetaDataTransaction(createMetaData(tenant)).commit(); } public Transaction createWriteTenantMetaDataTransaction(TenantMetaData tenantMetaData) { return new CuratorTransaction(curator).add( CuratorOperations.setData(TenantRepository.getTenantPath(tenantMetaData.tenantName()).getAbsolute(), tenantMetaData.asJsonBytes())); } private TenantMetaData createMetaData(Tenant tenant) { Instant deployTime = tenant.getSessionRepository().clock().instant(); Instant createdTime = getTenantMetaData(tenant).createdTimestamp(); if (createdTime.equals(Instant.EPOCH)) createdTime = deployTime; return new TenantMetaData(tenant.getName(), deployTime, createdTime); } public TenantMetaData getTenantMetaData(Tenant tenant) { Optional<byte[]> data = 
getCurator().getData(TenantRepository.getTenantPath(tenant.getName())); Optional<TenantMetaData> metaData; try { metaData = data.map(bytes -> TenantMetaData.fromJsonString(tenant.getName(), Utf8.toString(bytes))); } catch (IllegalArgumentException e) { metaData = Optional.empty(); } return metaData.orElse(new TenantMetaData(tenant.getName(), tenant.getCreatedTime(), tenant.getCreatedTime())); } private static Set<TenantName> readTenantsFromZooKeeper(Curator curator) { return curator.getChildren(tenantsPath).stream().map(TenantName::from).collect(Collectors.toSet()); } private void bootstrapTenants() { Map<TenantName, Future<?>> futures = new HashMap<>(); readTenantsFromZooKeeper(curator).forEach(t -> futures.put(t, bootstrapExecutor.submit(() -> bootstrapTenant(t)))); Set<TenantName> failed = new HashSet<>(); for (Map.Entry<TenantName, Future<?>> f : futures.entrySet()) { TenantName tenantName = f.getKey(); try { f.getValue().get(); } catch (ExecutionException e) { log.log(Level.WARNING, "Failed to create tenant " + tenantName, e); failed.add(tenantName); } catch (InterruptedException e) { log.log(Level.WARNING, "Interrupted while creating tenant '" + tenantName + "'", e); } } if (failed.size() > 0) throw new RuntimeException("Could not create all tenants when bootstrapping, failed to create: " + failed); metricUpdater.setTenants(tenants.size()); bootstrapExecutor.shutdown(); try { bootstrapExecutor.awaitTermination(365, TimeUnit.DAYS); } catch (InterruptedException e) { throw new RuntimeException("Executor for creating tenants did not terminate within timeout"); } } protected synchronized void bootstrapTenant(TenantName tenantName) { createTenant(tenantName, readCreatedTimeFromZooKeeper(tenantName)); } public Instant readCreatedTimeFromZooKeeper(TenantName tenantName) { Optional<Stat> stat = curator.getStat(getTenantPath(tenantName)); if (stat.isPresent()) return Instant.ofEpochMilli(stat.get().getCtime()); else return clock.instant(); } /** * Returns a default 
(compatibility with single tenant config requests) tenant * * @return default tenant */ public synchronized Tenant defaultTenant() { return tenants.get(DEFAULT_TENANT); } private void removeUnusedApplications() { getAllTenants().forEach(tenant -> tenant.getApplicationRepo().removeUnusedApplications()); } private void notifyNewTenant(Tenant tenant) { for (TenantListener listener : tenantListeners) { listener.onTenantCreate(tenant); } } private void notifyRemovedTenant(TenantName name) { for (TenantListener listener : tenantListeners) { listener.onTenantDelete(name); } } /** * Creates the tenants that should always be present into ZooKeeper. Will not fail if the node * already exists, as this is OK and might happen when several config servers start at the * same time and try to call this method. */ private synchronized void createSystemTenants(ConfigserverConfig configserverConfig) { List<TenantName> systemTenants = new ArrayList<>(); systemTenants.add(DEFAULT_TENANT); if (configserverConfig.hostedVespa()) systemTenants.add(HOSTED_VESPA_TENANT); for (final TenantName tenantName : systemTenants) { try { writeTenantPath(tenantName); } catch (RuntimeException e) { if (e.getCause().getClass() != KeeperException.NodeExistsException.class) { throw e; } } } } /** * Writes the path of the given tenant into ZooKeeper, for watchers to react on * * @param name name of the tenant */ private synchronized void writeTenantPath(TenantName name) { curator.createAtomically(TenantRepository.getTenantPath(name), TenantRepository.getSessionsPath(name), TenantRepository.getApplicationsPath(name), TenantRepository.getLocksPath(name)); } /** * Removes the given tenant from ZooKeeper and filesystem. Assumes that tenant exists. * * @param name name of the tenant */ public synchronized void deleteTenant(TenantName name) { if (name.equals(DEFAULT_TENANT)) throw new IllegalArgumentException("Deleting 'default' tenant is not allowed"); if ( ! 
tenants.containsKey(name)) throw new IllegalArgumentException("Deleting '" + name + "' failed, tenant does not exist"); log.log(Level.INFO, "Deleting tenant '" + name + "'"); Path path = tenants.get(name).getPath(); closeTenant(name); curator.delete(path); } private synchronized void closeTenant(TenantName name) { Tenant tenant = tenants.remove(name); if (tenant == null) throw new IllegalArgumentException("Closing '" + name + "' failed, tenant does not exist"); log.log(Level.INFO, "Closing tenant '" + name + "'"); notifyRemovedTenant(name); tenant.close(); } /** * A helper to format a log preamble for messages with a tenant and app id * @param app the app * @return the log string */ public static String logPre(ApplicationId app) { if (DEFAULT_TENANT.equals(app.tenant())) return ""; StringBuilder ret = new StringBuilder() .append(logPre(app.tenant())) .append("app:"+app.application().value()) .append(":"+app.instance().value()) .append(" "); return ret.toString(); } /** * A helper to format a log preamble for messages with a tenant * @param tenant tenant * @return the log string */ public static String logPre(TenantName tenant) { if (DEFAULT_TENANT.equals(tenant)) return ""; StringBuilder ret = new StringBuilder() .append("tenant:" + tenant.value()) .append(" "); return ret.toString(); } private void stateChanged(CuratorFramework framework, ConnectionState connectionState) { switch (connectionState) { case CONNECTED: metricUpdater.incZKConnected(); break; case SUSPENDED: metricUpdater.incZKSuspended(); break; case RECONNECTED: metricUpdater.incZKReconnected(); break; case LOST: metricUpdater.incZKConnectionLost(); break; case READ_ONLY: break; } } private void childEvent(CuratorFramework framework, PathChildrenCacheEvent event) { switch (event.getType()) { case CHILD_ADDED: TenantName t1 = getTenantNameFromEvent(event); if ( ! 
tenants.containsKey(t1)) zkWatcherExecutor.execute(t1, () -> bootstrapTenant(t1)); break; case CHILD_REMOVED: TenantName t2 = getTenantNameFromEvent(event); if (tenants.containsKey(t2)) zkWatcherExecutor.execute(t2, () -> deleteTenant(t2)); break; default: break; } metricUpdater.setTenants(tenants.size()); } private TenantName getTenantNameFromEvent(PathChildrenCacheEvent event) { String path = event.getData().getPath(); String[] pathElements = path.split("/"); if (pathElements.length == 0) throw new IllegalArgumentException("Path " + path + " does not contain a tenant name"); return TenantName.from(pathElements[pathElements.length - 1]); } public void close() { directoryCache.ifPresent(Curator.DirectoryCache::close); try { zkCacheExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); zkWatcherExecutor.shutdownAndWait(); zkCacheExecutor.awaitTermination(50, TimeUnit.SECONDS); checkForRemovedApplicationsService.awaitTermination(50, TimeUnit.SECONDS); } catch (InterruptedException e) { log.log(Level.WARNING, "Interrupted while shutting down.", e); Thread.currentThread().interrupt(); } } public boolean checkThatTenantExists(TenantName tenant) { return tenants.containsKey(tenant); } /** Returns the tenant with the given name, or {@code null} if this does not exist. 
*/ public Tenant getTenant(TenantName tenantName) { return tenants.get(tenantName); } public Set<TenantName> getAllTenantNames() { return ImmutableSet.copyOf(tenants.keySet()); } public Collection<Tenant> getAllTenants() { return ImmutableSet.copyOf(tenants.values()); } /** * Gets zookeeper path for tenant data * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper data for a tenant */ public static Path getTenantPath(TenantName tenantName) { return tenantsPath.append(tenantName.value()); } /** * Gets zookeeper path for session data for a tenant * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper sessions data for a tenant */ public static Path getSessionsPath(TenantName tenantName) { return getTenantPath(tenantName).append(Tenant.SESSIONS); } /** * Gets zookeeper path for application data for a tenant * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper application data for a tenant */ public static Path getApplicationsPath(TenantName tenantName) { return getTenantPath(tenantName).append(Tenant.APPLICATIONS); } /** * Gets zookeeper path for locks for a tenant's applications. This is never cleaned, but shouldn't be a problem. */ public static Path getLocksPath(TenantName tenantName) { return locksPath.append(tenantName.value()); } public Curator getCurator() { return curator; } }
ah, as you say "includes changes in #16196"
private Tenant createTenant(TenantName tenantName, Instant created) { if (tenants.containsKey(tenantName)) return getTenant(tenantName); TenantApplications applicationRepo = new TenantApplications(tenantName, curator, zkWatcherExecutor, zkCacheExecutor, metrics, reloadListener, configserverConfig, hostRegistry, new TenantFileSystemDirs(configServerDB, tenantName), clock); PermanentApplicationPackage permanentApplicationPackage = new PermanentApplicationPackage(configserverConfig); SessionPreparer sessionPreparer = new SessionPreparer(modelFactoryRegistry, fileDistributionFactory, hostProvisionerProvider, permanentApplicationPackage, configserverConfig, configDefinitionRepo, curator, zone, flagSource, secretStore); SessionRepository sessionRepository = new SessionRepository(tenantName, applicationRepo, sessionPreparer, curator, metrics, zkWatcherExecutor, permanentApplicationPackage, flagSource, zkCacheExecutor, secretStore, hostProvisionerProvider, configserverConfig, configServerDB, zone, clock, modelFactoryRegistry, configDefinitionRepo, tenantListener); log.log(Level.INFO, "Adding tenant '" + tenantName + "'" + ", created " + created); Tenant tenant = new Tenant(tenantName, sessionRepository, applicationRepo, applicationRepo, created); createAndWriteTenantMetaData(tenant); tenants.putIfAbsent(tenantName, tenant); notifyNewTenant(tenant); return tenant; }
if (tenants.containsKey(tenantName)) return getTenant(tenantName);
private Tenant createTenant(TenantName tenantName, Instant created) { if (tenants.containsKey(tenantName)) return getTenant(tenantName); TenantApplications applicationRepo = new TenantApplications(tenantName, curator, zkWatcherExecutor, zkCacheExecutor, metrics, reloadListener, configserverConfig, hostRegistry, new TenantFileSystemDirs(configServerDB, tenantName), clock); PermanentApplicationPackage permanentApplicationPackage = new PermanentApplicationPackage(configserverConfig); SessionPreparer sessionPreparer = new SessionPreparer(modelFactoryRegistry, fileDistributionFactory, hostProvisionerProvider, permanentApplicationPackage, configserverConfig, configDefinitionRepo, curator, zone, flagSource, secretStore); SessionRepository sessionRepository = new SessionRepository(tenantName, applicationRepo, sessionPreparer, curator, metrics, zkWatcherExecutor, permanentApplicationPackage, flagSource, zkCacheExecutor, secretStore, hostProvisionerProvider, configserverConfig, configServerDB, zone, clock, modelFactoryRegistry, configDefinitionRepo, tenantListener); log.log(Level.INFO, "Adding tenant '" + tenantName + "'" + ", created " + created); Tenant tenant = new Tenant(tenantName, sessionRepository, applicationRepo, applicationRepo, created); createAndWriteTenantMetaData(tenant); tenants.putIfAbsent(tenantName, tenant); notifyNewTenant(tenant); return tenant; }
class TenantRepository { public static final TenantName HOSTED_VESPA_TENANT = TenantName.from("hosted-vespa"); private static final TenantName DEFAULT_TENANT = TenantName.defaultName(); private static final Path tenantsPath = Path.fromString("/config/v2/tenants/"); private static final Path locksPath = Path.fromString("/config/v2/locks/"); private static final Path vespaPath = Path.fromString("/vespa"); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private static final Logger log = Logger.getLogger(TenantRepository.class.getName()); private final Map<TenantName, Tenant> tenants = Collections.synchronizedMap(new LinkedHashMap<>()); private final HostRegistry hostRegistry; private final List<TenantListener> tenantListeners = Collections.synchronizedList(new ArrayList<>()); private final Curator curator; private final Metrics metrics; private final MetricUpdater metricUpdater; private final ExecutorService zkCacheExecutor; private final StripedExecutor<TenantName> zkWatcherExecutor; private final FileDistributionFactory fileDistributionFactory; private final FlagSource flagSource; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final Clock clock; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final ReloadListener reloadListener; private final TenantListener tenantListener; private final ExecutorService bootstrapExecutor; private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("check for removed applications")); private final Optional<Curator.DirectoryCache> directoryCache; /** * Creates a new tenant repository * */ @Inject public TenantRepository(HostRegistry hostRegistry, Curator curator, 
Metrics metrics, FlagSource flagSource, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, ReloadListener reloadListener, TenantListener tenantListener) { this(hostRegistry, curator, metrics, new StripedExecutor<>(), new FileDistributionFactory(configserverConfig), flagSource, Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(TenantRepository.class.getName())), secretStore, hostProvisionerProvider, configserverConfig, configServerDB, zone, Clock.systemUTC(), modelFactoryRegistry, configDefinitionRepo, reloadListener, tenantListener); } public TenantRepository(HostRegistry hostRegistry, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, FileDistributionFactory fileDistributionFactory, FlagSource flagSource, ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, ReloadListener reloadListener, TenantListener tenantListener) { this.hostRegistry = hostRegistry; this.configserverConfig = configserverConfig; this.bootstrapExecutor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(), new DaemonThreadFactory("bootstrap tenants")); this.curator = curator; this.metrics = metrics; metricUpdater = metrics.getOrCreateMetricUpdater(Collections.emptyMap()); this.tenantListeners.add(tenantListener); this.zkCacheExecutor = zkCacheExecutor; this.zkWatcherExecutor = zkWatcherExecutor; this.fileDistributionFactory = fileDistributionFactory; this.flagSource = flagSource; this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configServerDB = configServerDB; 
this.zone = zone; this.clock = clock; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.reloadListener = reloadListener; this.tenantListener = tenantListener; curator.framework().getConnectionStateListenable().addListener(this::stateChanged); curator.create(tenantsPath); curator.create(locksPath); createSystemTenants(configserverConfig); curator.create(vespaPath); this.directoryCache = Optional.of(curator.createDirectoryCache(tenantsPath.getAbsolute(), false, false, zkCacheExecutor)); this.directoryCache.get().addListener(this::childEvent); this.directoryCache.get().start(); bootstrapTenants(); notifyTenantsLoaded(); checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeUnusedApplications, checkForRemovedApplicationsInterval.getSeconds(), checkForRemovedApplicationsInterval.getSeconds(), TimeUnit.SECONDS); } private void notifyTenantsLoaded() { for (TenantListener tenantListener : tenantListeners) { tenantListener.onTenantsLoaded(); } } public synchronized Tenant addTenant(TenantName tenantName) { writeTenantPath(tenantName); return createTenant(tenantName, clock.instant()); } public void createAndWriteTenantMetaData(Tenant tenant) { createWriteTenantMetaDataTransaction(createMetaData(tenant)).commit(); } public Transaction createWriteTenantMetaDataTransaction(TenantMetaData tenantMetaData) { return new CuratorTransaction(curator).add( CuratorOperations.setData(TenantRepository.getTenantPath(tenantMetaData.tenantName()).getAbsolute(), tenantMetaData.asJsonBytes())); } private TenantMetaData createMetaData(Tenant tenant) { Instant deployTime = tenant.getSessionRepository().clock().instant(); Instant createdTime = getTenantMetaData(tenant).createdTimestamp(); if (createdTime.equals(Instant.EPOCH)) createdTime = deployTime; return new TenantMetaData(tenant.getName(), deployTime, createdTime); } public TenantMetaData getTenantMetaData(Tenant tenant) { Optional<byte[]> data = 
getCurator().getData(TenantRepository.getTenantPath(tenant.getName())); Optional<TenantMetaData> metaData; try { metaData = data.map(bytes -> TenantMetaData.fromJsonString(tenant.getName(), Utf8.toString(bytes))); } catch (IllegalArgumentException e) { metaData = Optional.empty(); } return metaData.orElse(new TenantMetaData(tenant.getName(), tenant.getCreatedTime(), tenant.getCreatedTime())); } private static Set<TenantName> readTenantsFromZooKeeper(Curator curator) { return curator.getChildren(tenantsPath).stream().map(TenantName::from).collect(Collectors.toSet()); } private void bootstrapTenants() { Map<TenantName, Future<?>> futures = new HashMap<>(); readTenantsFromZooKeeper(curator).forEach(t -> futures.put(t, bootstrapExecutor.submit(() -> bootstrapTenant(t)))); Set<TenantName> failed = new HashSet<>(); for (Map.Entry<TenantName, Future<?>> f : futures.entrySet()) { TenantName tenantName = f.getKey(); try { f.getValue().get(); } catch (ExecutionException e) { log.log(Level.WARNING, "Failed to create tenant " + tenantName, e); failed.add(tenantName); } catch (InterruptedException e) { log.log(Level.WARNING, "Interrupted while creating tenant '" + tenantName + "'", e); } } if (failed.size() > 0) throw new RuntimeException("Could not create all tenants when bootstrapping, failed to create: " + failed); metricUpdater.setTenants(tenants.size()); bootstrapExecutor.shutdown(); try { bootstrapExecutor.awaitTermination(365, TimeUnit.DAYS); } catch (InterruptedException e) { throw new RuntimeException("Executor for creating tenants did not terminate within timeout"); } } protected synchronized void bootstrapTenant(TenantName tenantName) { createTenant(tenantName, readCreatedTimeFromZooKeeper(tenantName)); } public Instant readCreatedTimeFromZooKeeper(TenantName tenantName) { Optional<Stat> stat = curator.getStat(getTenantPath(tenantName)); if (stat.isPresent()) return Instant.ofEpochMilli(stat.get().getCtime()); else return clock.instant(); } /** * Returns a default 
(compatibility with single tenant config requests) tenant * * @return default tenant */ public synchronized Tenant defaultTenant() { return tenants.get(DEFAULT_TENANT); } private void removeUnusedApplications() { getAllTenants().forEach(tenant -> tenant.getApplicationRepo().removeUnusedApplications()); } private void notifyNewTenant(Tenant tenant) { for (TenantListener listener : tenantListeners) { listener.onTenantCreate(tenant); } } private void notifyRemovedTenant(TenantName name) { for (TenantListener listener : tenantListeners) { listener.onTenantDelete(name); } } /** * Creates the tenants that should always be present into ZooKeeper. Will not fail if the node * already exists, as this is OK and might happen when several config servers start at the * same time and try to call this method. */ private synchronized void createSystemTenants(ConfigserverConfig configserverConfig) { List<TenantName> systemTenants = new ArrayList<>(); systemTenants.add(DEFAULT_TENANT); if (configserverConfig.hostedVespa()) systemTenants.add(HOSTED_VESPA_TENANT); for (final TenantName tenantName : systemTenants) { try { writeTenantPath(tenantName); } catch (RuntimeException e) { if (e.getCause().getClass() != KeeperException.NodeExistsException.class) { throw e; } } } } /** * Writes the path of the given tenant into ZooKeeper, for watchers to react on * * @param name name of the tenant */ private synchronized void writeTenantPath(TenantName name) { curator.createAtomically(TenantRepository.getTenantPath(name), TenantRepository.getSessionsPath(name), TenantRepository.getApplicationsPath(name), TenantRepository.getLocksPath(name)); } /** * Removes the given tenant from ZooKeeper and filesystem. Assumes that tenant exists. * * @param name name of the tenant */ public synchronized void deleteTenant(TenantName name) { if (name.equals(DEFAULT_TENANT)) throw new IllegalArgumentException("Deleting 'default' tenant is not allowed"); if ( ! 
tenants.containsKey(name)) throw new IllegalArgumentException("Deleting '" + name + "' failed, tenant does not exist"); log.log(Level.INFO, "Deleting tenant '" + name + "'"); Path path = tenants.get(name).getPath(); closeTenant(name); curator.delete(path); } private synchronized void closeTenant(TenantName name) { Tenant tenant = tenants.remove(name); if (tenant == null) throw new IllegalArgumentException("Closing '" + name + "' failed, tenant does not exist"); log.log(Level.INFO, "Closing tenant '" + name + "'"); notifyRemovedTenant(name); tenant.close(); } /** * A helper to format a log preamble for messages with a tenant and app id * @param app the app * @return the log string */ public static String logPre(ApplicationId app) { if (DEFAULT_TENANT.equals(app.tenant())) return ""; StringBuilder ret = new StringBuilder() .append(logPre(app.tenant())) .append("app:"+app.application().value()) .append(":"+app.instance().value()) .append(" "); return ret.toString(); } /** * A helper to format a log preamble for messages with a tenant * @param tenant tenant * @return the log string */ public static String logPre(TenantName tenant) { if (DEFAULT_TENANT.equals(tenant)) return ""; StringBuilder ret = new StringBuilder() .append("tenant:" + tenant.value()) .append(" "); return ret.toString(); } private void stateChanged(CuratorFramework framework, ConnectionState connectionState) { switch (connectionState) { case CONNECTED: metricUpdater.incZKConnected(); break; case SUSPENDED: metricUpdater.incZKSuspended(); break; case RECONNECTED: metricUpdater.incZKReconnected(); break; case LOST: metricUpdater.incZKConnectionLost(); break; case READ_ONLY: break; } } private void childEvent(CuratorFramework framework, PathChildrenCacheEvent event) { switch (event.getType()) { case CHILD_ADDED: TenantName t1 = getTenantNameFromEvent(event); if ( ! 
tenants.containsKey(t1)) zkWatcherExecutor.execute(t1, () -> bootstrapTenant(t1)); break; case CHILD_REMOVED: TenantName t2 = getTenantNameFromEvent(event); if (tenants.containsKey(t2)) zkWatcherExecutor.execute(t2, () -> deleteTenant(t2)); break; default: break; } metricUpdater.setTenants(tenants.size()); } private TenantName getTenantNameFromEvent(PathChildrenCacheEvent event) { String path = event.getData().getPath(); String[] pathElements = path.split("/"); if (pathElements.length == 0) throw new IllegalArgumentException("Path " + path + " does not contain a tenant name"); return TenantName.from(pathElements[pathElements.length - 1]); } public void close() { directoryCache.ifPresent(Curator.DirectoryCache::close); try { zkCacheExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); zkWatcherExecutor.shutdownAndWait(); zkCacheExecutor.awaitTermination(50, TimeUnit.SECONDS); checkForRemovedApplicationsService.awaitTermination(50, TimeUnit.SECONDS); } catch (InterruptedException e) { log.log(Level.WARNING, "Interrupted while shutting down.", e); Thread.currentThread().interrupt(); } } public boolean checkThatTenantExists(TenantName tenant) { return tenants.containsKey(tenant); } /** Returns the tenant with the given name, or {@code null} if this does not exist. 
*/ public Tenant getTenant(TenantName tenantName) { return tenants.get(tenantName); } public Set<TenantName> getAllTenantNames() { return ImmutableSet.copyOf(tenants.keySet()); } public Collection<Tenant> getAllTenants() { return ImmutableSet.copyOf(tenants.values()); } /** * Gets zookeeper path for tenant data * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper data for a tenant */ public static Path getTenantPath(TenantName tenantName) { return tenantsPath.append(tenantName.value()); } /** * Gets zookeeper path for session data for a tenant * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper sessions data for a tenant */ public static Path getSessionsPath(TenantName tenantName) { return getTenantPath(tenantName).append(Tenant.SESSIONS); } /** * Gets zookeeper path for application data for a tenant * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper application data for a tenant */ public static Path getApplicationsPath(TenantName tenantName) { return getTenantPath(tenantName).append(Tenant.APPLICATIONS); } /** * Gets zookeeper path for locks for a tenant's applications. This is never cleaned, but shouldn't be a problem. */ public static Path getLocksPath(TenantName tenantName) { return locksPath.append(tenantName.value()); } public Curator getCurator() { return curator; } }
class TenantRepository { public static final TenantName HOSTED_VESPA_TENANT = TenantName.from("hosted-vespa"); private static final TenantName DEFAULT_TENANT = TenantName.defaultName(); private static final Path tenantsPath = Path.fromString("/config/v2/tenants/"); private static final Path locksPath = Path.fromString("/config/v2/locks/"); private static final Path vespaPath = Path.fromString("/vespa"); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private static final Logger log = Logger.getLogger(TenantRepository.class.getName()); private final Map<TenantName, Tenant> tenants = Collections.synchronizedMap(new LinkedHashMap<>()); private final HostRegistry hostRegistry; private final List<TenantListener> tenantListeners = Collections.synchronizedList(new ArrayList<>()); private final Curator curator; private final Metrics metrics; private final MetricUpdater metricUpdater; private final ExecutorService zkCacheExecutor; private final StripedExecutor<TenantName> zkWatcherExecutor; private final FileDistributionFactory fileDistributionFactory; private final FlagSource flagSource; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final Clock clock; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final ReloadListener reloadListener; private final TenantListener tenantListener; private final ExecutorService bootstrapExecutor; private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("check for removed applications")); private final Optional<Curator.DirectoryCache> directoryCache; /** * Creates a new tenant repository * */ @Inject public TenantRepository(HostRegistry hostRegistry, Curator curator, 
Metrics metrics, FlagSource flagSource, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, ReloadListener reloadListener, TenantListener tenantListener) { this(hostRegistry, curator, metrics, new StripedExecutor<>(), new FileDistributionFactory(configserverConfig), flagSource, Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(TenantRepository.class.getName())), secretStore, hostProvisionerProvider, configserverConfig, configServerDB, zone, Clock.systemUTC(), modelFactoryRegistry, configDefinitionRepo, reloadListener, tenantListener); } public TenantRepository(HostRegistry hostRegistry, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, FileDistributionFactory fileDistributionFactory, FlagSource flagSource, ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, ReloadListener reloadListener, TenantListener tenantListener) { this.hostRegistry = hostRegistry; this.configserverConfig = configserverConfig; this.bootstrapExecutor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(), new DaemonThreadFactory("bootstrap tenants")); this.curator = curator; this.metrics = metrics; metricUpdater = metrics.getOrCreateMetricUpdater(Collections.emptyMap()); this.tenantListeners.add(tenantListener); this.zkCacheExecutor = zkCacheExecutor; this.zkWatcherExecutor = zkWatcherExecutor; this.fileDistributionFactory = fileDistributionFactory; this.flagSource = flagSource; this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configServerDB = configServerDB; 
this.zone = zone; this.clock = clock; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.reloadListener = reloadListener; this.tenantListener = tenantListener; curator.framework().getConnectionStateListenable().addListener(this::stateChanged); curator.create(tenantsPath); curator.create(locksPath); createSystemTenants(configserverConfig); curator.create(vespaPath); this.directoryCache = Optional.of(curator.createDirectoryCache(tenantsPath.getAbsolute(), false, false, zkCacheExecutor)); this.directoryCache.get().addListener(this::childEvent); this.directoryCache.get().start(); bootstrapTenants(); notifyTenantsLoaded(); checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeUnusedApplications, checkForRemovedApplicationsInterval.getSeconds(), checkForRemovedApplicationsInterval.getSeconds(), TimeUnit.SECONDS); } private void notifyTenantsLoaded() { for (TenantListener tenantListener : tenantListeners) { tenantListener.onTenantsLoaded(); } } public synchronized Tenant addTenant(TenantName tenantName) { writeTenantPath(tenantName); return createTenant(tenantName, clock.instant()); } public void createAndWriteTenantMetaData(Tenant tenant) { createWriteTenantMetaDataTransaction(createMetaData(tenant)).commit(); } public Transaction createWriteTenantMetaDataTransaction(TenantMetaData tenantMetaData) { return new CuratorTransaction(curator).add( CuratorOperations.setData(TenantRepository.getTenantPath(tenantMetaData.tenantName()).getAbsolute(), tenantMetaData.asJsonBytes())); } private TenantMetaData createMetaData(Tenant tenant) { Instant deployTime = tenant.getSessionRepository().clock().instant(); Instant createdTime = getTenantMetaData(tenant).createdTimestamp(); if (createdTime.equals(Instant.EPOCH)) createdTime = deployTime; return new TenantMetaData(tenant.getName(), deployTime, createdTime); } public TenantMetaData getTenantMetaData(Tenant tenant) { Optional<byte[]> data = 
getCurator().getData(TenantRepository.getTenantPath(tenant.getName())); Optional<TenantMetaData> metaData; try { metaData = data.map(bytes -> TenantMetaData.fromJsonString(tenant.getName(), Utf8.toString(bytes))); } catch (IllegalArgumentException e) { metaData = Optional.empty(); } return metaData.orElse(new TenantMetaData(tenant.getName(), tenant.getCreatedTime(), tenant.getCreatedTime())); } private static Set<TenantName> readTenantsFromZooKeeper(Curator curator) { return curator.getChildren(tenantsPath).stream().map(TenantName::from).collect(Collectors.toSet()); } private void bootstrapTenants() { Map<TenantName, Future<?>> futures = new HashMap<>(); readTenantsFromZooKeeper(curator).forEach(t -> futures.put(t, bootstrapExecutor.submit(() -> bootstrapTenant(t)))); Set<TenantName> failed = new HashSet<>(); for (Map.Entry<TenantName, Future<?>> f : futures.entrySet()) { TenantName tenantName = f.getKey(); try { f.getValue().get(); } catch (ExecutionException e) { log.log(Level.WARNING, "Failed to create tenant " + tenantName, e); failed.add(tenantName); } catch (InterruptedException e) { log.log(Level.WARNING, "Interrupted while creating tenant '" + tenantName + "'", e); } } if (failed.size() > 0) throw new RuntimeException("Could not create all tenants when bootstrapping, failed to create: " + failed); metricUpdater.setTenants(tenants.size()); bootstrapExecutor.shutdown(); try { bootstrapExecutor.awaitTermination(365, TimeUnit.DAYS); } catch (InterruptedException e) { throw new RuntimeException("Executor for creating tenants did not terminate within timeout"); } } protected synchronized void bootstrapTenant(TenantName tenantName) { createTenant(tenantName, readCreatedTimeFromZooKeeper(tenantName)); } public Instant readCreatedTimeFromZooKeeper(TenantName tenantName) { Optional<Stat> stat = curator.getStat(getTenantPath(tenantName)); if (stat.isPresent()) return Instant.ofEpochMilli(stat.get().getCtime()); else return clock.instant(); } /** * Returns a default 
(compatibility with single tenant config requests) tenant * * @return default tenant */ public synchronized Tenant defaultTenant() { return tenants.get(DEFAULT_TENANT); } private void removeUnusedApplications() { getAllTenants().forEach(tenant -> tenant.getApplicationRepo().removeUnusedApplications()); } private void notifyNewTenant(Tenant tenant) { for (TenantListener listener : tenantListeners) { listener.onTenantCreate(tenant); } } private void notifyRemovedTenant(TenantName name) { for (TenantListener listener : tenantListeners) { listener.onTenantDelete(name); } } /** * Creates the tenants that should always be present into ZooKeeper. Will not fail if the node * already exists, as this is OK and might happen when several config servers start at the * same time and try to call this method. */ private synchronized void createSystemTenants(ConfigserverConfig configserverConfig) { List<TenantName> systemTenants = new ArrayList<>(); systemTenants.add(DEFAULT_TENANT); if (configserverConfig.hostedVespa()) systemTenants.add(HOSTED_VESPA_TENANT); for (final TenantName tenantName : systemTenants) { try { writeTenantPath(tenantName); } catch (RuntimeException e) { if (e.getCause().getClass() != KeeperException.NodeExistsException.class) { throw e; } } } } /** * Writes the path of the given tenant into ZooKeeper, for watchers to react on * * @param name name of the tenant */ private synchronized void writeTenantPath(TenantName name) { curator.createAtomically(TenantRepository.getTenantPath(name), TenantRepository.getSessionsPath(name), TenantRepository.getApplicationsPath(name), TenantRepository.getLocksPath(name)); } /** * Removes the given tenant from ZooKeeper and filesystem. Assumes that tenant exists. * * @param name name of the tenant */ public synchronized void deleteTenant(TenantName name) { if (name.equals(DEFAULT_TENANT)) throw new IllegalArgumentException("Deleting 'default' tenant is not allowed"); if ( ! 
tenants.containsKey(name)) throw new IllegalArgumentException("Deleting '" + name + "' failed, tenant does not exist"); log.log(Level.INFO, "Deleting tenant '" + name + "'"); Path path = tenants.get(name).getPath(); closeTenant(name); curator.delete(path); } private synchronized void closeTenant(TenantName name) { Tenant tenant = tenants.remove(name); if (tenant == null) throw new IllegalArgumentException("Closing '" + name + "' failed, tenant does not exist"); log.log(Level.INFO, "Closing tenant '" + name + "'"); notifyRemovedTenant(name); tenant.close(); } /** * A helper to format a log preamble for messages with a tenant and app id * @param app the app * @return the log string */ public static String logPre(ApplicationId app) { if (DEFAULT_TENANT.equals(app.tenant())) return ""; StringBuilder ret = new StringBuilder() .append(logPre(app.tenant())) .append("app:"+app.application().value()) .append(":"+app.instance().value()) .append(" "); return ret.toString(); } /** * A helper to format a log preamble for messages with a tenant * @param tenant tenant * @return the log string */ public static String logPre(TenantName tenant) { if (DEFAULT_TENANT.equals(tenant)) return ""; StringBuilder ret = new StringBuilder() .append("tenant:" + tenant.value()) .append(" "); return ret.toString(); } private void stateChanged(CuratorFramework framework, ConnectionState connectionState) { switch (connectionState) { case CONNECTED: metricUpdater.incZKConnected(); break; case SUSPENDED: metricUpdater.incZKSuspended(); break; case RECONNECTED: metricUpdater.incZKReconnected(); break; case LOST: metricUpdater.incZKConnectionLost(); break; case READ_ONLY: break; } } private void childEvent(CuratorFramework framework, PathChildrenCacheEvent event) { switch (event.getType()) { case CHILD_ADDED: TenantName t1 = getTenantNameFromEvent(event); if ( ! 
tenants.containsKey(t1)) zkWatcherExecutor.execute(t1, () -> bootstrapTenant(t1)); break; case CHILD_REMOVED: TenantName t2 = getTenantNameFromEvent(event); if (tenants.containsKey(t2)) zkWatcherExecutor.execute(t2, () -> deleteTenant(t2)); break; default: break; } metricUpdater.setTenants(tenants.size()); } private TenantName getTenantNameFromEvent(PathChildrenCacheEvent event) { String path = event.getData().getPath(); String[] pathElements = path.split("/"); if (pathElements.length == 0) throw new IllegalArgumentException("Path " + path + " does not contain a tenant name"); return TenantName.from(pathElements[pathElements.length - 1]); } public void close() { directoryCache.ifPresent(Curator.DirectoryCache::close); try { zkCacheExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); zkWatcherExecutor.shutdownAndWait(); zkCacheExecutor.awaitTermination(50, TimeUnit.SECONDS); checkForRemovedApplicationsService.awaitTermination(50, TimeUnit.SECONDS); } catch (InterruptedException e) { log.log(Level.WARNING, "Interrupted while shutting down.", e); Thread.currentThread().interrupt(); } } public boolean checkThatTenantExists(TenantName tenant) { return tenants.containsKey(tenant); } /** Returns the tenant with the given name, or {@code null} if this does not exist. 
*/ public Tenant getTenant(TenantName tenantName) { return tenants.get(tenantName); } public Set<TenantName> getAllTenantNames() { return ImmutableSet.copyOf(tenants.keySet()); } public Collection<Tenant> getAllTenants() { return ImmutableSet.copyOf(tenants.values()); } /** * Gets zookeeper path for tenant data * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper data for a tenant */ public static Path getTenantPath(TenantName tenantName) { return tenantsPath.append(tenantName.value()); } /** * Gets zookeeper path for session data for a tenant * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper sessions data for a tenant */ public static Path getSessionsPath(TenantName tenantName) { return getTenantPath(tenantName).append(Tenant.SESSIONS); } /** * Gets zookeeper path for application data for a tenant * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper application data for a tenant */ public static Path getApplicationsPath(TenantName tenantName) { return getTenantPath(tenantName).append(Tenant.APPLICATIONS); } /** * Gets zookeeper path for locks for a tenant's applications. This is never cleaned, but shouldn't be a problem. */ public static Path getLocksPath(TenantName tenantName) { return locksPath.append(tenantName.value()); } public Curator getCurator() { return curator; } }
It is better to name it isBackend and make its value true as default.
private boolean handleHbResponse(HeartbeatResponse response, boolean isReplay) { switch (response.getType()) { case FRONTEND: { FrontendHbResponse hbResponse = (FrontendHbResponse) response; hbResponse.getBackendId2cpuCores().forEach((backendId, cpuCores) -> { Backend be = nodeMgr.getBackend(backendId); if (be != null && be.getCpuCores() != cpuCores) { be.setCpuCores(cpuCores); BackendCoreStat.setNumOfHardwareCoresOfBe(backendId, cpuCores); } }); Frontend fe = GlobalStateMgr.getCurrentState().getFeByName(hbResponse.getName()); if (fe != null) { return fe.handleHbResponse(hbResponse, isReplay); } break; } case BACKEND: { BackendHbResponse hbResponse = (BackendHbResponse) response; ComputeNode computeNode = nodeMgr.getBackend(hbResponse.getBeId()); boolean isComputeNode = false; if (computeNode == null) { computeNode = nodeMgr.getComputeNode(hbResponse.getBeId()); isComputeNode = true; } if (computeNode != null) { boolean isChanged = computeNode.handleHbResponse(hbResponse, isReplay); if (hbResponse.getStatus() != HbStatus.OK) { ClientPool.backendPool.clearPool(new TNetworkAddress(computeNode.getHost(), computeNode.getBePort())); if (!isReplay && !computeNode.isAlive()) { GlobalStateMgr.getCurrentState().getGlobalTransactionMgr() .abortTxnWhenCoordinateBeDown(computeNode.getHost(), 100); } } else { if (RunMode.allowCreateLakeTable() && !isReplay && !isComputeNode) { int starletPort = computeNode.getStarletPort(); if (starletPort != 0) { String workerAddr = computeNode.getHost() + ":" + starletPort; GlobalStateMgr.getCurrentState().getStarOSAgent().addWorker(computeNode.getId(), workerAddr); } } } return isChanged; } break; } case BROKER: { BrokerHbResponse hbResponse = (BrokerHbResponse) response; FsBroker broker = GlobalStateMgr.getCurrentState().getBrokerMgr().getBroker( hbResponse.getName(), hbResponse.getHost(), hbResponse.getPort()); if (broker != null) { boolean isChanged = broker.handleHbResponse(hbResponse, isReplay); if (hbResponse.getStatus() != 
HbStatus.OK) { ClientPool.brokerPool.clearPool(new TNetworkAddress(broker.ip, broker.port)); } return isChanged; } break; } default: break; } return false; }
boolean isComputeNode = false;
private boolean handleHbResponse(HeartbeatResponse response, boolean isReplay) { switch (response.getType()) { case FRONTEND: { FrontendHbResponse hbResponse = (FrontendHbResponse) response; hbResponse.getBackendId2cpuCores().forEach((backendId, cpuCores) -> { Backend be = nodeMgr.getBackend(backendId); if (be != null && be.getCpuCores() != cpuCores) { be.setCpuCores(cpuCores); BackendCoreStat.setNumOfHardwareCoresOfBe(backendId, cpuCores); } }); Frontend fe = GlobalStateMgr.getCurrentState().getFeByName(hbResponse.getName()); if (fe != null) { return fe.handleHbResponse(hbResponse, isReplay); } break; } case BACKEND: { BackendHbResponse hbResponse = (BackendHbResponse) response; ComputeNode computeNode = nodeMgr.getBackend(hbResponse.getBeId()); boolean isBackend = true; if (computeNode == null) { computeNode = nodeMgr.getComputeNode(hbResponse.getBeId()); isBackend = false; } if (computeNode != null) { boolean isChanged = computeNode.handleHbResponse(hbResponse, isReplay); if (hbResponse.getStatus() != HbStatus.OK) { ClientPool.backendPool.clearPool(new TNetworkAddress(computeNode.getHost(), computeNode.getBePort())); if (!isReplay && !computeNode.isAlive()) { GlobalStateMgr.getCurrentState().getGlobalTransactionMgr() .abortTxnWhenCoordinateBeDown(computeNode.getHost(), 100); } } else { if (RunMode.allowCreateLakeTable() && !isReplay && isBackend) { int starletPort = computeNode.getStarletPort(); if (starletPort != 0) { String workerAddr = computeNode.getHost() + ":" + starletPort; GlobalStateMgr.getCurrentState().getStarOSAgent().addWorker(computeNode.getId(), workerAddr); } } } return isChanged; } break; } case BROKER: { BrokerHbResponse hbResponse = (BrokerHbResponse) response; FsBroker broker = GlobalStateMgr.getCurrentState().getBrokerMgr().getBroker( hbResponse.getName(), hbResponse.getHost(), hbResponse.getPort()); if (broker != null) { boolean isChanged = broker.handleHbResponse(hbResponse, isReplay); if (hbResponse.getStatus() != HbStatus.OK) { 
ClientPool.brokerPool.clearPool(new TNetworkAddress(broker.ip, broker.port)); } return isChanged; } break; } default: break; } return false; }
class HeartbeatMgr extends LeaderDaemon { private static final Logger LOG = LogManager.getLogger(HeartbeatMgr.class); private final ExecutorService executor; private final SystemInfoService nodeMgr; private final HeartbeatFlags heartbeatFlags; private static AtomicReference<TMasterInfo> masterInfo = new AtomicReference<>(); public HeartbeatMgr(SystemInfoService nodeMgr, boolean needRegisterMetric) { super("heartbeat mgr", Config.heartbeat_timeout_second * 1000L); this.nodeMgr = nodeMgr; this.executor = ThreadPoolManager.newDaemonFixedThreadPool(Config.heartbeat_mgr_threads_num, Config.heartbeat_mgr_blocking_queue_size, "heartbeat-mgr-pool", needRegisterMetric); this.heartbeatFlags = new HeartbeatFlags(); } private long computeMinActiveTxnId() { long a = GlobalStateMgr.getCurrentGlobalTransactionMgr().getMinActiveTxnId(); Optional<Long> b = GlobalStateMgr.getCurrentState().getSchemaChangeHandler().getMinActiveTxnId(); return Math.min(a, b.orElse(Long.MAX_VALUE)); } public void setLeader(int clusterId, String token, long epoch) { TMasterInfo tMasterInfo = new TMasterInfo( new TNetworkAddress(FrontendOptions.getLocalHostAddress(), Config.rpc_port), clusterId, epoch); tMasterInfo.setToken(token); tMasterInfo.setHttp_port(Config.http_port); long flags = heartbeatFlags.getHeartbeatFlags(); tMasterInfo.setHeartbeat_flags(flags); tMasterInfo.setMin_active_txn_id(computeMinActiveTxnId()); masterInfo.set(tMasterInfo); } /** * At each round: * 1. send heartbeat to all nodes * 2. 
collect the heartbeat response from all nodes, and handle them */ @Override protected void runAfterCatalogReady() { ImmutableMap<Long, Backend> idToBackendRef = nodeMgr.getIdToBackend(); if (idToBackendRef == null) { return; } List<Future<HeartbeatResponse>> hbResponses = Lists.newArrayList(); for (Backend backend : idToBackendRef.values()) { BackendHeartbeatHandler handler = new BackendHeartbeatHandler(backend); hbResponses.add(executor.submit(handler)); } for (ComputeNode computeNode : nodeMgr.getIdComputeNode().values()) { BackendHeartbeatHandler handler = new BackendHeartbeatHandler(computeNode); hbResponses.add(executor.submit(handler)); } List<Frontend> frontends = GlobalStateMgr.getCurrentState().getFrontends(null); String masterFeNodeName = ""; for (Frontend frontend : frontends) { if (frontend.getHost().equals(masterInfo.get().getNetwork_address().getHostname())) { masterFeNodeName = frontend.getNodeName(); } FrontendHeartbeatHandler handler = new FrontendHeartbeatHandler(frontend, GlobalStateMgr.getCurrentState().getClusterId(), GlobalStateMgr.getCurrentState().getToken()); hbResponses.add(executor.submit(handler)); } Map<String, List<FsBroker>> brokerMap = Maps.newHashMap( GlobalStateMgr.getCurrentState().getBrokerMgr().getBrokerListMap()); for (Map.Entry<String, List<FsBroker>> entry : brokerMap.entrySet()) { for (FsBroker brokerAddress : entry.getValue()) { BrokerHeartbeatHandler handler = new BrokerHeartbeatHandler(entry.getKey(), brokerAddress, masterInfo.get().getNetwork_address().getHostname()); hbResponses.add(executor.submit(handler)); } } HbPackage hbPackage = new HbPackage(); for (Future<HeartbeatResponse> future : hbResponses) { boolean isChanged = false; try { HeartbeatResponse response = future.get(); if (response.getStatus() != HbStatus.OK) { LOG.warn("get bad heartbeat response: {}", response); } isChanged = handleHbResponse(response, false); if (isChanged) { hbPackage.addHbResponse(response); } } catch (InterruptedException | 
ExecutionException e) { LOG.warn("got exception when doing heartbeat", e); } } Map<Long, Integer> backendId2cpuCores = Maps.newHashMap(); idToBackendRef.values().forEach( backend -> backendId2cpuCores.put(backend.getId(), BackendCoreStat.getCoresOfBe(backend.getId()))); hbPackage.addHbResponse(new FrontendHbResponse(masterFeNodeName, Config.query_port, Config.rpc_port, GlobalStateMgr.getCurrentState().getMaxJournalId(), System.currentTimeMillis(), GlobalStateMgr.getCurrentState().getFeStartTime(), Version.STARROCKS_VERSION + "-" + Version.STARROCKS_COMMIT_HASH, backendId2cpuCores)); GlobalStateMgr.getCurrentState().getEditLog().logHeartbeat(hbPackage); } private class BackendHeartbeatHandler implements Callable<HeartbeatResponse> { private ComputeNode computeNode; public BackendHeartbeatHandler(ComputeNode computeNode) { this.computeNode = computeNode; } @Override public HeartbeatResponse call() { long computeNodeId = computeNode.getId(); HeartbeatService.Client client = null; TNetworkAddress beAddr = new TNetworkAddress(computeNode.getHost(), computeNode.getHeartbeatPort()); boolean ok = false; try { client = ClientPool.heartbeatPool.borrowObject(beAddr); TMasterInfo copiedMasterInfo = new TMasterInfo(masterInfo.get()); copiedMasterInfo.setBackend_ip(computeNode.getHost()); long flags = heartbeatFlags.getHeartbeatFlags(); copiedMasterInfo.setHeartbeat_flags(flags); copiedMasterInfo.setBackend_id(computeNodeId); copiedMasterInfo.setMin_active_txn_id(computeMinActiveTxnId()); THeartbeatResult result = client.heartbeat(copiedMasterInfo); ok = true; if (result.getStatus().getStatus_code() == TStatusCode.OK) { TBackendInfo tBackendInfo = result.getBackend_info(); int bePort = tBackendInfo.getBe_port(); int httpPort = tBackendInfo.getHttp_port(); int brpcPort = -1; int starletPort = 0; if (tBackendInfo.isSetBrpc_port()) { brpcPort = tBackendInfo.getBrpc_port(); } if (tBackendInfo.isSetStarlet_port()) { starletPort = tBackendInfo.getStarlet_port(); } String version = ""; 
if (tBackendInfo.isSetVersion()) { version = tBackendInfo.getVersion(); } int cpuCores = tBackendInfo.isSetNum_hardware_cores() ? tBackendInfo.getNum_hardware_cores() : 0; if (tBackendInfo.isSetNum_hardware_cores()) { BackendCoreStat.setNumOfHardwareCoresOfBe(computeNodeId, cpuCores); } BackendHbResponse backendHbResponse = new BackendHbResponse( computeNodeId, bePort, httpPort, brpcPort, starletPort, System.currentTimeMillis(), version, cpuCores); if (tBackendInfo.isSetReboot_time()) { backendHbResponse.setRebootTime(tBackendInfo.getReboot_time()); } return backendHbResponse; } else { return new BackendHbResponse(computeNodeId, result.getStatus().getError_msgs().isEmpty() ? "Unknown error" : result.getStatus().getError_msgs().get(0)); } } catch (Exception e) { LOG.warn("backend heartbeat got exception, addr: {}:{}", computeNode.getHost(), computeNode.getHeartbeatPort(), e); return new BackendHbResponse(computeNodeId, Strings.isNullOrEmpty(e.getMessage()) ? "got exception" : e.getMessage()); } finally { if (ok) { ClientPool.heartbeatPool.returnObject(beAddr, client); } else { ClientPool.heartbeatPool.invalidateObject(beAddr, client); } } } } public static class FrontendHeartbeatHandler implements Callable<HeartbeatResponse> { private Frontend fe; private int clusterId; private String token; public FrontendHeartbeatHandler(Frontend fe, int clusterId, String token) { this.fe = fe; this.clusterId = clusterId; this.token = token; } @Override public HeartbeatResponse call() { if (fe.getHost().equals(GlobalStateMgr.getCurrentState().getSelfNode().first)) { if (GlobalStateMgr.getCurrentState().isReady()) { return new FrontendHbResponse(fe.getNodeName(), Config.query_port, Config.rpc_port, GlobalStateMgr.getCurrentState().getReplayedJournalId(), System.currentTimeMillis(), GlobalStateMgr.getCurrentState().getFeStartTime(), Version.STARROCKS_VERSION + "-" + Version.STARROCKS_COMMIT_HASH); } else { return new FrontendHbResponse(fe.getNodeName(), "not ready"); } } String url 
= "http: + "/api/bootstrap?cluster_id=" + clusterId + "&token=" + token; try { String result = Util.getResultForUrl(url, null, 2000, 2000); /* * return: * {"replayedJournalId":191224,"queryPort":9131,"rpcPort":9121,"status":"OK","msg":"Success"} * {"replayedJournalId":0,"queryPort":0,"rpcPort":0,"status":"FAILED","msg":"not ready"} */ JSONObject root = new JSONObject(result); String status = root.getString("status"); if (!"OK".equals(status)) { return new FrontendHbResponse(fe.getNodeName(), root.getString("msg")); } else { long replayedJournalId = root.getLong(BootstrapFinishAction.REPLAYED_JOURNAL_ID); int queryPort = root.getInt(BootstrapFinishAction.QUERY_PORT); int rpcPort = root.getInt(BootstrapFinishAction.RPC_PORT); long feStartTime = root.getLong(BootstrapFinishAction.FE_START_TIME); String feVersion = root.getString(BootstrapFinishAction.FE_VERSION); return new FrontendHbResponse(fe.getNodeName(), queryPort, rpcPort, replayedJournalId, System.currentTimeMillis(), feStartTime, feVersion); } } catch (Exception e) { return new FrontendHbResponse(fe.getNodeName(), Strings.isNullOrEmpty(e.getMessage()) ? 
"got exception" : e.getMessage()); } } } public static class BrokerHeartbeatHandler implements Callable<HeartbeatResponse> { private String brokerName; private FsBroker broker; private String clientId; public BrokerHeartbeatHandler(String brokerName, FsBroker broker, String clientId) { this.brokerName = brokerName; this.broker = broker; this.clientId = clientId; } @Override public HeartbeatResponse call() { TFileBrokerService.Client client = null; TNetworkAddress addr = new TNetworkAddress(broker.ip, broker.port); boolean ok = false; try { client = ClientPool.brokerPool.borrowObject(addr); TBrokerPingBrokerRequest request = new TBrokerPingBrokerRequest(TBrokerVersion.VERSION_ONE, clientId); TBrokerOperationStatus status = client.ping(request); ok = true; if (status.getStatusCode() != TBrokerOperationStatusCode.OK) { return new BrokerHbResponse(brokerName, broker.ip, broker.port, status.getMessage()); } else { return new BrokerHbResponse(brokerName, broker.ip, broker.port, System.currentTimeMillis()); } } catch (Exception e) { return new BrokerHbResponse(brokerName, broker.ip, broker.port, Strings.isNullOrEmpty(e.getMessage()) ? "got exception" : e.getMessage()); } finally { if (ok) { ClientPool.brokerPool.returnObject(addr, client); } else { ClientPool.brokerPool.invalidateObject(addr, client); } } } } public void replayHearbeat(HbPackage hbPackage) { for (HeartbeatResponse hbResult : hbPackage.getHbResults()) { handleHbResponse(hbResult, true); } } }
class HeartbeatMgr extends LeaderDaemon { private static final Logger LOG = LogManager.getLogger(HeartbeatMgr.class); private final ExecutorService executor; private final SystemInfoService nodeMgr; private final HeartbeatFlags heartbeatFlags; private static AtomicReference<TMasterInfo> masterInfo = new AtomicReference<>(); public HeartbeatMgr(SystemInfoService nodeMgr, boolean needRegisterMetric) { super("heartbeat mgr", Config.heartbeat_timeout_second * 1000L); this.nodeMgr = nodeMgr; this.executor = ThreadPoolManager.newDaemonFixedThreadPool(Config.heartbeat_mgr_threads_num, Config.heartbeat_mgr_blocking_queue_size, "heartbeat-mgr-pool", needRegisterMetric); this.heartbeatFlags = new HeartbeatFlags(); } private long computeMinActiveTxnId() { long a = GlobalStateMgr.getCurrentGlobalTransactionMgr().getMinActiveTxnId(); Optional<Long> b = GlobalStateMgr.getCurrentState().getSchemaChangeHandler().getMinActiveTxnId(); return Math.min(a, b.orElse(Long.MAX_VALUE)); } public void setLeader(int clusterId, String token, long epoch) { TMasterInfo tMasterInfo = new TMasterInfo( new TNetworkAddress(FrontendOptions.getLocalHostAddress(), Config.rpc_port), clusterId, epoch); tMasterInfo.setToken(token); tMasterInfo.setHttp_port(Config.http_port); long flags = heartbeatFlags.getHeartbeatFlags(); tMasterInfo.setHeartbeat_flags(flags); tMasterInfo.setMin_active_txn_id(computeMinActiveTxnId()); masterInfo.set(tMasterInfo); } /** * At each round: * 1. send heartbeat to all nodes * 2. 
collect the heartbeat response from all nodes, and handle them */ @Override protected void runAfterCatalogReady() { ImmutableMap<Long, Backend> idToBackendRef = nodeMgr.getIdToBackend(); if (idToBackendRef == null) { return; } List<Future<HeartbeatResponse>> hbResponses = Lists.newArrayList(); for (Backend backend : idToBackendRef.values()) { BackendHeartbeatHandler handler = new BackendHeartbeatHandler(backend); hbResponses.add(executor.submit(handler)); } for (ComputeNode computeNode : nodeMgr.getIdComputeNode().values()) { BackendHeartbeatHandler handler = new BackendHeartbeatHandler(computeNode); hbResponses.add(executor.submit(handler)); } List<Frontend> frontends = GlobalStateMgr.getCurrentState().getFrontends(null); String masterFeNodeName = ""; for (Frontend frontend : frontends) { if (frontend.getHost().equals(masterInfo.get().getNetwork_address().getHostname())) { masterFeNodeName = frontend.getNodeName(); } FrontendHeartbeatHandler handler = new FrontendHeartbeatHandler(frontend, GlobalStateMgr.getCurrentState().getClusterId(), GlobalStateMgr.getCurrentState().getToken()); hbResponses.add(executor.submit(handler)); } Map<String, List<FsBroker>> brokerMap = Maps.newHashMap( GlobalStateMgr.getCurrentState().getBrokerMgr().getBrokerListMap()); for (Map.Entry<String, List<FsBroker>> entry : brokerMap.entrySet()) { for (FsBroker brokerAddress : entry.getValue()) { BrokerHeartbeatHandler handler = new BrokerHeartbeatHandler(entry.getKey(), brokerAddress, masterInfo.get().getNetwork_address().getHostname()); hbResponses.add(executor.submit(handler)); } } HbPackage hbPackage = new HbPackage(); for (Future<HeartbeatResponse> future : hbResponses) { boolean isChanged = false; try { HeartbeatResponse response = future.get(); if (response.getStatus() != HbStatus.OK) { LOG.warn("get bad heartbeat response: {}", response); } isChanged = handleHbResponse(response, false); if (isChanged) { hbPackage.addHbResponse(response); } } catch (InterruptedException | 
ExecutionException e) { LOG.warn("got exception when doing heartbeat", e); } } Map<Long, Integer> backendId2cpuCores = Maps.newHashMap(); idToBackendRef.values().forEach( backend -> backendId2cpuCores.put(backend.getId(), BackendCoreStat.getCoresOfBe(backend.getId()))); hbPackage.addHbResponse(new FrontendHbResponse(masterFeNodeName, Config.query_port, Config.rpc_port, GlobalStateMgr.getCurrentState().getMaxJournalId(), System.currentTimeMillis(), GlobalStateMgr.getCurrentState().getFeStartTime(), Version.STARROCKS_VERSION + "-" + Version.STARROCKS_COMMIT_HASH, backendId2cpuCores)); GlobalStateMgr.getCurrentState().getEditLog().logHeartbeat(hbPackage); } private class BackendHeartbeatHandler implements Callable<HeartbeatResponse> { private ComputeNode computeNode; public BackendHeartbeatHandler(ComputeNode computeNode) { this.computeNode = computeNode; } @Override public HeartbeatResponse call() { long computeNodeId = computeNode.getId(); HeartbeatService.Client client = null; TNetworkAddress beAddr = new TNetworkAddress(computeNode.getHost(), computeNode.getHeartbeatPort()); boolean ok = false; try { client = ClientPool.heartbeatPool.borrowObject(beAddr); TMasterInfo copiedMasterInfo = new TMasterInfo(masterInfo.get()); copiedMasterInfo.setBackend_ip(computeNode.getHost()); long flags = heartbeatFlags.getHeartbeatFlags(); copiedMasterInfo.setHeartbeat_flags(flags); copiedMasterInfo.setBackend_id(computeNodeId); copiedMasterInfo.setMin_active_txn_id(computeMinActiveTxnId()); THeartbeatResult result = client.heartbeat(copiedMasterInfo); ok = true; if (result.getStatus().getStatus_code() == TStatusCode.OK) { TBackendInfo tBackendInfo = result.getBackend_info(); int bePort = tBackendInfo.getBe_port(); int httpPort = tBackendInfo.getHttp_port(); int brpcPort = -1; int starletPort = 0; if (tBackendInfo.isSetBrpc_port()) { brpcPort = tBackendInfo.getBrpc_port(); } if (tBackendInfo.isSetStarlet_port()) { starletPort = tBackendInfo.getStarlet_port(); } String version = ""; 
if (tBackendInfo.isSetVersion()) { version = tBackendInfo.getVersion(); } int cpuCores = tBackendInfo.isSetNum_hardware_cores() ? tBackendInfo.getNum_hardware_cores() : 0; if (tBackendInfo.isSetNum_hardware_cores()) { BackendCoreStat.setNumOfHardwareCoresOfBe(computeNodeId, cpuCores); } BackendHbResponse backendHbResponse = new BackendHbResponse( computeNodeId, bePort, httpPort, brpcPort, starletPort, System.currentTimeMillis(), version, cpuCores); if (tBackendInfo.isSetReboot_time()) { backendHbResponse.setRebootTime(tBackendInfo.getReboot_time()); } return backendHbResponse; } else { return new BackendHbResponse(computeNodeId, result.getStatus().getError_msgs().isEmpty() ? "Unknown error" : result.getStatus().getError_msgs().get(0)); } } catch (Exception e) { LOG.warn("backend heartbeat got exception, addr: {}:{}", computeNode.getHost(), computeNode.getHeartbeatPort(), e); return new BackendHbResponse(computeNodeId, Strings.isNullOrEmpty(e.getMessage()) ? "got exception" : e.getMessage()); } finally { if (ok) { ClientPool.heartbeatPool.returnObject(beAddr, client); } else { ClientPool.heartbeatPool.invalidateObject(beAddr, client); } } } } public static class FrontendHeartbeatHandler implements Callable<HeartbeatResponse> { private Frontend fe; private int clusterId; private String token; public FrontendHeartbeatHandler(Frontend fe, int clusterId, String token) { this.fe = fe; this.clusterId = clusterId; this.token = token; } @Override public HeartbeatResponse call() { if (fe.getHost().equals(GlobalStateMgr.getCurrentState().getSelfNode().first)) { if (GlobalStateMgr.getCurrentState().isReady()) { return new FrontendHbResponse(fe.getNodeName(), Config.query_port, Config.rpc_port, GlobalStateMgr.getCurrentState().getReplayedJournalId(), System.currentTimeMillis(), GlobalStateMgr.getCurrentState().getFeStartTime(), Version.STARROCKS_VERSION + "-" + Version.STARROCKS_COMMIT_HASH); } else { return new FrontendHbResponse(fe.getNodeName(), "not ready"); } } String url 
= "http: + "/api/bootstrap?cluster_id=" + clusterId + "&token=" + token; try { String result = Util.getResultForUrl(url, null, 2000, 2000); /* * return: * {"replayedJournalId":191224,"queryPort":9131,"rpcPort":9121,"status":"OK","msg":"Success"} * {"replayedJournalId":0,"queryPort":0,"rpcPort":0,"status":"FAILED","msg":"not ready"} */ JSONObject root = new JSONObject(result); String status = root.getString("status"); if (!"OK".equals(status)) { return new FrontendHbResponse(fe.getNodeName(), root.getString("msg")); } else { long replayedJournalId = root.getLong(BootstrapFinishAction.REPLAYED_JOURNAL_ID); int queryPort = root.getInt(BootstrapFinishAction.QUERY_PORT); int rpcPort = root.getInt(BootstrapFinishAction.RPC_PORT); long feStartTime = root.getLong(BootstrapFinishAction.FE_START_TIME); String feVersion = root.getString(BootstrapFinishAction.FE_VERSION); return new FrontendHbResponse(fe.getNodeName(), queryPort, rpcPort, replayedJournalId, System.currentTimeMillis(), feStartTime, feVersion); } } catch (Exception e) { return new FrontendHbResponse(fe.getNodeName(), Strings.isNullOrEmpty(e.getMessage()) ? 
"got exception" : e.getMessage()); } } } public static class BrokerHeartbeatHandler implements Callable<HeartbeatResponse> { private String brokerName; private FsBroker broker; private String clientId; public BrokerHeartbeatHandler(String brokerName, FsBroker broker, String clientId) { this.brokerName = brokerName; this.broker = broker; this.clientId = clientId; } @Override public HeartbeatResponse call() { TFileBrokerService.Client client = null; TNetworkAddress addr = new TNetworkAddress(broker.ip, broker.port); boolean ok = false; try { client = ClientPool.brokerPool.borrowObject(addr); TBrokerPingBrokerRequest request = new TBrokerPingBrokerRequest(TBrokerVersion.VERSION_ONE, clientId); TBrokerOperationStatus status = client.ping(request); ok = true; if (status.getStatusCode() != TBrokerOperationStatusCode.OK) { return new BrokerHbResponse(brokerName, broker.ip, broker.port, status.getMessage()); } else { return new BrokerHbResponse(brokerName, broker.ip, broker.port, System.currentTimeMillis()); } } catch (Exception e) { return new BrokerHbResponse(brokerName, broker.ip, broker.port, Strings.isNullOrEmpty(e.getMessage()) ? "got exception" : e.getMessage()); } finally { if (ok) { ClientPool.brokerPool.returnObject(addr, client); } else { ClientPool.brokerPool.invalidateObject(addr, client); } } } } public void replayHearbeat(HbPackage hbPackage) { for (HeartbeatResponse hbResult : hbPackage.getHbResults()) { handleHbResponse(hbResult, true); } } }
yup
/**
 * Creates and registers the in-memory components for one tenant (application repo,
 * session preparer/repository) and notifies listeners. Returns the existing instance
 * when the tenant is already present, so repeated calls are harmless.
 */
private Tenant createTenant(TenantName tenantName, Instant created) {
    if (tenants.containsKey(tenantName)) return getTenant(tenantName);

    TenantFileSystemDirs fileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName);
    TenantApplications applications = new TenantApplications(tenantName, curator, zkWatcherExecutor,
                                                             zkCacheExecutor, metrics, reloadListener,
                                                             configserverConfig, hostRegistry,
                                                             fileSystemDirs, clock);
    PermanentApplicationPackage permanentPackage = new PermanentApplicationPackage(configserverConfig);
    SessionPreparer preparer = new SessionPreparer(modelFactoryRegistry, fileDistributionFactory,
                                                   hostProvisionerProvider, permanentPackage,
                                                   configserverConfig, configDefinitionRepo, curator,
                                                   zone, flagSource, secretStore);
    SessionRepository sessions = new SessionRepository(tenantName, applications, preparer, curator, metrics,
                                                       zkWatcherExecutor, permanentPackage, flagSource,
                                                       zkCacheExecutor, secretStore, hostProvisionerProvider,
                                                       configserverConfig, configServerDB, zone, clock,
                                                       modelFactoryRegistry, configDefinitionRepo,
                                                       tenantListener);

    log.log(Level.INFO, "Adding tenant '" + tenantName + "'" + ", created " + created);
    // The applications repo serves both as application repo and request handler for the tenant.
    Tenant tenant = new Tenant(tenantName, sessions, applications, applications, created);
    createAndWriteTenantMetaData(tenant);
    tenants.putIfAbsent(tenantName, tenant);
    notifyNewTenant(tenant);
    return tenant;
}
if (tenants.containsKey(tenantName)) return getTenant(tenantName);
/**
 * Creates the in-memory structures for a tenant (application repository, session
 * preparer and session repository), registers it in the tenants map, writes its
 * metadata and notifies tenant listeners.
 *
 * @param tenantName name of the tenant to create
 * @param created    creation instant stored on the new Tenant
 * @return the newly created tenant, or the already-registered one if it exists
 */
private Tenant createTenant(TenantName tenantName, Instant created) {
    // Idempotence guard: if the tenant is already registered, return the existing instance.
    if (tenants.containsKey(tenantName)) return getTenant(tenantName);
    TenantApplications applicationRepo =
            new TenantApplications(tenantName, curator, zkWatcherExecutor, zkCacheExecutor, metrics,
                                   reloadListener, configserverConfig, hostRegistry,
                                   new TenantFileSystemDirs(configServerDB, tenantName), clock);
    PermanentApplicationPackage permanentApplicationPackage = new PermanentApplicationPackage(configserverConfig);
    SessionPreparer sessionPreparer = new SessionPreparer(modelFactoryRegistry, fileDistributionFactory,
                                                          hostProvisionerProvider, permanentApplicationPackage,
                                                          configserverConfig, configDefinitionRepo, curator,
                                                          zone, flagSource, secretStore);
    SessionRepository sessionRepository = new SessionRepository(tenantName, applicationRepo, sessionPreparer,
                                                               curator, metrics, zkWatcherExecutor,
                                                               permanentApplicationPackage, flagSource,
                                                               zkCacheExecutor, secretStore,
                                                               hostProvisionerProvider, configserverConfig,
                                                               configServerDB, zone, clock, modelFactoryRegistry,
                                                               configDefinitionRepo, tenantListener);
    log.log(Level.INFO, "Adding tenant '" + tenantName + "'" + ", created " + created);
    // applicationRepo is passed twice — presumably once as application repo and once as
    // request handler; TODO(review) confirm against the Tenant constructor.
    Tenant tenant = new Tenant(tenantName, sessionRepository, applicationRepo, applicationRepo, created);
    createAndWriteTenantMetaData(tenant);
    // putIfAbsent keeps the first registration if a concurrent caller raced past the guard above.
    tenants.putIfAbsent(tenantName, tenant);
    notifyNewTenant(tenant);
    return tenant;
}
/**
 * Keeps the set of tenants known to this config server, mirroring the tenant list
 * stored under /config/v2/tenants/ in ZooKeeper. Tenants are bootstrapped in parallel
 * at construction, and a ZooKeeper directory cache keeps this instance in sync with
 * tenants added/removed by other config servers.
 *
 * NOTE(review): createTenant(TenantName, Instant), invoked from addTenant and
 * bootstrapTenant, is defined elsewhere in this file (outside this excerpt).
 */
class TenantRepository {

    public static final TenantName HOSTED_VESPA_TENANT = TenantName.from("hosted-vespa");

    private static final TenantName DEFAULT_TENANT = TenantName.defaultName();

    // ZooKeeper layout: tenant nodes and per-tenant application locks.
    private static final Path tenantsPath = Path.fromString("/config/v2/tenants/");
    private static final Path locksPath = Path.fromString("/config/v2/locks/");
    private static final Path vespaPath = Path.fromString("/vespa");
    private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1);
    private static final Logger log = Logger.getLogger(TenantRepository.class.getName());

    // Insertion-ordered and synchronized; iterate defensively (copies) outside the lock.
    private final Map<TenantName, Tenant> tenants = Collections.synchronizedMap(new LinkedHashMap<>());
    private final HostRegistry hostRegistry;
    private final List<TenantListener> tenantListeners = Collections.synchronizedList(new ArrayList<>());
    private final Curator curator;

    private final Metrics metrics;
    private final MetricUpdater metricUpdater;
    private final ExecutorService zkCacheExecutor;
    // Serializes ZK-watch work per tenant so events for one tenant never interleave.
    private final StripedExecutor<TenantName> zkWatcherExecutor;
    private final FileDistributionFactory fileDistributionFactory;
    private final FlagSource flagSource;
    private final SecretStore secretStore;
    private final HostProvisionerProvider hostProvisionerProvider;
    private final ConfigserverConfig configserverConfig;
    private final ConfigServerDB configServerDB;
    private final Zone zone;
    private final Clock clock;
    private final ModelFactoryRegistry modelFactoryRegistry;
    private final ConfigDefinitionRepo configDefinitionRepo;
    private final ReloadListener reloadListener;
    private final TenantListener tenantListener;
    // Only used during construction; shut down once bootstrapping completes.
    private final ExecutorService bootstrapExecutor;
    private final ScheduledExecutorService checkForRemovedApplicationsService =
            new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("check for removed applications"));
    private final Optional<Curator.DirectoryCache> directoryCache;

    /**
     * Creates a new tenant repository, using system defaults (real clock, single-threaded
     * cache executor, fresh file distribution factory).
     */
    @Inject
    public TenantRepository(HostRegistry hostRegistry,
                            Curator curator,
                            Metrics metrics,
                            FlagSource flagSource,
                            SecretStore secretStore,
                            HostProvisionerProvider hostProvisionerProvider,
                            ConfigserverConfig configserverConfig,
                            ConfigServerDB configServerDB,
                            Zone zone,
                            ModelFactoryRegistry modelFactoryRegistry,
                            ConfigDefinitionRepo configDefinitionRepo,
                            ReloadListener reloadListener,
                            TenantListener tenantListener) {
        this(hostRegistry,
             curator,
             metrics,
             new StripedExecutor<>(),
             new FileDistributionFactory(configserverConfig),
             flagSource,
             Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(TenantRepository.class.getName())),
             secretStore,
             hostProvisionerProvider,
             configserverConfig,
             configServerDB,
             zone,
             Clock.systemUTC(),
             modelFactoryRegistry,
             configDefinitionRepo,
             reloadListener,
             tenantListener);
    }

    /**
     * Full constructor (used directly by tests / the injected constructor above).
     * Sets up ZooKeeper paths, bootstraps all existing tenants, starts the directory
     * cache watcher and schedules periodic cleanup of unused applications.
     */
    public TenantRepository(HostRegistry hostRegistry,
                            Curator curator,
                            Metrics metrics,
                            StripedExecutor<TenantName> zkWatcherExecutor,
                            FileDistributionFactory fileDistributionFactory,
                            FlagSource flagSource,
                            ExecutorService zkCacheExecutor,
                            SecretStore secretStore,
                            HostProvisionerProvider hostProvisionerProvider,
                            ConfigserverConfig configserverConfig,
                            ConfigServerDB configServerDB,
                            Zone zone,
                            Clock clock,
                            ModelFactoryRegistry modelFactoryRegistry,
                            ConfigDefinitionRepo configDefinitionRepo,
                            ReloadListener reloadListener,
                            TenantListener tenantListener) {
        this.hostRegistry = hostRegistry;
        this.configserverConfig = configserverConfig;
        this.bootstrapExecutor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(),
                                                              new DaemonThreadFactory("bootstrap tenants"));
        this.curator = curator;
        this.metrics = metrics;
        metricUpdater = metrics.getOrCreateMetricUpdater(Collections.emptyMap());
        this.tenantListeners.add(tenantListener);
        this.zkCacheExecutor = zkCacheExecutor;
        this.zkWatcherExecutor = zkWatcherExecutor;
        this.fileDistributionFactory = fileDistributionFactory;
        this.flagSource = flagSource;
        this.secretStore = secretStore;
        this.hostProvisionerProvider = hostProvisionerProvider;
        this.configServerDB = configServerDB;
        this.zone = zone;
        this.clock = clock;
        this.modelFactoryRegistry = modelFactoryRegistry;
        this.configDefinitionRepo = configDefinitionRepo;
        this.reloadListener = reloadListener;
        this.tenantListener = tenantListener;
        // Track ZK connection state for metrics.
        curator.framework().getConnectionStateListenable().addListener(this::stateChanged);
        // Ensure base paths and system tenants exist before watching/bootstrapping.
        curator.create(tenantsPath);
        curator.create(locksPath);
        createSystemTenants(configserverConfig);
        curator.create(vespaPath);
        this.directoryCache = Optional.of(curator.createDirectoryCache(tenantsPath.getAbsolute(), false, false, zkCacheExecutor));
        this.directoryCache.get().addListener(this::childEvent);
        this.directoryCache.get().start();
        bootstrapTenants();
        notifyTenantsLoaded();
        checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeUnusedApplications,
                                                                  checkForRemovedApplicationsInterval.getSeconds(),
                                                                  checkForRemovedApplicationsInterval.getSeconds(),
                                                                  TimeUnit.SECONDS);
    }

    /** Tells all listeners that the initial tenant set has been loaded. */
    private void notifyTenantsLoaded() {
        for (TenantListener tenantListener : tenantListeners) {
            tenantListener.onTenantsLoaded();
        }
    }

    /** Writes the tenant to ZooKeeper, then creates it locally. */
    public synchronized Tenant addTenant(TenantName tenantName) {
        writeTenantPath(tenantName);
        return createTenant(tenantName, clock.instant());
    }

    /** Writes (creates or refreshes) the tenant's metadata node in ZooKeeper. */
    public void createAndWriteTenantMetaData(Tenant tenant) {
        createWriteTenantMetaDataTransaction(createMetaData(tenant)).commit();
    }

    /** Returns a transaction that stores the given metadata as JSON on the tenant's ZK node. */
    public Transaction createWriteTenantMetaDataTransaction(TenantMetaData tenantMetaData) {
        return new CuratorTransaction(curator).add(
                CuratorOperations.setData(TenantRepository.getTenantPath(tenantMetaData.tenantName()).getAbsolute(),
                                          tenantMetaData.asJsonBytes()));
    }

    /** Builds fresh metadata; falls back to "now" as created time when none was stored. */
    private TenantMetaData createMetaData(Tenant tenant) {
        Instant deployTime = tenant.getSessionRepository().clock().instant();
        Instant createdTime = getTenantMetaData(tenant).createdTimestamp();
        if (createdTime.equals(Instant.EPOCH))
            createdTime = deployTime;
        return new TenantMetaData(tenant.getName(), deployTime, createdTime);
    }

    /**
     * Reads the tenant's metadata from ZooKeeper; malformed or missing data yields a
     * default based on the tenant's creation time.
     */
    public TenantMetaData getTenantMetaData(Tenant tenant) {
        Optional<byte[]> data = getCurator().getData(TenantRepository.getTenantPath(tenant.getName()));
        Optional<TenantMetaData> metaData;
        try {
            metaData = data.map(bytes -> TenantMetaData.fromJsonString(tenant.getName(), Utf8.toString(bytes)));
        } catch (IllegalArgumentException e) {
            // Unparseable metadata is treated the same as absent metadata.
            metaData = Optional.empty();
        }
        return metaData.orElse(new TenantMetaData(tenant.getName(),
                                                  tenant.getCreatedTime(),
                                                  tenant.getCreatedTime()));
    }

    /** Lists the tenant names currently present in ZooKeeper. */
    private static Set<TenantName> readTenantsFromZooKeeper(Curator curator) {
        return curator.getChildren(tenantsPath).stream().map(TenantName::from).collect(Collectors.toSet());
    }

    /**
     * Creates all tenants found in ZooKeeper, in parallel on the bootstrap executor.
     * Fails hard (RuntimeException) if any tenant could not be created; the bootstrap
     * executor is shut down afterwards since it is only needed here.
     */
    private void bootstrapTenants() {
        Map<TenantName, Future<?>> futures = new HashMap<>();
        readTenantsFromZooKeeper(curator).forEach(t -> futures.put(t, bootstrapExecutor.submit(() -> bootstrapTenant(t))));

        Set<TenantName> failed = new HashSet<>();
        for (Map.Entry<TenantName, Future<?>> f : futures.entrySet()) {
            TenantName tenantName = f.getKey();
            try {
                f.getValue().get();
            } catch (ExecutionException e) {
                log.log(Level.WARNING, "Failed to create tenant " + tenantName, e);
                failed.add(tenantName);
            } catch (InterruptedException e) {
                log.log(Level.WARNING, "Interrupted while creating tenant '" + tenantName + "'", e);
            }
        }

        if (failed.size() > 0)
            throw new RuntimeException("Could not create all tenants when bootstrapping, failed to create: " + failed);

        metricUpdater.setTenants(tenants.size());
        bootstrapExecutor.shutdown();
        try {
            bootstrapExecutor.awaitTermination(365, TimeUnit.DAYS); // Timeout should never happen
        } catch (InterruptedException e) {
            throw new RuntimeException("Executor for creating tenants did not terminate within timeout");
        }
    }

    /** Creates one tenant locally from its state in ZooKeeper. Also called from the ZK watcher. */
    protected synchronized void bootstrapTenant(TenantName tenantName) {
        createTenant(tenantName, readCreatedTimeFromZooKeeper(tenantName));
    }

    /** Returns the ZK node's creation time for the tenant, or "now" when the node is missing. */
    public Instant readCreatedTimeFromZooKeeper(TenantName tenantName) {
        Optional<Stat> stat = curator.getStat(getTenantPath(tenantName));
        if (stat.isPresent())
            return Instant.ofEpochMilli(stat.get().getCtime());
        else
            return clock.instant();
    }

    /**
     * Returns a default (compatibility with single tenant config requests) tenant
     *
     * @return default tenant
     */
    public synchronized Tenant defaultTenant() {
        return tenants.get(DEFAULT_TENANT);
    }

    /** Periodic task: asks every tenant to remove applications that are no longer used. */
    private void removeUnusedApplications() {
        getAllTenants().forEach(tenant -> tenant.getApplicationRepo().removeUnusedApplications());
    }

    /** Notifies all listeners that a tenant was created. */
    private void notifyNewTenant(Tenant tenant) {
        for (TenantListener listener : tenantListeners) {
            listener.onTenantCreate(tenant);
        }
    }

    /** Notifies all listeners that a tenant was deleted. */
    private void notifyRemovedTenant(TenantName name) {
        for (TenantListener listener : tenantListeners) {
            listener.onTenantDelete(name);
        }
    }

    /**
     * Creates the tenants that should always be present into ZooKeeper. Will not fail if the node
     * already exists, as this is OK and might happen when several config servers start at the
     * same time and try to call this method.
     */
    private synchronized void createSystemTenants(ConfigserverConfig configserverConfig) {
        List<TenantName> systemTenants = new ArrayList<>();
        systemTenants.add(DEFAULT_TENANT);
        if (configserverConfig.hostedVespa()) systemTenants.add(HOSTED_VESPA_TENANT);

        for (final TenantName tenantName : systemTenants) {
            try {
                writeTenantPath(tenantName);
            } catch (RuntimeException e) {
                // Racing config servers are expected; only a NodeExists cause is tolerated.
                if (e.getCause().getClass() != KeeperException.NodeExistsException.class) {
                    throw e;
                }
            }
        }
    }

    /**
     * Writes the path of the given tenant into ZooKeeper, for watchers to react on
     *
     * @param name name of the tenant
     */
    private synchronized void writeTenantPath(TenantName name) {
        curator.createAtomically(TenantRepository.getTenantPath(name),
                                 TenantRepository.getSessionsPath(name),
                                 TenantRepository.getApplicationsPath(name),
                                 TenantRepository.getLocksPath(name));
    }

    /**
     * Removes the given tenant from ZooKeeper and filesystem. Assumes that tenant exists.
     *
     * @param name name of the tenant
     */
    public synchronized void deleteTenant(TenantName name) {
        if (name.equals(DEFAULT_TENANT))
            throw new IllegalArgumentException("Deleting 'default' tenant is not allowed");
        if ( ! tenants.containsKey(name))
            throw new IllegalArgumentException("Deleting '" + name + "' failed, tenant does not exist");

        log.log(Level.INFO, "Deleting tenant '" + name + "'");
        // Capture the path before closeTenant removes the tenant from the map.
        Path path = tenants.get(name).getPath();
        closeTenant(name);
        curator.delete(path);
    }

    /** Removes the tenant from the local map, notifies listeners and closes its resources. */
    private synchronized void closeTenant(TenantName name) {
        Tenant tenant = tenants.remove(name);
        if (tenant == null)
            throw new IllegalArgumentException("Closing '" + name + "' failed, tenant does not exist");

        log.log(Level.INFO, "Closing tenant '" + name + "'");
        notifyRemovedTenant(name);
        tenant.close();
    }

    /**
     * A helper to format a log preamble for messages with a tenant and app id
     *
     * @param app the app
     * @return the log string
     */
    public static String logPre(ApplicationId app) {
        if (DEFAULT_TENANT.equals(app.tenant())) return "";
        StringBuilder ret = new StringBuilder()
                .append(logPre(app.tenant()))
                .append("app:"+app.application().value())
                .append(":"+app.instance().value())
                .append(" ");
        return ret.toString();
    }

    /**
     * A helper to format a log preamble for messages with a tenant
     *
     * @param tenant tenant
     * @return the log string
     */
    public static String logPre(TenantName tenant) {
        if (DEFAULT_TENANT.equals(tenant)) return "";
        StringBuilder ret = new StringBuilder()
                .append("tenant:" + tenant.value())
                .append(" ");
        return ret.toString();
    }

    /** ZK connection-state callback: counts transitions in metrics. */
    private void stateChanged(CuratorFramework framework, ConnectionState connectionState) {
        switch (connectionState) {
            case CONNECTED:
                metricUpdater.incZKConnected();
                break;
            case SUSPENDED:
                metricUpdater.incZKSuspended();
                break;
            case RECONNECTED:
                metricUpdater.incZKReconnected();
                break;
            case LOST:
                metricUpdater.incZKConnectionLost();
                break;
            case READ_ONLY:
                // NOTE: Should not be relevant for configserver.
                break;
        }
    }

    /**
     * Directory-cache callback: mirrors tenant additions/removals done by other config
     * servers. Work is dispatched on the striped executor so events for the same tenant
     * are handled in order.
     */
    private void childEvent(CuratorFramework framework, PathChildrenCacheEvent event) {
        switch (event.getType()) {
            case CHILD_ADDED:
                TenantName t1 = getTenantNameFromEvent(event);
                if ( ! tenants.containsKey(t1))
                    zkWatcherExecutor.execute(t1, () -> bootstrapTenant(t1));
                break;
            case CHILD_REMOVED:
                TenantName t2 = getTenantNameFromEvent(event);
                if (tenants.containsKey(t2))
                    zkWatcherExecutor.execute(t2, () -> deleteTenant(t2));
                break;
            default:
                break;
        }
        metricUpdater.setTenants(tenants.size());
    }

    /** Extracts the tenant name (last path element) from a directory-cache event. */
    private TenantName getTenantNameFromEvent(PathChildrenCacheEvent event) {
        String path = event.getData().getPath();
        String[] pathElements = path.split("/");
        if (pathElements.length == 0)
            throw new IllegalArgumentException("Path " + path + " does not contain a tenant name");
        return TenantName.from(pathElements[pathElements.length - 1]);
    }

    /** Stops the watcher and all executors, waiting briefly for in-flight work to finish. */
    public void close() {
        directoryCache.ifPresent(Curator.DirectoryCache::close);
        try {
            zkCacheExecutor.shutdown();
            checkForRemovedApplicationsService.shutdown();
            zkWatcherExecutor.shutdownAndWait();
            zkCacheExecutor.awaitTermination(50, TimeUnit.SECONDS);
            checkForRemovedApplicationsService.awaitTermination(50, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            log.log(Level.WARNING, "Interrupted while shutting down.", e);
            Thread.currentThread().interrupt();
        }
    }

    /** Returns whether the given tenant is currently registered in this repository. */
    public boolean checkThatTenantExists(TenantName tenant) {
        return tenants.containsKey(tenant);
    }

    /** Returns the tenant with the given name, or {@code null} if this does not exist. */
    public Tenant getTenant(TenantName tenantName) {
        return tenants.get(tenantName);
    }

    /** Returns an immutable snapshot of all tenant names. */
    public Set<TenantName> getAllTenantNames() {
        return ImmutableSet.copyOf(tenants.keySet());
    }

    /** Returns an immutable snapshot of all tenants. */
    public Collection<Tenant> getAllTenants() {
        return ImmutableSet.copyOf(tenants.values());
    }

    /**
     * Gets zookeeper path for tenant data
     *
     * @param tenantName tenant name
     * @return a {@link com.yahoo.path.Path} to the zookeeper data for a tenant
     */
    public static Path getTenantPath(TenantName tenantName) {
        return tenantsPath.append(tenantName.value());
    }

    /**
     * Gets zookeeper path for session data for a tenant
     *
     * @param tenantName tenant name
     * @return a {@link com.yahoo.path.Path} to the zookeeper sessions data for a tenant
     */
    public static Path getSessionsPath(TenantName tenantName) {
        return getTenantPath(tenantName).append(Tenant.SESSIONS);
    }

    /**
     * Gets zookeeper path for application data for a tenant
     *
     * @param tenantName tenant name
     * @return a {@link com.yahoo.path.Path} to the zookeeper application data for a tenant
     */
    public static Path getApplicationsPath(TenantName tenantName) {
        return getTenantPath(tenantName).append(Tenant.APPLICATIONS);
    }

    /**
     * Gets zookeeper path for locks for a tenant's applications. This is never cleaned, but shouldn't be a problem.
     */
    public static Path getLocksPath(TenantName tenantName) {
        return locksPath.append(tenantName.value());
    }

    /** Returns the Curator instance used by this repository. */
    public Curator getCurator() { return curator; }

}
class TenantRepository { public static final TenantName HOSTED_VESPA_TENANT = TenantName.from("hosted-vespa"); private static final TenantName DEFAULT_TENANT = TenantName.defaultName(); private static final Path tenantsPath = Path.fromString("/config/v2/tenants/"); private static final Path locksPath = Path.fromString("/config/v2/locks/"); private static final Path vespaPath = Path.fromString("/vespa"); private static final Duration checkForRemovedApplicationsInterval = Duration.ofMinutes(1); private static final Logger log = Logger.getLogger(TenantRepository.class.getName()); private final Map<TenantName, Tenant> tenants = Collections.synchronizedMap(new LinkedHashMap<>()); private final HostRegistry hostRegistry; private final List<TenantListener> tenantListeners = Collections.synchronizedList(new ArrayList<>()); private final Curator curator; private final Metrics metrics; private final MetricUpdater metricUpdater; private final ExecutorService zkCacheExecutor; private final StripedExecutor<TenantName> zkWatcherExecutor; private final FileDistributionFactory fileDistributionFactory; private final FlagSource flagSource; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final Clock clock; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final ReloadListener reloadListener; private final TenantListener tenantListener; private final ExecutorService bootstrapExecutor; private final ScheduledExecutorService checkForRemovedApplicationsService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("check for removed applications")); private final Optional<Curator.DirectoryCache> directoryCache; /** * Creates a new tenant repository * */ @Inject public TenantRepository(HostRegistry hostRegistry, Curator curator, 
Metrics metrics, FlagSource flagSource, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, ReloadListener reloadListener, TenantListener tenantListener) { this(hostRegistry, curator, metrics, new StripedExecutor<>(), new FileDistributionFactory(configserverConfig), flagSource, Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(TenantRepository.class.getName())), secretStore, hostProvisionerProvider, configserverConfig, configServerDB, zone, Clock.systemUTC(), modelFactoryRegistry, configDefinitionRepo, reloadListener, tenantListener); } public TenantRepository(HostRegistry hostRegistry, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, FileDistributionFactory fileDistributionFactory, FlagSource flagSource, ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, ReloadListener reloadListener, TenantListener tenantListener) { this.hostRegistry = hostRegistry; this.configserverConfig = configserverConfig; this.bootstrapExecutor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(), new DaemonThreadFactory("bootstrap tenants")); this.curator = curator; this.metrics = metrics; metricUpdater = metrics.getOrCreateMetricUpdater(Collections.emptyMap()); this.tenantListeners.add(tenantListener); this.zkCacheExecutor = zkCacheExecutor; this.zkWatcherExecutor = zkWatcherExecutor; this.fileDistributionFactory = fileDistributionFactory; this.flagSource = flagSource; this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configServerDB = configServerDB; 
this.zone = zone; this.clock = clock; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.reloadListener = reloadListener; this.tenantListener = tenantListener; curator.framework().getConnectionStateListenable().addListener(this::stateChanged); curator.create(tenantsPath); curator.create(locksPath); createSystemTenants(configserverConfig); curator.create(vespaPath); this.directoryCache = Optional.of(curator.createDirectoryCache(tenantsPath.getAbsolute(), false, false, zkCacheExecutor)); this.directoryCache.get().addListener(this::childEvent); this.directoryCache.get().start(); bootstrapTenants(); notifyTenantsLoaded(); checkForRemovedApplicationsService.scheduleWithFixedDelay(this::removeUnusedApplications, checkForRemovedApplicationsInterval.getSeconds(), checkForRemovedApplicationsInterval.getSeconds(), TimeUnit.SECONDS); } private void notifyTenantsLoaded() { for (TenantListener tenantListener : tenantListeners) { tenantListener.onTenantsLoaded(); } } public synchronized Tenant addTenant(TenantName tenantName) { writeTenantPath(tenantName); return createTenant(tenantName, clock.instant()); } public void createAndWriteTenantMetaData(Tenant tenant) { createWriteTenantMetaDataTransaction(createMetaData(tenant)).commit(); } public Transaction createWriteTenantMetaDataTransaction(TenantMetaData tenantMetaData) { return new CuratorTransaction(curator).add( CuratorOperations.setData(TenantRepository.getTenantPath(tenantMetaData.tenantName()).getAbsolute(), tenantMetaData.asJsonBytes())); } private TenantMetaData createMetaData(Tenant tenant) { Instant deployTime = tenant.getSessionRepository().clock().instant(); Instant createdTime = getTenantMetaData(tenant).createdTimestamp(); if (createdTime.equals(Instant.EPOCH)) createdTime = deployTime; return new TenantMetaData(tenant.getName(), deployTime, createdTime); } public TenantMetaData getTenantMetaData(Tenant tenant) { Optional<byte[]> data = 
getCurator().getData(TenantRepository.getTenantPath(tenant.getName())); Optional<TenantMetaData> metaData; try { metaData = data.map(bytes -> TenantMetaData.fromJsonString(tenant.getName(), Utf8.toString(bytes))); } catch (IllegalArgumentException e) { metaData = Optional.empty(); } return metaData.orElse(new TenantMetaData(tenant.getName(), tenant.getCreatedTime(), tenant.getCreatedTime())); } private static Set<TenantName> readTenantsFromZooKeeper(Curator curator) { return curator.getChildren(tenantsPath).stream().map(TenantName::from).collect(Collectors.toSet()); } private void bootstrapTenants() { Map<TenantName, Future<?>> futures = new HashMap<>(); readTenantsFromZooKeeper(curator).forEach(t -> futures.put(t, bootstrapExecutor.submit(() -> bootstrapTenant(t)))); Set<TenantName> failed = new HashSet<>(); for (Map.Entry<TenantName, Future<?>> f : futures.entrySet()) { TenantName tenantName = f.getKey(); try { f.getValue().get(); } catch (ExecutionException e) { log.log(Level.WARNING, "Failed to create tenant " + tenantName, e); failed.add(tenantName); } catch (InterruptedException e) { log.log(Level.WARNING, "Interrupted while creating tenant '" + tenantName + "'", e); } } if (failed.size() > 0) throw new RuntimeException("Could not create all tenants when bootstrapping, failed to create: " + failed); metricUpdater.setTenants(tenants.size()); bootstrapExecutor.shutdown(); try { bootstrapExecutor.awaitTermination(365, TimeUnit.DAYS); } catch (InterruptedException e) { throw new RuntimeException("Executor for creating tenants did not terminate within timeout"); } } protected synchronized void bootstrapTenant(TenantName tenantName) { createTenant(tenantName, readCreatedTimeFromZooKeeper(tenantName)); } public Instant readCreatedTimeFromZooKeeper(TenantName tenantName) { Optional<Stat> stat = curator.getStat(getTenantPath(tenantName)); if (stat.isPresent()) return Instant.ofEpochMilli(stat.get().getCtime()); else return clock.instant(); } /** * Returns a default 
(compatibility with single tenant config requests) tenant * * @return default tenant */ public synchronized Tenant defaultTenant() { return tenants.get(DEFAULT_TENANT); } private void removeUnusedApplications() { getAllTenants().forEach(tenant -> tenant.getApplicationRepo().removeUnusedApplications()); } private void notifyNewTenant(Tenant tenant) { for (TenantListener listener : tenantListeners) { listener.onTenantCreate(tenant); } } private void notifyRemovedTenant(TenantName name) { for (TenantListener listener : tenantListeners) { listener.onTenantDelete(name); } } /** * Creates the tenants that should always be present into ZooKeeper. Will not fail if the node * already exists, as this is OK and might happen when several config servers start at the * same time and try to call this method. */ private synchronized void createSystemTenants(ConfigserverConfig configserverConfig) { List<TenantName> systemTenants = new ArrayList<>(); systemTenants.add(DEFAULT_TENANT); if (configserverConfig.hostedVespa()) systemTenants.add(HOSTED_VESPA_TENANT); for (final TenantName tenantName : systemTenants) { try { writeTenantPath(tenantName); } catch (RuntimeException e) { if (e.getCause().getClass() != KeeperException.NodeExistsException.class) { throw e; } } } } /** * Writes the path of the given tenant into ZooKeeper, for watchers to react on * * @param name name of the tenant */ private synchronized void writeTenantPath(TenantName name) { curator.createAtomically(TenantRepository.getTenantPath(name), TenantRepository.getSessionsPath(name), TenantRepository.getApplicationsPath(name), TenantRepository.getLocksPath(name)); } /** * Removes the given tenant from ZooKeeper and filesystem. Assumes that tenant exists. * * @param name name of the tenant */ public synchronized void deleteTenant(TenantName name) { if (name.equals(DEFAULT_TENANT)) throw new IllegalArgumentException("Deleting 'default' tenant is not allowed"); if ( ! 
tenants.containsKey(name)) throw new IllegalArgumentException("Deleting '" + name + "' failed, tenant does not exist"); log.log(Level.INFO, "Deleting tenant '" + name + "'"); Path path = tenants.get(name).getPath(); closeTenant(name); curator.delete(path); } private synchronized void closeTenant(TenantName name) { Tenant tenant = tenants.remove(name); if (tenant == null) throw new IllegalArgumentException("Closing '" + name + "' failed, tenant does not exist"); log.log(Level.INFO, "Closing tenant '" + name + "'"); notifyRemovedTenant(name); tenant.close(); } /** * A helper to format a log preamble for messages with a tenant and app id * @param app the app * @return the log string */ public static String logPre(ApplicationId app) { if (DEFAULT_TENANT.equals(app.tenant())) return ""; StringBuilder ret = new StringBuilder() .append(logPre(app.tenant())) .append("app:"+app.application().value()) .append(":"+app.instance().value()) .append(" "); return ret.toString(); } /** * A helper to format a log preamble for messages with a tenant * @param tenant tenant * @return the log string */ public static String logPre(TenantName tenant) { if (DEFAULT_TENANT.equals(tenant)) return ""; StringBuilder ret = new StringBuilder() .append("tenant:" + tenant.value()) .append(" "); return ret.toString(); } private void stateChanged(CuratorFramework framework, ConnectionState connectionState) { switch (connectionState) { case CONNECTED: metricUpdater.incZKConnected(); break; case SUSPENDED: metricUpdater.incZKSuspended(); break; case RECONNECTED: metricUpdater.incZKReconnected(); break; case LOST: metricUpdater.incZKConnectionLost(); break; case READ_ONLY: break; } } private void childEvent(CuratorFramework framework, PathChildrenCacheEvent event) { switch (event.getType()) { case CHILD_ADDED: TenantName t1 = getTenantNameFromEvent(event); if ( ! 
tenants.containsKey(t1)) zkWatcherExecutor.execute(t1, () -> bootstrapTenant(t1)); break; case CHILD_REMOVED: TenantName t2 = getTenantNameFromEvent(event); if (tenants.containsKey(t2)) zkWatcherExecutor.execute(t2, () -> deleteTenant(t2)); break; default: break; } metricUpdater.setTenants(tenants.size()); } private TenantName getTenantNameFromEvent(PathChildrenCacheEvent event) { String path = event.getData().getPath(); String[] pathElements = path.split("/"); if (pathElements.length == 0) throw new IllegalArgumentException("Path " + path + " does not contain a tenant name"); return TenantName.from(pathElements[pathElements.length - 1]); } public void close() { directoryCache.ifPresent(Curator.DirectoryCache::close); try { zkCacheExecutor.shutdown(); checkForRemovedApplicationsService.shutdown(); zkWatcherExecutor.shutdownAndWait(); zkCacheExecutor.awaitTermination(50, TimeUnit.SECONDS); checkForRemovedApplicationsService.awaitTermination(50, TimeUnit.SECONDS); } catch (InterruptedException e) { log.log(Level.WARNING, "Interrupted while shutting down.", e); Thread.currentThread().interrupt(); } } public boolean checkThatTenantExists(TenantName tenant) { return tenants.containsKey(tenant); } /** Returns the tenant with the given name, or {@code null} if this does not exist. 
*/ public Tenant getTenant(TenantName tenantName) { return tenants.get(tenantName); } public Set<TenantName> getAllTenantNames() { return ImmutableSet.copyOf(tenants.keySet()); } public Collection<Tenant> getAllTenants() { return ImmutableSet.copyOf(tenants.values()); } /** * Gets zookeeper path for tenant data * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper data for a tenant */ public static Path getTenantPath(TenantName tenantName) { return tenantsPath.append(tenantName.value()); } /** * Gets zookeeper path for session data for a tenant * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper sessions data for a tenant */ public static Path getSessionsPath(TenantName tenantName) { return getTenantPath(tenantName).append(Tenant.SESSIONS); } /** * Gets zookeeper path for application data for a tenant * * @param tenantName tenant name * @return a {@link com.yahoo.path.Path} to the zookeeper application data for a tenant */ public static Path getApplicationsPath(TenantName tenantName) { return getTenantPath(tenantName).append(Tenant.APPLICATIONS); } /** * Gets zookeeper path for locks for a tenant's applications. This is never cleaned, but shouldn't be a problem. */ public static Path getLocksPath(TenantName tenantName) { return locksPath.append(tenantName.value()); } public Curator getCurator() { return curator; } }
Call `shutdownNow` instead of `shutdown`. Ensure that compression runnable/task handles interruption correctly.
void shutdown() { logThread.interrupt(); try { logThread.join(); logThread.executor.shutdown(); logThread.executor.awaitTermination(600, TimeUnit.SECONDS); } catch (InterruptedException e) { } }
logThread.executor.shutdown();
void shutdown() { logThread.interrupt(); try { logThread.executor.shutdownNow(); logThread.executor.awaitTermination(600, TimeUnit.SECONDS); logThread.join(); } catch (InterruptedException e) { } }
class LogFileHandler <LOGTYPE> { enum Compression {NONE, GZIP, ZSTD} private final static Logger logger = Logger.getLogger(LogFileHandler.class.getName()); final LogThread<LOGTYPE> logThread; LogFileHandler(Compression compression, String filePattern, String rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this(compression, filePattern, calcTimesMinutes(rotationTimes), symlinkName, logWriter); } LogFileHandler( Compression compression, String filePattern, long[] rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this.logThread = new LogThread<>(logWriter, filePattern, compression, rotationTimes, symlinkName); this.logThread.start(); } /** * Sends logrecord to file, first rotating file if needed. * * @param r logrecord to publish */ public void publish(LOGTYPE r) { addOperation(new Operation<>(r)); } public void flush() { addOperationAndWait(new Operation<>(Operation.Type.flush)); } /** * Force file rotation now, independent of schedule. */ void rotateNow() { addOperationAndWait(new Operation<>(Operation.Type.rotate)); } public void close() { addOperationAndWait(new Operation<>(Operation.Type.close)); } private void addOperation(Operation<LOGTYPE> op) { try { logThread.logQueue.put(op); } catch (InterruptedException e) { } } private void addOperationAndWait(Operation<LOGTYPE> op) { try { logThread.logQueue.put(op); op.countDownLatch.await(); } catch (InterruptedException e) { } } /** * Flushes all queued messages, interrupts the log thread in this and * waits for it to end before returning */ /** * Calculate rotation times array, given times in minutes, as "0 60 ..." */ private static long[] calcTimesMinutes(String times) { ArrayList<Long> list = new ArrayList<>(50); int i = 0; boolean etc = false; while (i < times.length()) { if (times.charAt(i) == ' ') { i++; continue; } int j = i; i = times.indexOf(' ', i); if (i == -1) i = times.length(); if (times.charAt(j) == '.' 
&& times.substring(j, i).equals("...")) { etc = true; break; } list.add(Long.valueOf(times.substring(j, i))); } int size = list.size(); long[] longtimes = new long[size]; for (i = 0; i < size; i++) { longtimes[i] = list.get(i) * 60000; } if (etc) { long endOfDay = 24 * 60 * 60 * 1000; long lasttime = longtimes[size - 1]; long interval = lasttime - longtimes[size - 2]; long moreneeded = (endOfDay - lasttime) / interval; if (moreneeded > 0) { int newsize = size + (int) moreneeded; long[] temp = new long[newsize]; for (i = 0; i < size; i++) { temp[i] = longtimes[i]; } while (size < newsize) { lasttime += interval; temp[size++] = lasttime; } longtimes = temp; } } return longtimes; } /** * Only for unit testing. Do not use. */ String getFileName() { return logThread.fileName; } /** * Handle logging and file operations */ static class LogThread<LOGTYPE> extends Thread { long lastFlush = 0; private FileOutputStream currentOutputStream = null; private long nextRotationTime = 0; private final String filePattern; private String fileName; private long lastDropPosition = 0; private final LogWriter<LOGTYPE> logWriter; private final ArrayBlockingQueue<Operation<LOGTYPE>> logQueue = new ArrayBlockingQueue<>(100000); private final Compression compression; private final long[] rotationTimes; private final String symlinkName; private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); private final NativeIO nativeIO = new NativeIO(); LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName) { super("Logger"); setDaemon(true); this.logWriter = logWriter; this.filePattern = filePattern; this.compression = compression; this.rotationTimes = rotationTimes; this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? 
symlinkName : null; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } internalFlush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { Operation<LOGTYPE> r = logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r.type == Operation.Type.flush) { internalFlush(); } else if (r.type == Operation.Type.close) { internalClose(); } else if (r.type == Operation.Type.rotate) { internalRotateNow(); lastFlush = System.nanoTime(); } else if (r.type == Operation.Type.log) { internalPublish(r.log.get()); flushIfOld(3, TimeUnit.SECONDS); } r.countDownLatch.countDown(); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { internalFlush(); lastFlush = now; } } private synchronized void internalFlush() { try { FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) { if (compression == Compression.GZIP) { long newPos = currentOut.getChannel().position(); if (newPos > lastDropPosition + 102400) { nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true); lastDropPosition = newPos; } } else { currentOut.flush(); } } } catch (IOException e) { logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e)); } } private void internalClose() { try { internalFlush(); FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) currentOut.close(); } catch (Exception e) { logger.log(Level.WARNING, "Got error while closing log file", e); } } private void internalPublish(LOGTYPE r) { long now = System.currentTimeMillis(); if (nextRotationTime <= 0) { nextRotationTime = getNextRotationTime(now); } if (now > nextRotationTime || currentOutputStream == null) { 
internalRotateNow(); } try { FileOutputStream out = this.currentOutputStream; logWriter.write(r, out); out.write('\n'); } catch (IOException e) { logger.warning("Failed writing log record: " + Exceptions.toMessageString(e)); } } /** * Find next rotation after specified time. * * @param now the specified time; if zero, current time is used. * @return the next rotation time */ long getNextRotationTime(long now) { if (now <= 0) { now = System.currentTimeMillis(); } long nowTod = timeOfDayMillis(now); long next = 0; for (long rotationTime : rotationTimes) { if (nowTod < rotationTime) { next = rotationTime - nowTod + now; break; } } if (next == 0) { next = rotationTimes[0] + lengthOfDayMillis - nowTod + now; } return next; } private void checkAndCreateDir(String pathname) { int lastSlash = pathname.lastIndexOf("/"); if (lastSlash > -1) { String pathExcludingFilename = pathname.substring(0, lastSlash); File filepath = new File(pathExcludingFilename); if (!filepath.exists()) { filepath.mkdirs(); } } } private void internalRotateNow() { String oldFileName = fileName; long now = System.currentTimeMillis(); fileName = LogFormatter.insertDate(filePattern, now); internalFlush(); try { checkAndCreateDir(fileName); FileOutputStream os = new FileOutputStream(fileName, true); currentOutputStream = os; lastDropPosition = 0; LogFileDb.nowLoggingTo(fileName); } catch (IOException e) { throw new RuntimeException("Couldn't open log file '" + fileName + "'", e); } createSymlinkToCurrentFile(); nextRotationTime = 0; if ((oldFileName != null)) { File oldFile = new File(oldFileName); if (oldFile.exists()) { if (compression != Compression.NONE) { executor.execute(() -> runCompression(oldFile, compression)); } else { nativeIO.dropFileFromCache(oldFile); } } } } private static void runCompression(File oldFile, Compression compression) { switch (compression) { case ZSTD: runCompressionZstd(oldFile.toPath()); break; case GZIP: runCompressionGzip(oldFile); break; default: throw new 
IllegalArgumentException("Unknown compression " + compression); } } private static void runCompressionZstd(Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); Files.createFile(compressedFile); int bufferSize = 0x400000; byte[] buffer = new byte[bufferSize]; try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize); InputStream in = Files.newInputStream(oldFile)) { int read; while ((read = in.read(buffer)) >= 0) { out.write(buffer, 0, read); } out.flush(); } Files.delete(oldFile); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } } private static void runCompressionGzip(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { byte[] buffer = new byte[0x400000]; long totalBytesRead = 0; NativeIO nativeIO = new NativeIO(); for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false); totalBytesRead += read; } compressor.finish(); compressor.flush(); oldFile.delete(); nativeIO.dropFileFromCache(gzippedFile); } catch (IOException e) { logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } /** * Name files by date - create a symlink with a constant name to the newest file */ private void createSymlinkToCurrentFile() { if (symlinkName == null) return; File f = new File(fileName); File f2 = new File(f.getParent(), symlinkName); String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()}; try { int retval = new ProcessExecuter().exec(cmd).getFirst(); if (retval != 0) { logger.warning("Command '" + Arrays.toString(cmd) + "' + failed with exitcode=" + retval); } } catch (IOException 
e) { logger.warning("Got '" + e + "' while doing'" + Arrays.toString(cmd) + "'."); } } private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000; private static long timeOfDayMillis(long time) { return time % lengthOfDayMillis; } } private static class Operation<LOGTYPE> { enum Type {log, flush, close, rotate} ; final Type type; final Optional<LOGTYPE> log; final CountDownLatch countDownLatch = new CountDownLatch(1); Operation(Type type) { this(type, Optional.empty()); } Operation(LOGTYPE log) { this(Type.log, Optional.of(log)); } private Operation(Type type, Optional<LOGTYPE> log) { this.type = type; this.log = log; } } }
class LogFileHandler <LOGTYPE> { enum Compression {NONE, GZIP, ZSTD} private final static Logger logger = Logger.getLogger(LogFileHandler.class.getName()); private final ArrayBlockingQueue<Operation<LOGTYPE>> logQueue = new ArrayBlockingQueue<>(10000); final LogThread<LOGTYPE> logThread; @FunctionalInterface private interface Pollable<T> { Operation<T> poll() throws InterruptedException; } LogFileHandler(Compression compression, String filePattern, String rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this(compression, filePattern, calcTimesMinutes(rotationTimes), symlinkName, logWriter); } LogFileHandler( Compression compression, String filePattern, long[] rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this.logThread = new LogThread<LOGTYPE>(logWriter, filePattern, compression, rotationTimes, symlinkName, this::poll); this.logThread.start(); } private Operation<LOGTYPE> poll() throws InterruptedException { return logQueue.poll(100, TimeUnit.MILLISECONDS); } /** * Sends logrecord to file, first rotating file if needed. * * @param r logrecord to publish */ public void publish(LOGTYPE r) { addOperation(new Operation<>(r)); } public void flush() { addOperationAndWait(new Operation<>(Operation.Type.flush)); } /** * Force file rotation now, independent of schedule. */ void rotateNow() { addOperationAndWait(new Operation<>(Operation.Type.rotate)); } public void close() { addOperationAndWait(new Operation<>(Operation.Type.close)); } private void addOperation(Operation<LOGTYPE> op) { try { logQueue.put(op); } catch (InterruptedException e) { } } private void addOperationAndWait(Operation<LOGTYPE> op) { try { logQueue.put(op); op.countDownLatch.await(); } catch (InterruptedException e) { } } /** * Flushes all queued messages, interrupts the log thread in this and * waits for it to end before returning */ /** * Calculate rotation times array, given times in minutes, as "0 60 ..." 
*/ private static long[] calcTimesMinutes(String times) { ArrayList<Long> list = new ArrayList<>(50); int i = 0; boolean etc = false; while (i < times.length()) { if (times.charAt(i) == ' ') { i++; continue; } int j = i; i = times.indexOf(' ', i); if (i == -1) i = times.length(); if (times.charAt(j) == '.' && times.substring(j, i).equals("...")) { etc = true; break; } list.add(Long.valueOf(times.substring(j, i))); } int size = list.size(); long[] longtimes = new long[size]; for (i = 0; i < size; i++) { longtimes[i] = list.get(i) * 60000; } if (etc) { long endOfDay = 24 * 60 * 60 * 1000; long lasttime = longtimes[size - 1]; long interval = lasttime - longtimes[size - 2]; long moreneeded = (endOfDay - lasttime) / interval; if (moreneeded > 0) { int newsize = size + (int) moreneeded; long[] temp = new long[newsize]; for (i = 0; i < size; i++) { temp[i] = longtimes[i]; } while (size < newsize) { lasttime += interval; temp[size++] = lasttime; } longtimes = temp; } } return longtimes; } /** * Only for unit testing. Do not use. 
*/ String getFileName() { return logThread.fileName; } /** * Handle logging and file operations */ static class LogThread<LOGTYPE> extends Thread { private final Pollable<LOGTYPE> operationProvider; long lastFlush = 0; private FileOutputStream currentOutputStream = null; private long nextRotationTime = 0; private final String filePattern; private volatile String fileName; private long lastDropPosition = 0; private final LogWriter<LOGTYPE> logWriter; private final Compression compression; private final long[] rotationTimes; private final String symlinkName; private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); private final NativeIO nativeIO = new NativeIO(); LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName, Pollable<LOGTYPE> operationProvider) { super("Logger"); setDaemon(true); this.logWriter = logWriter; this.filePattern = filePattern; this.compression = compression; this.rotationTimes = rotationTimes; this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? 
symlinkName : null; this.operationProvider = operationProvider; } @Override public void run() { try { handleLogOperations(); } catch (InterruptedException e) { } catch (Exception e) { Process.logAndDie("Failed storing log records", e); } internalFlush(); } private void handleLogOperations() throws InterruptedException { while (!isInterrupted()) { Operation<LOGTYPE> r = operationProvider.poll(); if (r != null) { if (r.type == Operation.Type.flush) { internalFlush(); } else if (r.type == Operation.Type.close) { internalClose(); } else if (r.type == Operation.Type.rotate) { internalRotateNow(); lastFlush = System.nanoTime(); } else if (r.type == Operation.Type.log) { internalPublish(r.log.get()); flushIfOld(3, TimeUnit.SECONDS); } r.countDownLatch.countDown(); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { internalFlush(); lastFlush = now; } } private synchronized void internalFlush() { try { FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) { if (compression == Compression.GZIP) { long newPos = currentOut.getChannel().position(); if (newPos > lastDropPosition + 102400) { nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true); lastDropPosition = newPos; } } else { currentOut.flush(); } } } catch (IOException e) { logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e)); } } private void internalClose() { try { internalFlush(); FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) currentOut.close(); } catch (Exception e) { logger.log(Level.WARNING, "Got error while closing log file", e); } } private void internalPublish(LOGTYPE r) { long now = System.currentTimeMillis(); if (nextRotationTime <= 0) { nextRotationTime = getNextRotationTime(now); } if (now > nextRotationTime || currentOutputStream == 
null) { internalRotateNow(); } try { FileOutputStream out = this.currentOutputStream; logWriter.write(r, out); out.write('\n'); } catch (IOException e) { logger.warning("Failed writing log record: " + Exceptions.toMessageString(e)); } } /** * Find next rotation after specified time. * * @param now the specified time; if zero, current time is used. * @return the next rotation time */ long getNextRotationTime(long now) { if (now <= 0) { now = System.currentTimeMillis(); } long nowTod = timeOfDayMillis(now); long next = 0; for (long rotationTime : rotationTimes) { if (nowTod < rotationTime) { next = rotationTime - nowTod + now; break; } } if (next == 0) { next = rotationTimes[0] + lengthOfDayMillis - nowTod + now; } return next; } private void checkAndCreateDir(String pathname) { int lastSlash = pathname.lastIndexOf("/"); if (lastSlash > -1) { String pathExcludingFilename = pathname.substring(0, lastSlash); File filepath = new File(pathExcludingFilename); if (!filepath.exists()) { filepath.mkdirs(); } } } private void internalRotateNow() { String oldFileName = fileName; long now = System.currentTimeMillis(); fileName = LogFormatter.insertDate(filePattern, now); internalFlush(); try { checkAndCreateDir(fileName); FileOutputStream os = new FileOutputStream(fileName, true); currentOutputStream = os; lastDropPosition = 0; LogFileDb.nowLoggingTo(fileName); } catch (IOException e) { throw new RuntimeException("Couldn't open log file '" + fileName + "'", e); } createSymlinkToCurrentFile(); nextRotationTime = 0; if ((oldFileName != null)) { File oldFile = new File(oldFileName); if (oldFile.exists()) { if (compression != Compression.NONE) { executor.execute(() -> runCompression(oldFile, compression)); } else { nativeIO.dropFileFromCache(oldFile); } } } } private static void runCompression(File oldFile, Compression compression) { switch (compression) { case ZSTD: runCompressionZstd(oldFile.toPath()); break; case GZIP: runCompressionGzip(oldFile); break; default: throw new 
IllegalArgumentException("Unknown compression " + compression); } } private static void runCompressionZstd(Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); Files.createFile(compressedFile); int bufferSize = 0x400000; byte[] buffer = new byte[bufferSize]; try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize); InputStream in = Files.newInputStream(oldFile)) { int read; while ((read = in.read(buffer)) >= 0) { out.write(buffer, 0, read); } out.flush(); } Files.delete(oldFile); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } } private static void runCompressionGzip(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); NativeIO nativeIO = new NativeIO(); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { byte[] buffer = new byte[0x400000]; long totalBytesRead = 0; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false); totalBytesRead += read; } compressor.finish(); compressor.flush(); } catch (IOException e) { logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } oldFile.delete(); nativeIO.dropFileFromCache(gzippedFile); } /** * Name files by date - create a symlink with a constant name to the newest file */ private void createSymlinkToCurrentFile() { if (symlinkName == null) return; File f = new File(fileName); File f2 = new File(f.getParent(), symlinkName); String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()}; try { int retval = new ProcessExecuter().exec(cmd).getFirst(); if (retval != 0) { logger.warning("Command '" + Arrays.toString(cmd) + "' + failed with exitcode=" + retval); } } catch (IOException 
e) { logger.warning("Got '" + e + "' while doing'" + Arrays.toString(cmd) + "'."); } } private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000; private static long timeOfDayMillis(long time) { return time % lengthOfDayMillis; } } private static class Operation<LOGTYPE> { enum Type {log, flush, close, rotate} ; final Type type; final Optional<LOGTYPE> log; final CountDownLatch countDownLatch = new CountDownLatch(1); Operation(Type type) { this(type, Optional.empty()); } Operation(LOGTYPE log) { this(Type.log, Optional.of(log)); } private Operation(Type type, Optional<LOGTYPE> log) { this.type = type; this.log = log; } } }
I think the correct behaviour here is to set the thread's interrupt flag again before returning
private void addOperationAndWait(Operation<LOGTYPE> op) { try { logThread.logQueue.put(op); op.countDownLatch.await(); } catch (InterruptedException e) { } }
} catch (InterruptedException e) {
private void addOperationAndWait(Operation<LOGTYPE> op) { try { logQueue.put(op); op.countDownLatch.await(); } catch (InterruptedException e) { } }
class LogFileHandler <LOGTYPE> { enum Compression {NONE, GZIP, ZSTD} private final static Logger logger = Logger.getLogger(LogFileHandler.class.getName()); final LogThread<LOGTYPE> logThread; LogFileHandler(Compression compression, String filePattern, String rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this(compression, filePattern, calcTimesMinutes(rotationTimes), symlinkName, logWriter); } LogFileHandler( Compression compression, String filePattern, long[] rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this.logThread = new LogThread<>(logWriter, filePattern, compression, rotationTimes, symlinkName); this.logThread.start(); } /** * Sends logrecord to file, first rotating file if needed. * * @param r logrecord to publish */ public void publish(LOGTYPE r) { addOperation(new Operation<>(r)); } public void flush() { addOperationAndWait(new Operation<>(Operation.Type.flush)); } /** * Force file rotation now, independent of schedule. */ void rotateNow() { addOperationAndWait(new Operation<>(Operation.Type.rotate)); } public void close() { addOperationAndWait(new Operation<>(Operation.Type.close)); } private void addOperation(Operation<LOGTYPE> op) { try { logThread.logQueue.put(op); } catch (InterruptedException e) { } } /** * Flushes all queued messages, interrupts the log thread in this and * waits for it to end before returning */ void shutdown() { logThread.interrupt(); try { logThread.join(); logThread.executor.shutdown(); logThread.executor.awaitTermination(600, TimeUnit.SECONDS); } catch (InterruptedException e) { } } /** * Calculate rotation times array, given times in minutes, as "0 60 ..." */ private static long[] calcTimesMinutes(String times) { ArrayList<Long> list = new ArrayList<>(50); int i = 0; boolean etc = false; while (i < times.length()) { if (times.charAt(i) == ' ') { i++; continue; } int j = i; i = times.indexOf(' ', i); if (i == -1) i = times.length(); if (times.charAt(j) == '.' 
&& times.substring(j, i).equals("...")) { etc = true; break; } list.add(Long.valueOf(times.substring(j, i))); } int size = list.size(); long[] longtimes = new long[size]; for (i = 0; i < size; i++) { longtimes[i] = list.get(i) * 60000; } if (etc) { long endOfDay = 24 * 60 * 60 * 1000; long lasttime = longtimes[size - 1]; long interval = lasttime - longtimes[size - 2]; long moreneeded = (endOfDay - lasttime) / interval; if (moreneeded > 0) { int newsize = size + (int) moreneeded; long[] temp = new long[newsize]; for (i = 0; i < size; i++) { temp[i] = longtimes[i]; } while (size < newsize) { lasttime += interval; temp[size++] = lasttime; } longtimes = temp; } } return longtimes; } /** * Only for unit testing. Do not use. */ String getFileName() { return logThread.fileName; } /** * Handle logging and file operations */ static class LogThread<LOGTYPE> extends Thread { long lastFlush = 0; private FileOutputStream currentOutputStream = null; private long nextRotationTime = 0; private final String filePattern; private String fileName; private long lastDropPosition = 0; private final LogWriter<LOGTYPE> logWriter; private final ArrayBlockingQueue<Operation<LOGTYPE>> logQueue = new ArrayBlockingQueue<>(100000); private final Compression compression; private final long[] rotationTimes; private final String symlinkName; private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); private final NativeIO nativeIO = new NativeIO(); LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName) { super("Logger"); setDaemon(true); this.logWriter = logWriter; this.filePattern = filePattern; this.compression = compression; this.rotationTimes = rotationTimes; this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? 
symlinkName : null; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } internalFlush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { Operation<LOGTYPE> r = logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r.type == Operation.Type.flush) { internalFlush(); } else if (r.type == Operation.Type.close) { internalClose(); } else if (r.type == Operation.Type.rotate) { internalRotateNow(); lastFlush = System.nanoTime(); } else if (r.type == Operation.Type.log) { internalPublish(r.log.get()); flushIfOld(3, TimeUnit.SECONDS); } r.countDownLatch.countDown(); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { internalFlush(); lastFlush = now; } } private synchronized void internalFlush() { try { FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) { if (compression == Compression.GZIP) { long newPos = currentOut.getChannel().position(); if (newPos > lastDropPosition + 102400) { nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true); lastDropPosition = newPos; } } else { currentOut.flush(); } } } catch (IOException e) { logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e)); } } private void internalClose() { try { internalFlush(); FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) currentOut.close(); } catch (Exception e) { logger.log(Level.WARNING, "Got error while closing log file", e); } } private void internalPublish(LOGTYPE r) { long now = System.currentTimeMillis(); if (nextRotationTime <= 0) { nextRotationTime = getNextRotationTime(now); } if (now > nextRotationTime || currentOutputStream == null) { 
internalRotateNow(); } try { FileOutputStream out = this.currentOutputStream; logWriter.write(r, out); out.write('\n'); } catch (IOException e) { logger.warning("Failed writing log record: " + Exceptions.toMessageString(e)); } } /** * Find next rotation after specified time. * * @param now the specified time; if zero, current time is used. * @return the next rotation time */ long getNextRotationTime(long now) { if (now <= 0) { now = System.currentTimeMillis(); } long nowTod = timeOfDayMillis(now); long next = 0; for (long rotationTime : rotationTimes) { if (nowTod < rotationTime) { next = rotationTime - nowTod + now; break; } } if (next == 0) { next = rotationTimes[0] + lengthOfDayMillis - nowTod + now; } return next; } private void checkAndCreateDir(String pathname) { int lastSlash = pathname.lastIndexOf("/"); if (lastSlash > -1) { String pathExcludingFilename = pathname.substring(0, lastSlash); File filepath = new File(pathExcludingFilename); if (!filepath.exists()) { filepath.mkdirs(); } } } private void internalRotateNow() { String oldFileName = fileName; long now = System.currentTimeMillis(); fileName = LogFormatter.insertDate(filePattern, now); internalFlush(); try { checkAndCreateDir(fileName); FileOutputStream os = new FileOutputStream(fileName, true); currentOutputStream = os; lastDropPosition = 0; LogFileDb.nowLoggingTo(fileName); } catch (IOException e) { throw new RuntimeException("Couldn't open log file '" + fileName + "'", e); } createSymlinkToCurrentFile(); nextRotationTime = 0; if ((oldFileName != null)) { File oldFile = new File(oldFileName); if (oldFile.exists()) { if (compression != Compression.NONE) { executor.execute(() -> runCompression(oldFile, compression)); } else { nativeIO.dropFileFromCache(oldFile); } } } } private static void runCompression(File oldFile, Compression compression) { switch (compression) { case ZSTD: runCompressionZstd(oldFile.toPath()); break; case GZIP: runCompressionGzip(oldFile); break; default: throw new 
IllegalArgumentException("Unknown compression " + compression); } } private static void runCompressionZstd(Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); Files.createFile(compressedFile); int bufferSize = 0x400000; byte[] buffer = new byte[bufferSize]; try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize); InputStream in = Files.newInputStream(oldFile)) { int read; while ((read = in.read(buffer)) >= 0) { out.write(buffer, 0, read); } out.flush(); } Files.delete(oldFile); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } } private static void runCompressionGzip(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { byte[] buffer = new byte[0x400000]; long totalBytesRead = 0; NativeIO nativeIO = new NativeIO(); for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false); totalBytesRead += read; } compressor.finish(); compressor.flush(); oldFile.delete(); nativeIO.dropFileFromCache(gzippedFile); } catch (IOException e) { logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } /** * Name files by date - create a symlink with a constant name to the newest file */ private void createSymlinkToCurrentFile() { if (symlinkName == null) return; File f = new File(fileName); File f2 = new File(f.getParent(), symlinkName); String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()}; try { int retval = new ProcessExecuter().exec(cmd).getFirst(); if (retval != 0) { logger.warning("Command '" + Arrays.toString(cmd) + "' + failed with exitcode=" + retval); } } catch (IOException 
e) { logger.warning("Got '" + e + "' while doing'" + Arrays.toString(cmd) + "'."); } } private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000; private static long timeOfDayMillis(long time) { return time % lengthOfDayMillis; } } private static class Operation<LOGTYPE> { enum Type {log, flush, close, rotate} ; final Type type; final Optional<LOGTYPE> log; final CountDownLatch countDownLatch = new CountDownLatch(1); Operation(Type type) { this(type, Optional.empty()); } Operation(LOGTYPE log) { this(Type.log, Optional.of(log)); } private Operation(Type type, Optional<LOGTYPE> log) { this.type = type; this.log = log; } } }
class LogFileHandler <LOGTYPE> { enum Compression {NONE, GZIP, ZSTD} private final static Logger logger = Logger.getLogger(LogFileHandler.class.getName()); private final ArrayBlockingQueue<Operation<LOGTYPE>> logQueue = new ArrayBlockingQueue<>(10000); final LogThread<LOGTYPE> logThread; @FunctionalInterface private interface Pollable<T> { Operation<T> poll() throws InterruptedException; } LogFileHandler(Compression compression, String filePattern, String rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this(compression, filePattern, calcTimesMinutes(rotationTimes), symlinkName, logWriter); } LogFileHandler( Compression compression, String filePattern, long[] rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this.logThread = new LogThread<LOGTYPE>(logWriter, filePattern, compression, rotationTimes, symlinkName, this::poll); this.logThread.start(); } private Operation<LOGTYPE> poll() throws InterruptedException { return logQueue.poll(100, TimeUnit.MILLISECONDS); } /** * Sends logrecord to file, first rotating file if needed. * * @param r logrecord to publish */ public void publish(LOGTYPE r) { addOperation(new Operation<>(r)); } public void flush() { addOperationAndWait(new Operation<>(Operation.Type.flush)); } /** * Force file rotation now, independent of schedule. */ void rotateNow() { addOperationAndWait(new Operation<>(Operation.Type.rotate)); } public void close() { addOperationAndWait(new Operation<>(Operation.Type.close)); } private void addOperation(Operation<LOGTYPE> op) { try { logQueue.put(op); } catch (InterruptedException e) { } } /** * Flushes all queued messages, interrupts the log thread in this and * waits for it to end before returning */ void shutdown() { logThread.interrupt(); try { logThread.executor.shutdownNow(); logThread.executor.awaitTermination(600, TimeUnit.SECONDS); logThread.join(); } catch (InterruptedException e) { } } /** * Calculate rotation times array, given times in minutes, as "0 60 ..." 
*/ private static long[] calcTimesMinutes(String times) { ArrayList<Long> list = new ArrayList<>(50); int i = 0; boolean etc = false; while (i < times.length()) { if (times.charAt(i) == ' ') { i++; continue; } int j = i; i = times.indexOf(' ', i); if (i == -1) i = times.length(); if (times.charAt(j) == '.' && times.substring(j, i).equals("...")) { etc = true; break; } list.add(Long.valueOf(times.substring(j, i))); } int size = list.size(); long[] longtimes = new long[size]; for (i = 0; i < size; i++) { longtimes[i] = list.get(i) * 60000; } if (etc) { long endOfDay = 24 * 60 * 60 * 1000; long lasttime = longtimes[size - 1]; long interval = lasttime - longtimes[size - 2]; long moreneeded = (endOfDay - lasttime) / interval; if (moreneeded > 0) { int newsize = size + (int) moreneeded; long[] temp = new long[newsize]; for (i = 0; i < size; i++) { temp[i] = longtimes[i]; } while (size < newsize) { lasttime += interval; temp[size++] = lasttime; } longtimes = temp; } } return longtimes; } /** * Only for unit testing. Do not use. 
*/ String getFileName() { return logThread.fileName; } /** * Handle logging and file operations */ static class LogThread<LOGTYPE> extends Thread { private final Pollable<LOGTYPE> operationProvider; long lastFlush = 0; private FileOutputStream currentOutputStream = null; private long nextRotationTime = 0; private final String filePattern; private volatile String fileName; private long lastDropPosition = 0; private final LogWriter<LOGTYPE> logWriter; private final Compression compression; private final long[] rotationTimes; private final String symlinkName; private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); private final NativeIO nativeIO = new NativeIO(); LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName, Pollable<LOGTYPE> operationProvider) { super("Logger"); setDaemon(true); this.logWriter = logWriter; this.filePattern = filePattern; this.compression = compression; this.rotationTimes = rotationTimes; this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? 
symlinkName : null; this.operationProvider = operationProvider; } @Override public void run() { try { handleLogOperations(); } catch (InterruptedException e) { } catch (Exception e) { Process.logAndDie("Failed storing log records", e); } internalFlush(); } private void handleLogOperations() throws InterruptedException { while (!isInterrupted()) { Operation<LOGTYPE> r = operationProvider.poll(); if (r != null) { if (r.type == Operation.Type.flush) { internalFlush(); } else if (r.type == Operation.Type.close) { internalClose(); } else if (r.type == Operation.Type.rotate) { internalRotateNow(); lastFlush = System.nanoTime(); } else if (r.type == Operation.Type.log) { internalPublish(r.log.get()); flushIfOld(3, TimeUnit.SECONDS); } r.countDownLatch.countDown(); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { internalFlush(); lastFlush = now; } } private synchronized void internalFlush() { try { FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) { if (compression == Compression.GZIP) { long newPos = currentOut.getChannel().position(); if (newPos > lastDropPosition + 102400) { nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true); lastDropPosition = newPos; } } else { currentOut.flush(); } } } catch (IOException e) { logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e)); } } private void internalClose() { try { internalFlush(); FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) currentOut.close(); } catch (Exception e) { logger.log(Level.WARNING, "Got error while closing log file", e); } } private void internalPublish(LOGTYPE r) { long now = System.currentTimeMillis(); if (nextRotationTime <= 0) { nextRotationTime = getNextRotationTime(now); } if (now > nextRotationTime || currentOutputStream == 
null) { internalRotateNow(); } try { FileOutputStream out = this.currentOutputStream; logWriter.write(r, out); out.write('\n'); } catch (IOException e) { logger.warning("Failed writing log record: " + Exceptions.toMessageString(e)); } } /** * Find next rotation after specified time. * * @param now the specified time; if zero, current time is used. * @return the next rotation time */ long getNextRotationTime(long now) { if (now <= 0) { now = System.currentTimeMillis(); } long nowTod = timeOfDayMillis(now); long next = 0; for (long rotationTime : rotationTimes) { if (nowTod < rotationTime) { next = rotationTime - nowTod + now; break; } } if (next == 0) { next = rotationTimes[0] + lengthOfDayMillis - nowTod + now; } return next; } private void checkAndCreateDir(String pathname) { int lastSlash = pathname.lastIndexOf("/"); if (lastSlash > -1) { String pathExcludingFilename = pathname.substring(0, lastSlash); File filepath = new File(pathExcludingFilename); if (!filepath.exists()) { filepath.mkdirs(); } } } private void internalRotateNow() { String oldFileName = fileName; long now = System.currentTimeMillis(); fileName = LogFormatter.insertDate(filePattern, now); internalFlush(); try { checkAndCreateDir(fileName); FileOutputStream os = new FileOutputStream(fileName, true); currentOutputStream = os; lastDropPosition = 0; LogFileDb.nowLoggingTo(fileName); } catch (IOException e) { throw new RuntimeException("Couldn't open log file '" + fileName + "'", e); } createSymlinkToCurrentFile(); nextRotationTime = 0; if ((oldFileName != null)) { File oldFile = new File(oldFileName); if (oldFile.exists()) { if (compression != Compression.NONE) { executor.execute(() -> runCompression(oldFile, compression)); } else { nativeIO.dropFileFromCache(oldFile); } } } } private static void runCompression(File oldFile, Compression compression) { switch (compression) { case ZSTD: runCompressionZstd(oldFile.toPath()); break; case GZIP: runCompressionGzip(oldFile); break; default: throw new 
IllegalArgumentException("Unknown compression " + compression); } } private static void runCompressionZstd(Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); Files.createFile(compressedFile); int bufferSize = 0x400000; byte[] buffer = new byte[bufferSize]; try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize); InputStream in = Files.newInputStream(oldFile)) { int read; while ((read = in.read(buffer)) >= 0) { out.write(buffer, 0, read); } out.flush(); } Files.delete(oldFile); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } } private static void runCompressionGzip(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); NativeIO nativeIO = new NativeIO(); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { byte[] buffer = new byte[0x400000]; long totalBytesRead = 0; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false); totalBytesRead += read; } compressor.finish(); compressor.flush(); } catch (IOException e) { logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } oldFile.delete(); nativeIO.dropFileFromCache(gzippedFile); } /** * Name files by date - create a symlink with a constant name to the newest file */ private void createSymlinkToCurrentFile() { if (symlinkName == null) return; File f = new File(fileName); File f2 = new File(f.getParent(), symlinkName); String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()}; try { int retval = new ProcessExecuter().exec(cmd).getFirst(); if (retval != 0) { logger.warning("Command '" + Arrays.toString(cmd) + "' + failed with exitcode=" + retval); } } catch (IOException 
e) { logger.warning("Got '" + e + "' while doing'" + Arrays.toString(cmd) + "'."); } } private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000; private static long timeOfDayMillis(long time) { return time % lengthOfDayMillis; } } private static class Operation<LOGTYPE> { enum Type {log, flush, close, rotate} ; final Type type; final Optional<LOGTYPE> log; final CountDownLatch countDownLatch = new CountDownLatch(1); Operation(Type type) { this(type, Optional.empty()); } Operation(LOGTYPE log) { this(Type.log, Optional.of(log)); } private Operation(Type type, Optional<LOGTYPE> log) { this.type = type; this.log = log; } } }
See similar comment on catching `InterrupedException`
private void addOperation(Operation<LOGTYPE> op) { try { logThread.logQueue.put(op); } catch (InterruptedException e) { } }
} catch (InterruptedException e) {
private void addOperation(Operation<LOGTYPE> op) { try { logQueue.put(op); } catch (InterruptedException e) { } }
class LogFileHandler <LOGTYPE> { enum Compression {NONE, GZIP, ZSTD} private final static Logger logger = Logger.getLogger(LogFileHandler.class.getName()); final LogThread<LOGTYPE> logThread; LogFileHandler(Compression compression, String filePattern, String rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this(compression, filePattern, calcTimesMinutes(rotationTimes), symlinkName, logWriter); } LogFileHandler( Compression compression, String filePattern, long[] rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this.logThread = new LogThread<>(logWriter, filePattern, compression, rotationTimes, symlinkName); this.logThread.start(); } /** * Sends logrecord to file, first rotating file if needed. * * @param r logrecord to publish */ public void publish(LOGTYPE r) { addOperation(new Operation<>(r)); } public void flush() { addOperationAndWait(new Operation<>(Operation.Type.flush)); } /** * Force file rotation now, independent of schedule. */ void rotateNow() { addOperationAndWait(new Operation<>(Operation.Type.rotate)); } public void close() { addOperationAndWait(new Operation<>(Operation.Type.close)); } private void addOperationAndWait(Operation<LOGTYPE> op) { try { logThread.logQueue.put(op); op.countDownLatch.await(); } catch (InterruptedException e) { } } /** * Flushes all queued messages, interrupts the log thread in this and * waits for it to end before returning */ void shutdown() { logThread.interrupt(); try { logThread.join(); logThread.executor.shutdown(); logThread.executor.awaitTermination(600, TimeUnit.SECONDS); } catch (InterruptedException e) { } } /** * Calculate rotation times array, given times in minutes, as "0 60 ..." 
*/ private static long[] calcTimesMinutes(String times) { ArrayList<Long> list = new ArrayList<>(50); int i = 0; boolean etc = false; while (i < times.length()) { if (times.charAt(i) == ' ') { i++; continue; } int j = i; i = times.indexOf(' ', i); if (i == -1) i = times.length(); if (times.charAt(j) == '.' && times.substring(j, i).equals("...")) { etc = true; break; } list.add(Long.valueOf(times.substring(j, i))); } int size = list.size(); long[] longtimes = new long[size]; for (i = 0; i < size; i++) { longtimes[i] = list.get(i) * 60000; } if (etc) { long endOfDay = 24 * 60 * 60 * 1000; long lasttime = longtimes[size - 1]; long interval = lasttime - longtimes[size - 2]; long moreneeded = (endOfDay - lasttime) / interval; if (moreneeded > 0) { int newsize = size + (int) moreneeded; long[] temp = new long[newsize]; for (i = 0; i < size; i++) { temp[i] = longtimes[i]; } while (size < newsize) { lasttime += interval; temp[size++] = lasttime; } longtimes = temp; } } return longtimes; } /** * Only for unit testing. Do not use. 
*/ String getFileName() { return logThread.fileName; } /** * Handle logging and file operations */ static class LogThread<LOGTYPE> extends Thread { long lastFlush = 0; private FileOutputStream currentOutputStream = null; private long nextRotationTime = 0; private final String filePattern; private String fileName; private long lastDropPosition = 0; private final LogWriter<LOGTYPE> logWriter; private final ArrayBlockingQueue<Operation<LOGTYPE>> logQueue = new ArrayBlockingQueue<>(100000); private final Compression compression; private final long[] rotationTimes; private final String symlinkName; private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); private final NativeIO nativeIO = new NativeIO(); LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName) { super("Logger"); setDaemon(true); this.logWriter = logWriter; this.filePattern = filePattern; this.compression = compression; this.rotationTimes = rotationTimes; this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? 
symlinkName : null; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } internalFlush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { Operation<LOGTYPE> r = logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r.type == Operation.Type.flush) { internalFlush(); } else if (r.type == Operation.Type.close) { internalClose(); } else if (r.type == Operation.Type.rotate) { internalRotateNow(); lastFlush = System.nanoTime(); } else if (r.type == Operation.Type.log) { internalPublish(r.log.get()); flushIfOld(3, TimeUnit.SECONDS); } r.countDownLatch.countDown(); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { internalFlush(); lastFlush = now; } } private synchronized void internalFlush() { try { FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) { if (compression == Compression.GZIP) { long newPos = currentOut.getChannel().position(); if (newPos > lastDropPosition + 102400) { nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true); lastDropPosition = newPos; } } else { currentOut.flush(); } } } catch (IOException e) { logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e)); } } private void internalClose() { try { internalFlush(); FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) currentOut.close(); } catch (Exception e) { logger.log(Level.WARNING, "Got error while closing log file", e); } } private void internalPublish(LOGTYPE r) { long now = System.currentTimeMillis(); if (nextRotationTime <= 0) { nextRotationTime = getNextRotationTime(now); } if (now > nextRotationTime || currentOutputStream == null) { 
internalRotateNow(); } try { FileOutputStream out = this.currentOutputStream; logWriter.write(r, out); out.write('\n'); } catch (IOException e) { logger.warning("Failed writing log record: " + Exceptions.toMessageString(e)); } } /** * Find next rotation after specified time. * * @param now the specified time; if zero, current time is used. * @return the next rotation time */ long getNextRotationTime(long now) { if (now <= 0) { now = System.currentTimeMillis(); } long nowTod = timeOfDayMillis(now); long next = 0; for (long rotationTime : rotationTimes) { if (nowTod < rotationTime) { next = rotationTime - nowTod + now; break; } } if (next == 0) { next = rotationTimes[0] + lengthOfDayMillis - nowTod + now; } return next; } private void checkAndCreateDir(String pathname) { int lastSlash = pathname.lastIndexOf("/"); if (lastSlash > -1) { String pathExcludingFilename = pathname.substring(0, lastSlash); File filepath = new File(pathExcludingFilename); if (!filepath.exists()) { filepath.mkdirs(); } } } private void internalRotateNow() { String oldFileName = fileName; long now = System.currentTimeMillis(); fileName = LogFormatter.insertDate(filePattern, now); internalFlush(); try { checkAndCreateDir(fileName); FileOutputStream os = new FileOutputStream(fileName, true); currentOutputStream = os; lastDropPosition = 0; LogFileDb.nowLoggingTo(fileName); } catch (IOException e) { throw new RuntimeException("Couldn't open log file '" + fileName + "'", e); } createSymlinkToCurrentFile(); nextRotationTime = 0; if ((oldFileName != null)) { File oldFile = new File(oldFileName); if (oldFile.exists()) { if (compression != Compression.NONE) { executor.execute(() -> runCompression(oldFile, compression)); } else { nativeIO.dropFileFromCache(oldFile); } } } } private static void runCompression(File oldFile, Compression compression) { switch (compression) { case ZSTD: runCompressionZstd(oldFile.toPath()); break; case GZIP: runCompressionGzip(oldFile); break; default: throw new 
IllegalArgumentException("Unknown compression " + compression); } } private static void runCompressionZstd(Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); Files.createFile(compressedFile); int bufferSize = 0x400000; byte[] buffer = new byte[bufferSize]; try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize); InputStream in = Files.newInputStream(oldFile)) { int read; while ((read = in.read(buffer)) >= 0) { out.write(buffer, 0, read); } out.flush(); } Files.delete(oldFile); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } } private static void runCompressionGzip(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { byte[] buffer = new byte[0x400000]; long totalBytesRead = 0; NativeIO nativeIO = new NativeIO(); for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false); totalBytesRead += read; } compressor.finish(); compressor.flush(); oldFile.delete(); nativeIO.dropFileFromCache(gzippedFile); } catch (IOException e) { logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } /** * Name files by date - create a symlink with a constant name to the newest file */ private void createSymlinkToCurrentFile() { if (symlinkName == null) return; File f = new File(fileName); File f2 = new File(f.getParent(), symlinkName); String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()}; try { int retval = new ProcessExecuter().exec(cmd).getFirst(); if (retval != 0) { logger.warning("Command '" + Arrays.toString(cmd) + "' + failed with exitcode=" + retval); } } catch (IOException 
e) { logger.warning("Got '" + e + "' while doing'" + Arrays.toString(cmd) + "'."); } } private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000; private static long timeOfDayMillis(long time) { return time % lengthOfDayMillis; } } private static class Operation<LOGTYPE> { enum Type {log, flush, close, rotate} ; final Type type; final Optional<LOGTYPE> log; final CountDownLatch countDownLatch = new CountDownLatch(1); Operation(Type type) { this(type, Optional.empty()); } Operation(LOGTYPE log) { this(Type.log, Optional.of(log)); } private Operation(Type type, Optional<LOGTYPE> log) { this.type = type; this.log = log; } } }
/**
 * Asynchronous, rotating log file handler. Callers enqueue log records and file
 * operations on a bounded queue; a dedicated daemon {@link LogThread} drains the
 * queue, writes records via the configured {@link LogWriter}, rotates files on a
 * schedule and compresses rotated files in the background.
 *
 * <p>Fixes over the previous revision: the missing {@code addOperation} used by
 * {@link #publish} is defined, and every {@code InterruptedException} handler now
 * restores the thread's interrupt status instead of silently swallowing it.
 *
 * @param <LOGTYPE> the type of log entry written through the configured {@link LogWriter}
 */
class LogFileHandler<LOGTYPE> {

    enum Compression { NONE, GZIP, ZSTD }

    private static final Logger logger = Logger.getLogger(LogFileHandler.class.getName());

    // Bounded so a stalled log thread applies back-pressure instead of exhausting memory.
    private final ArrayBlockingQueue<Operation<LOGTYPE>> logQueue = new ArrayBlockingQueue<>(10000);
    final LogThread<LOGTYPE> logThread;

    /** Source of operations for the log thread; abstracted so tests can inject their own. */
    @FunctionalInterface
    private interface Pollable<T> {
        Operation<T> poll() throws InterruptedException;
    }

    LogFileHandler(Compression compression, String filePattern, String rotationTimes, String symlinkName,
                   LogWriter<LOGTYPE> logWriter) {
        this(compression, filePattern, calcTimesMinutes(rotationTimes), symlinkName, logWriter);
    }

    LogFileHandler(Compression compression, String filePattern, long[] rotationTimes, String symlinkName,
                   LogWriter<LOGTYPE> logWriter) {
        this.logThread = new LogThread<LOGTYPE>(logWriter, filePattern, compression, rotationTimes,
                                                symlinkName, this::poll);
        this.logThread.start();
    }

    private Operation<LOGTYPE> poll() throws InterruptedException {
        // Bounded wait so the log thread can periodically flush even when idle.
        return logQueue.poll(100, TimeUnit.MILLISECONDS);
    }

    /**
     * Sends logrecord to file, first rotating file if needed.
     *
     * @param r logrecord to publish
     */
    public void publish(LOGTYPE r) {
        addOperation(new Operation<>(r));
    }

    /** Enqueues a flush and waits until the log thread has performed it. */
    public void flush() {
        addOperationAndWait(new Operation<>(Operation.Type.flush));
    }

    /** Force file rotation now, independent of schedule. */
    void rotateNow() {
        addOperationAndWait(new Operation<>(Operation.Type.rotate));
    }

    public void close() {
        addOperationAndWait(new Operation<>(Operation.Type.close));
    }

    // Fire-and-forget enqueue used by publish(). This method was referenced but not
    // defined in the previous revision, which could not compile.
    private void addOperation(Operation<LOGTYPE> op) {
        try {
            logQueue.put(op);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status for callers
        }
    }

    private void addOperationAndWait(Operation<LOGTYPE> op) {
        try {
            logQueue.put(op);
            op.countDownLatch.await(); // block until the log thread has processed the operation
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status for callers
        }
    }

    /**
     * Flushes all queued messages, interrupts the log thread in this and
     * waits for it to end before returning
     */
    void shutdown() {
        logThread.interrupt();
        try {
            logThread.executor.shutdownNow();
            logThread.executor.awaitTermination(600, TimeUnit.SECONDS);
            logThread.join();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status for callers
        }
    }

    /** Calculate rotation times array, given times in minutes, as "0 60 ..." */
    private static long[] calcTimesMinutes(String times) {
        ArrayList<Long> list = new ArrayList<>(50);
        int i = 0;
        boolean etc = false;
        while (i < times.length()) {
            if (times.charAt(i) == ' ') { i++; continue; } // skip spaces
            int j = i;
            i = times.indexOf(' ', i);
            if (i == -1) i = times.length();
            // "..." means: keep extending with the last interval until end of day
            if (times.charAt(j) == '.' && times.substring(j, i).equals("...")) {
                etc = true;
                break;
            }
            list.add(Long.valueOf(times.substring(j, i)));
        }
        int size = list.size();
        long[] longtimes = new long[size];
        for (i = 0; i < size; i++) {
            longtimes[i] = list.get(i) * 60000; // minutes -> milliseconds
        }
        if (etc) { // extend by the last interval until the end of the day
            long endOfDay = 24 * 60 * 60 * 1000;
            long lasttime = longtimes[size - 1];
            long interval = lasttime - longtimes[size - 2];
            long moreneeded = (endOfDay - lasttime) / interval;
            if (moreneeded > 0) {
                int newsize = size + (int) moreneeded;
                long[] temp = new long[newsize];
                for (i = 0; i < size; i++) {
                    temp[i] = longtimes[i];
                }
                while (size < newsize) {
                    lasttime += interval;
                    temp[size++] = lasttime;
                }
                longtimes = temp;
            }
        }
        return longtimes;
    }

    /** Only for unit testing. Do not use. */
    String getFileName() {
        return logThread.fileName;
    }

    /** Handle logging and file operations */
    static class LogThread<LOGTYPE> extends Thread {

        private final Pollable<LOGTYPE> operationProvider;
        long lastFlush = 0;
        private FileOutputStream currentOutputStream = null;
        private long nextRotationTime = 0;
        private final String filePattern;
        private volatile String fileName; // volatile: read by getFileName() from other threads
        private long lastDropPosition = 0;
        private final LogWriter<LOGTYPE> logWriter;
        private final Compression compression;
        private final long[] rotationTimes;
        private final String symlinkName;
        // Compression of rotated files runs off-thread so logging is not blocked.
        private final ExecutorService executor =
                Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression"));
        private final NativeIO nativeIO = new NativeIO();

        LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes,
                  String symlinkName, Pollable<LOGTYPE> operationProvider) {
            super("Logger");
            setDaemon(true);
            this.logWriter = logWriter;
            this.filePattern = filePattern;
            this.compression = compression;
            this.rotationTimes = rotationTimes;
            // Blank symlink names are normalized to null, meaning "no symlink"
            this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
            this.operationProvider = operationProvider;
        }

        @Override
        public void run() {
            try {
                handleLogOperations();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // expected shutdown path; keep the flag set
            } catch (Exception e) {
                Process.logAndDie("Failed storing log records", e);
            }
            internalFlush(); // best-effort final flush before the thread exits
        }

        private void handleLogOperations() throws InterruptedException {
            while (!isInterrupted()) {
                Operation<LOGTYPE> r = operationProvider.poll();
                if (r != null) {
                    if (r.type == Operation.Type.flush) {
                        internalFlush();
                    } else if (r.type == Operation.Type.close) {
                        internalClose();
                    } else if (r.type == Operation.Type.rotate) {
                        internalRotateNow();
                        lastFlush = System.nanoTime();
                    } else if (r.type == Operation.Type.log) {
                        internalPublish(r.log.get());
                        flushIfOld(3, TimeUnit.SECONDS);
                    }
                    r.countDownLatch.countDown(); // release any waiter in addOperationAndWait
                } else {
                    flushIfOld(100, TimeUnit.MILLISECONDS); // idle: keep data flowing to disk
                }
            }
        }

        private void flushIfOld(long age, TimeUnit unit) {
            long now = System.nanoTime();
            if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
                internalFlush();
                lastFlush = now;
            }
        }

        private synchronized void internalFlush() {
            try {
                FileOutputStream currentOut = this.currentOutputStream;
                if (currentOut != null) {
                    if (compression == Compression.GZIP) {
                        // For gzip-bound files we only evict already-written pages from the
                        // page cache (in >100 KiB steps); no explicit stream flush is done here.
                        long newPos = currentOut.getChannel().position();
                        if (newPos > lastDropPosition + 102400) {
                            nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true);
                            lastDropPosition = newPos;
                        }
                    } else {
                        currentOut.flush();
                    }
                }
            } catch (IOException e) {
                logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e));
            }
        }

        private void internalClose() {
            try {
                internalFlush();
                FileOutputStream currentOut = this.currentOutputStream;
                if (currentOut != null) currentOut.close();
            } catch (Exception e) {
                logger.log(Level.WARNING, "Got error while closing log file", e);
            }
        }

        private void internalPublish(LOGTYPE r) {
            long now = System.currentTimeMillis();
            if (nextRotationTime <= 0) {
                nextRotationTime = getNextRotationTime(now); // lazy initialization
            }
            if (now > nextRotationTime || currentOutputStream == null) {
                internalRotateNow();
            }
            try {
                FileOutputStream out = this.currentOutputStream;
                logWriter.write(r, out);
                out.write('\n');
            } catch (IOException e) {
                logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
            }
        }

        /**
         * Find next rotation after specified time.
         *
         * @param now the specified time; if zero, current time is used.
         * @return the next rotation time
         */
        long getNextRotationTime(long now) {
            if (now <= 0) {
                now = System.currentTimeMillis();
            }
            long nowTod = timeOfDayMillis(now);
            long next = 0;
            for (long rotationTime : rotationTimes) {
                if (nowTod < rotationTime) {
                    next = rotationTime - nowTod + now;
                    break;
                }
            }
            if (next == 0) { // past all of today's rotations: first rotation tomorrow
                next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
            }
            return next;
        }

        private void checkAndCreateDir(String pathname) {
            int lastSlash = pathname.lastIndexOf("/");
            if (lastSlash > -1) {
                String pathExcludingFilename = pathname.substring(0, lastSlash);
                File filepath = new File(pathExcludingFilename);
                if (!filepath.exists()) {
                    filepath.mkdirs();
                }
            }
        }

        private void internalRotateNow() {
            // Open a new file under the date-expanded pattern, then hand the previous
            // file to background compression (or drop it from the page cache).
            String oldFileName = fileName;
            long now = System.currentTimeMillis();
            fileName = LogFormatter.insertDate(filePattern, now);
            internalFlush();
            try {
                checkAndCreateDir(fileName);
                FileOutputStream os = new FileOutputStream(fileName, true); // append if it exists
                currentOutputStream = os;
                lastDropPosition = 0;
                LogFileDb.nowLoggingTo(fileName);
            } catch (IOException e) {
                throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
            }
            createSymlinkToCurrentFile();
            nextRotationTime = 0; // recomputed on next publish
            if ((oldFileName != null)) {
                File oldFile = new File(oldFileName);
                if (oldFile.exists()) {
                    if (compression != Compression.NONE) {
                        executor.execute(() -> runCompression(oldFile, compression));
                    } else {
                        nativeIO.dropFileFromCache(oldFile);
                    }
                }
            }
        }

        private static void runCompression(File oldFile, Compression compression) {
            switch (compression) {
                case ZSTD:
                    runCompressionZstd(oldFile.toPath());
                    break;
                case GZIP:
                    runCompressionGzip(oldFile);
                    break;
                default:
                    throw new IllegalArgumentException("Unknown compression " + compression);
            }
        }

        private static void runCompressionZstd(Path oldFile) {
            try {
                Path compressedFile = Paths.get(oldFile.toString() + ".zst");
                Files.createFile(compressedFile);
                int bufferSize = 0x400000;
                byte[] buffer = new byte[bufferSize];
                try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize);
                     InputStream in = Files.newInputStream(oldFile)) {
                    int read;
                    while ((read = in.read(buffer)) >= 0) {
                        out.write(buffer, 0, read);
                    }
                    out.flush();
                }
                Files.delete(oldFile); // only reached when compression succeeded
            } catch (IOException e) {
                logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e);
            }
        }

        private static void runCompressionGzip(File oldFile) {
            File gzippedFile = new File(oldFile.getPath() + ".gz");
            NativeIO nativeIO = new NativeIO();
            try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000);
                 FileInputStream inputStream = new FileInputStream(oldFile)) {
                byte[] buffer = new byte[0x400000];
                long totalBytesRead = 0;
                for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) {
                    compressor.write(buffer, 0, read);
                    // Evict pages we just read; the rotated file will not be read again.
                    nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false);
                    totalBytesRead += read;
                }
                compressor.finish();
                compressor.flush();
            } catch (IOException e) {
                logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'.");
            }
            // Delete only after try-with-resources has closed the input stream;
            // deleting a still-open file fails on some platforms.
            oldFile.delete();
            nativeIO.dropFileFromCache(gzippedFile);
        }

        /** Name files by date - create a symlink with a constant name to the newest file */
        private void createSymlinkToCurrentFile() {
            if (symlinkName == null) return;
            File f = new File(fileName);
            File f2 = new File(f.getParent(), symlinkName);
            String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()};
            try {
                int retval = new ProcessExecuter().exec(cmd).getFirst();
                // Detonate the error when symlink creation fails, but do not abort logging.
                if (retval != 0) {
                    logger.warning("Command '" + Arrays.toString(cmd) + "' failed with exitcode=" + retval);
                }
            } catch (IOException e) {
                logger.warning("Got '" + e + "' while doing '" + Arrays.toString(cmd) + "'.");
            }
        }

        private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

        private static long timeOfDayMillis(long time) {
            return time % lengthOfDayMillis;
        }

    }

    /** A queued unit of work for the log thread: a record to write or a file operation. */
    private static class Operation<LOGTYPE> {
        enum Type { log, flush, close, rotate }

        final Type type;
        final Optional<LOGTYPE> log;
        // Counted down by the log thread when the operation has been processed,
        // allowing callers to wait for completion.
        final CountDownLatch countDownLatch = new CountDownLatch(1);

        Operation(Type type) {
            this(type, Optional.empty());
        }

        Operation(LOGTYPE log) {
            this(Type.log, Optional.of(log));
        }

        private Operation(Type type, Optional<LOGTYPE> log) {
            this.type = type;
            this.log = log;
        }
    }
}
The old file should be deleted only after the input stream reading it has been closed.
/**
 * Gzip-compresses {@code oldFile} into a sibling {@code .gz} file, evicting the
 * consumed pages from the OS page cache as it goes, then deletes the original.
 *
 * <p>Fix: the original deleted {@code oldFile} inside the try-with-resources block,
 * i.e. while the {@link FileInputStream} reading it was still open. The delete and
 * the cache drop now happen after the streams are closed, and only when
 * compression succeeded, so a failed compression no longer loses the log file.
 *
 * @param oldFile the rotated log file to compress and remove
 */
private static void runCompressionGzip(File oldFile) {
    File gzippedFile = new File(oldFile.getPath() + ".gz");
    NativeIO nativeIO = new NativeIO();
    try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000);
         FileInputStream inputStream = new FileInputStream(oldFile)) {
        byte[] buffer = new byte[0x400000];
        long totalBytesRead = 0;
        for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) {
            compressor.write(buffer, 0, read);
            // The rotated file will not be read again; drop its pages from the cache.
            nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false);
            totalBytesRead += read;
        }
        compressor.finish();
        compressor.flush();
    } catch (IOException e) {
        logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'.");
        return; // keep the uncompressed original when compression failed
    }
    // Streams are closed here; safe to remove the original and evict the new file.
    oldFile.delete();
    nativeIO.dropFileFromCache(gzippedFile);
}
oldFile.delete();
/**
 * Gzip-compresses the given rotated log file into a sibling ".gz" file and then
 * removes the original. Pages already consumed from the source are dropped from
 * the OS page cache while streaming; the finished .gz file is evicted as well.
 *
 * @param oldFile the rotated log file to compress and remove
 */
private static void runCompressionGzip(File oldFile) {
    File gzippedFile = new File(oldFile.getPath() + ".gz");
    NativeIO nativeIO = new NativeIO();
    try (GZIPOutputStream gzipOut = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000);
         FileInputStream source = new FileInputStream(oldFile)) {
        byte[] chunk = new byte[0x400000];
        long bytesConsumed = 0;
        int count = source.read(chunk);
        while (count > 0) {
            gzipOut.write(chunk, 0, count);
            // Already-compressed input will not be read again; evict it from the cache.
            nativeIO.dropPartialFileFromCache(source.getFD(), bytesConsumed, count, false);
            bytesConsumed += count;
            count = source.read(chunk);
        }
        gzipOut.finish();
        gzipOut.flush();
    } catch (IOException e) {
        logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'.");
    }
    // Input stream is closed at this point, so deleting the original is safe.
    oldFile.delete();
    nativeIO.dropFileFromCache(gzippedFile);
}
class LogThread<LOGTYPE> extends Thread { long lastFlush = 0; private FileOutputStream currentOutputStream = null; private long nextRotationTime = 0; private final String filePattern; private String fileName; private long lastDropPosition = 0; private final LogWriter<LOGTYPE> logWriter; private final ArrayBlockingQueue<Operation<LOGTYPE>> logQueue = new ArrayBlockingQueue<>(100000); private final Compression compression; private final long[] rotationTimes; private final String symlinkName; private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); private final NativeIO nativeIO = new NativeIO(); LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName) { super("Logger"); setDaemon(true); this.logWriter = logWriter; this.filePattern = filePattern; this.compression = compression; this.rotationTimes = rotationTimes; this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? 
symlinkName : null; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } internalFlush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { Operation<LOGTYPE> r = logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r.type == Operation.Type.flush) { internalFlush(); } else if (r.type == Operation.Type.close) { internalClose(); } else if (r.type == Operation.Type.rotate) { internalRotateNow(); lastFlush = System.nanoTime(); } else if (r.type == Operation.Type.log) { internalPublish(r.log.get()); flushIfOld(3, TimeUnit.SECONDS); } r.countDownLatch.countDown(); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { internalFlush(); lastFlush = now; } } private synchronized void internalFlush() { try { FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) { if (compression == Compression.GZIP) { long newPos = currentOut.getChannel().position(); if (newPos > lastDropPosition + 102400) { nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true); lastDropPosition = newPos; } } else { currentOut.flush(); } } } catch (IOException e) { logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e)); } } private void internalClose() { try { internalFlush(); FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) currentOut.close(); } catch (Exception e) { logger.log(Level.WARNING, "Got error while closing log file", e); } } private void internalPublish(LOGTYPE r) { long now = System.currentTimeMillis(); if (nextRotationTime <= 0) { nextRotationTime = getNextRotationTime(now); } if (now > nextRotationTime || currentOutputStream == null) { 
internalRotateNow(); } try { FileOutputStream out = this.currentOutputStream; logWriter.write(r, out); out.write('\n'); } catch (IOException e) { logger.warning("Failed writing log record: " + Exceptions.toMessageString(e)); } } /** * Find next rotation after specified time. * * @param now the specified time; if zero, current time is used. * @return the next rotation time */ long getNextRotationTime(long now) { if (now <= 0) { now = System.currentTimeMillis(); } long nowTod = timeOfDayMillis(now); long next = 0; for (long rotationTime : rotationTimes) { if (nowTod < rotationTime) { next = rotationTime - nowTod + now; break; } } if (next == 0) { next = rotationTimes[0] + lengthOfDayMillis - nowTod + now; } return next; } private void checkAndCreateDir(String pathname) { int lastSlash = pathname.lastIndexOf("/"); if (lastSlash > -1) { String pathExcludingFilename = pathname.substring(0, lastSlash); File filepath = new File(pathExcludingFilename); if (!filepath.exists()) { filepath.mkdirs(); } } } private void internalRotateNow() { String oldFileName = fileName; long now = System.currentTimeMillis(); fileName = LogFormatter.insertDate(filePattern, now); internalFlush(); try { checkAndCreateDir(fileName); FileOutputStream os = new FileOutputStream(fileName, true); currentOutputStream = os; lastDropPosition = 0; LogFileDb.nowLoggingTo(fileName); } catch (IOException e) { throw new RuntimeException("Couldn't open log file '" + fileName + "'", e); } createSymlinkToCurrentFile(); nextRotationTime = 0; if ((oldFileName != null)) { File oldFile = new File(oldFileName); if (oldFile.exists()) { if (compression != Compression.NONE) { executor.execute(() -> runCompression(oldFile, compression)); } else { nativeIO.dropFileFromCache(oldFile); } } } } private static void runCompression(File oldFile, Compression compression) { switch (compression) { case ZSTD: runCompressionZstd(oldFile.toPath()); break; case GZIP: runCompressionGzip(oldFile); break; default: throw new 
IllegalArgumentException("Unknown compression " + compression); } } private static void runCompressionZstd(Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); Files.createFile(compressedFile); int bufferSize = 0x400000; byte[] buffer = new byte[bufferSize]; try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize); InputStream in = Files.newInputStream(oldFile)) { int read; while ((read = in.read(buffer)) >= 0) { out.write(buffer, 0, read); } out.flush(); } Files.delete(oldFile); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } } /** * Name files by date - create a symlink with a constant name to the newest file */ private void createSymlinkToCurrentFile() { if (symlinkName == null) return; File f = new File(fileName); File f2 = new File(f.getParent(), symlinkName); String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()}; try { int retval = new ProcessExecuter().exec(cmd).getFirst(); if (retval != 0) { logger.warning("Command '" + Arrays.toString(cmd) + "' + failed with exitcode=" + retval); } } catch (IOException e) { logger.warning("Got '" + e + "' while doing'" + Arrays.toString(cmd) + "'."); } } private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000; private static long timeOfDayMillis(long time) { return time % lengthOfDayMillis; } }
class LogThread<LOGTYPE> extends Thread { private final Pollable<LOGTYPE> operationProvider; long lastFlush = 0; private FileOutputStream currentOutputStream = null; private long nextRotationTime = 0; private final String filePattern; private volatile String fileName; private long lastDropPosition = 0; private final LogWriter<LOGTYPE> logWriter; private final Compression compression; private final long[] rotationTimes; private final String symlinkName; private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); private final NativeIO nativeIO = new NativeIO(); LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName, Pollable<LOGTYPE> operationProvider) { super("Logger"); setDaemon(true); this.logWriter = logWriter; this.filePattern = filePattern; this.compression = compression; this.rotationTimes = rotationTimes; this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? 
symlinkName : null; this.operationProvider = operationProvider; } @Override public void run() { try { handleLogOperations(); } catch (InterruptedException e) { } catch (Exception e) { Process.logAndDie("Failed storing log records", e); } internalFlush(); } private void handleLogOperations() throws InterruptedException { while (!isInterrupted()) { Operation<LOGTYPE> r = operationProvider.poll(); if (r != null) { if (r.type == Operation.Type.flush) { internalFlush(); } else if (r.type == Operation.Type.close) { internalClose(); } else if (r.type == Operation.Type.rotate) { internalRotateNow(); lastFlush = System.nanoTime(); } else if (r.type == Operation.Type.log) { internalPublish(r.log.get()); flushIfOld(3, TimeUnit.SECONDS); } r.countDownLatch.countDown(); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { internalFlush(); lastFlush = now; } } private synchronized void internalFlush() { try { FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) { if (compression == Compression.GZIP) { long newPos = currentOut.getChannel().position(); if (newPos > lastDropPosition + 102400) { nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true); lastDropPosition = newPos; } } else { currentOut.flush(); } } } catch (IOException e) { logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e)); } } private void internalClose() { try { internalFlush(); FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) currentOut.close(); } catch (Exception e) { logger.log(Level.WARNING, "Got error while closing log file", e); } } private void internalPublish(LOGTYPE r) { long now = System.currentTimeMillis(); if (nextRotationTime <= 0) { nextRotationTime = getNextRotationTime(now); } if (now > nextRotationTime || currentOutputStream == 
null) { internalRotateNow(); } try { FileOutputStream out = this.currentOutputStream; logWriter.write(r, out); out.write('\n'); } catch (IOException e) { logger.warning("Failed writing log record: " + Exceptions.toMessageString(e)); } } /** * Find next rotation after specified time. * * @param now the specified time; if zero, current time is used. * @return the next rotation time */ long getNextRotationTime(long now) { if (now <= 0) { now = System.currentTimeMillis(); } long nowTod = timeOfDayMillis(now); long next = 0; for (long rotationTime : rotationTimes) { if (nowTod < rotationTime) { next = rotationTime - nowTod + now; break; } } if (next == 0) { next = rotationTimes[0] + lengthOfDayMillis - nowTod + now; } return next; } private void checkAndCreateDir(String pathname) { int lastSlash = pathname.lastIndexOf("/"); if (lastSlash > -1) { String pathExcludingFilename = pathname.substring(0, lastSlash); File filepath = new File(pathExcludingFilename); if (!filepath.exists()) { filepath.mkdirs(); } } } private void internalRotateNow() { String oldFileName = fileName; long now = System.currentTimeMillis(); fileName = LogFormatter.insertDate(filePattern, now); internalFlush(); try { checkAndCreateDir(fileName); FileOutputStream os = new FileOutputStream(fileName, true); currentOutputStream = os; lastDropPosition = 0; LogFileDb.nowLoggingTo(fileName); } catch (IOException e) { throw new RuntimeException("Couldn't open log file '" + fileName + "'", e); } createSymlinkToCurrentFile(); nextRotationTime = 0; if ((oldFileName != null)) { File oldFile = new File(oldFileName); if (oldFile.exists()) { if (compression != Compression.NONE) { executor.execute(() -> runCompression(oldFile, compression)); } else { nativeIO.dropFileFromCache(oldFile); } } } } private static void runCompression(File oldFile, Compression compression) { switch (compression) { case ZSTD: runCompressionZstd(oldFile.toPath()); break; case GZIP: runCompressionGzip(oldFile); break; default: throw new 
IllegalArgumentException("Unknown compression " + compression); } } private static void runCompressionZstd(Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); Files.createFile(compressedFile); int bufferSize = 0x400000; byte[] buffer = new byte[bufferSize]; try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize); InputStream in = Files.newInputStream(oldFile)) { int read; while ((read = in.read(buffer)) >= 0) { out.write(buffer, 0, read); } out.flush(); } Files.delete(oldFile); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } } /** * Name files by date - create a symlink with a constant name to the newest file */ private void createSymlinkToCurrentFile() { if (symlinkName == null) return; File f = new File(fileName); File f2 = new File(f.getParent(), symlinkName); String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()}; try { int retval = new ProcessExecuter().exec(cmd).getFirst(); if (retval != 0) { logger.warning("Command '" + Arrays.toString(cmd) + "' + failed with exitcode=" + retval); } } catch (IOException e) { logger.warning("Got '" + e + "' while doing'" + Arrays.toString(cmd) + "'."); } } private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000; private static long timeOfDayMillis(long time) { return time % lengthOfDayMillis; } }
Fixed
/**
 * Loads local and remote sessions in parallel on a temporary thread pool and
 * waits up to one minute for the loading to complete.
 *
 * <p>Fix: the original called {@code e.printStackTrace()} on interruption, which
 * bypasses the logging framework, and it swallowed the interrupt. The exception
 * is now logged through the class logger and the interrupt status is restored.
 */
private void loadSessions() {
    ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
                                                            new DaemonThreadFactory("load-sessions-"));
    loadLocalSessions(executor);
    loadRemoteSessions(executor);
    try {
        executor.shutdown();
        if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
            log.log(Level.INFO, "Executor did not terminate");
    } catch (InterruptedException e) {
        log.log(Level.WARNING, "Interrupted while waiting for sessions to load", e);
        Thread.currentThread().interrupt(); // preserve interrupt status for callers
    }
}
e.printStackTrace();
/**
 * Loads local and remote sessions in parallel on a temporary thread pool and
 * waits up to one minute for the loading to complete.
 *
 * <p>Fix: interruption was logged but the interrupt status was still swallowed;
 * it is now restored so callers can observe the interruption.
 */
private void loadSessions() {
    ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
                                                            new DaemonThreadFactory("load-sessions-"));
    loadLocalSessions(executor);
    loadRemoteSessions(executor);
    try {
        executor.shutdown();
        if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
            log.log(Level.INFO, "Executor did not terminate");
    } catch (InterruptedException e) {
        log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e));
        Thread.currentThread().interrupt(); // preserve interrupt status for callers
    }
}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Object monitor = new Object(); private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>()); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final PermanentApplicationPackage permanentApplicationPackage; private final FlagSource flagSource; private final TenantFileSystemDirs tenantFileSystemDirs; private final Metrics metrics; private final MetricUpdater metricUpdater; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final ConfigCurator configCurator; private final SessionCounter sessionCounter; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final TenantListener tenantListener; public SessionRepository(TenantName tenantName, TenantApplications applicationRepo, SessionPreparer sessionPreparer, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, PermanentApplicationPackage permanentApplicationPackage, FlagSource flagSource, 
ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, TenantListener tenantListener) { this.tenantName = tenantName; this.configCurator = ConfigCurator.create(curator); sessionCounter = new SessionCounter(configCurator, tenantName); this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = clock; this.curator = curator; this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime()); this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command); this.permanentApplicationPackage = permanentApplicationPackage; this.flagSource = flagSource; this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.metrics = metrics; this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configserverConfig = configserverConfig; this.configServerDB = configServerDB; this.zone = zone; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.tenantListener = tenantListener; loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } public void addLocalSession(LocalSession session) { long sessionId = session.getSessionId(); localSessionCache.put(sessionId, session); remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); } public Collection<LocalSession> 
getLocalSessions() { return localSessionCache.values(); } private void loadLocalSessions(ExecutorService executor) { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; List<Future<Long>> futures = new ArrayList<>(); for (File session : sessions) { long sessionId = Long.parseLong(session.getName()); futures.add(executor.submit(() -> createSessionFromId(sessionId))); } futures.forEach(f -> { try { long sessionId = f.get(); log.log(Level.INFO, () -> "Local session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session"); } }); } public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter(); Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) { ApplicationId existingApplicationId = existingSession.getApplicationId(); File existingApp = getSessionAppDir(existingSession.getSessionId()); LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } /** * Creates a local session based on a remote session and the distributed application package. * Does not wait for session being created on other servers. 
*/ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.values()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); remoteSessionCache.put(sessionId, session); loadSessionIfActive(session); updateSessionStateWatcher(sessionId, session); return session; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessionsFromZooKeeper()) { Session session = remoteSessionCache.get(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { 
log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException { List<Future<Long>> futures = new ArrayList<>(); for (long sessionId : getRemoteSessionsFromZooKeeper()) { futures.add(executor.submit(() -> sessionAdded(sessionId))); } futures.forEach(f -> { try { long sessionId = f.get(); log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session"); } }); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. 
* * @param sessionId session id for the new session */ public long sessionAdded(long sessionId) { if (hasStatusDeleted(sessionId)) return sessionId; log.log(Level.FINE, () -> "Adding remote session " + sessionId); Session session = createRemoteSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } createLocalSessionFromDistributedApplicationPackage(sessionId); return sessionId; } private boolean hasStatusDeleted(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); return session.getStatus() == Session.Status.DELETE; } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId); applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void delete(Session remoteSession) { long sessionId = remoteSession.getSessionId(); log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId); createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit(); deleteRemoteSessionFromZooKeeper(remoteSession); remoteSessionCache.remove(sessionId); LocalSession localSession = getLocalSession(sessionId); if (localSession != null) { log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId); deleteLocalSession(localSession); } } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) 
{ if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo, tenantListener); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (Session session : remoteSessionCache.values()) { sessionMetrics.add(session.getStatus()); } metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); 
metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; default: break; } }); } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); Set<LocalSession> toDelete = new HashSet<>(); try { for (LocalSession candidate : localSessionCache.values()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { toDelete.add(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { toDelete.add(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } toDelete.forEach(this::deleteLocalSession); } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } private void 
ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (configCurator.exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); addLocalSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { 
synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { RemoteSession session = getRemoteSession(sessionId); applicationSet = Optional.ofNullable(ensureApplicationLoaded(session)); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. 
*/ long createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); createLocalSession(sessionId, applicationPackage); return sessionId; } void createLocalSession(long sessionId, ApplicationPackage applicationPackage) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); addLocalSession(session); } /** * Returns a new local session for the given session id if it does not already exist. * Will also add the session to the local session cache if necessary */ public void createLocalSessionFromDistributedApplicationPackage(long sessionId) { if (applicationRepo.sessionExistsInFileSystem(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); createSessionFromId(sessionId); return; } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return; } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); 
createLocalSession(sessionDir, applicationId, sessionId); } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return sessionCounter.nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = configserverConfig.serverId(); return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(remoteSession); } } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public void close() { 
deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> existingSessions) { for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) { long sessionId = it.next().sessionId; if (existingSessions.contains(sessionId)) continue; SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); it.remove(); metricUpdater.incRemovedSessions(); } } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.get(sessionId) == null) sessionAdded(sessionId); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } public Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for 
file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Object monitor = new Object(); private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>()); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final PermanentApplicationPackage permanentApplicationPackage; private final FlagSource flagSource; private final TenantFileSystemDirs tenantFileSystemDirs; private final Metrics metrics; private final MetricUpdater metricUpdater; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final ConfigCurator configCurator; private final SessionCounter sessionCounter; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final TenantListener tenantListener; public SessionRepository(TenantName tenantName, TenantApplications applicationRepo, SessionPreparer sessionPreparer, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, PermanentApplicationPackage permanentApplicationPackage, FlagSource flagSource, 
ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, TenantListener tenantListener) { this.tenantName = tenantName; this.configCurator = ConfigCurator.create(curator); sessionCounter = new SessionCounter(configCurator, tenantName); this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = clock; this.curator = curator; this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime()); this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command); this.permanentApplicationPackage = permanentApplicationPackage; this.flagSource = flagSource; this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.metrics = metrics; this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configserverConfig = configserverConfig; this.configServerDB = configServerDB; this.zone = zone; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.tenantListener = tenantListener; loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } public void addLocalSession(LocalSession session) { long sessionId = session.getSessionId(); localSessionCache.put(sessionId, session); remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); } public Collection<LocalSession> 
getLocalSessions() { return localSessionCache.values(); } private void loadLocalSessions(ExecutorService executor) { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; Map<Long, Future<?>> futures = new HashMap<>(); for (File session : sessions) { long sessionId = Long.parseLong(session.getName()); futures.put(sessionId, executor.submit(() -> createSessionFromId(sessionId))); } futures.forEach((sessionId, future) -> { try { future.get(); log.log(Level.INFO, () -> "Local session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session " + sessionId, e); } }); } public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter(); Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) { ApplicationId existingApplicationId = existingSession.getApplicationId(); File existingApp = getSessionAppDir(existingSession.getSessionId()); LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } /** * Creates a local session based on a remote session and the distributed application package. * Does not wait for session being created on other servers. 
*/ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.values()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); remoteSessionCache.put(sessionId, session); loadSessionIfActive(session); updateSessionStateWatcher(sessionId, session); return session; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessionsFromZooKeeper()) { Session session = remoteSessionCache.get(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { 
log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException { Map<Long, Future<?>> futures = new HashMap<>(); for (long sessionId : getRemoteSessionsFromZooKeeper()) { futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId))); } futures.forEach((sessionId, future) -> { try { future.get(); log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session " + sessionId, e); } }); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. 
* * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { if (hasStatusDeleted(sessionId)) return; log.log(Level.FINE, () -> "Adding remote session " + sessionId); Session session = createRemoteSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } createLocalSessionFromDistributedApplicationPackage(sessionId); } private boolean hasStatusDeleted(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); return session.getStatus() == Session.Status.DELETE; } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId); applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void delete(Session remoteSession) { long sessionId = remoteSession.getSessionId(); log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId); createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit(); deleteRemoteSessionFromZooKeeper(remoteSession); remoteSessionCache.remove(sessionId); LocalSession localSession = getLocalSession(sessionId); if (localSession != null) { log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId); deleteLocalSession(localSession); } } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if 
(applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo, tenantListener); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (Session session : remoteSessionCache.values()) { sessionMetrics.add(session.getStatus()); } metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); 
metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; default: break; } }); } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); Set<LocalSession> toDelete = new HashSet<>(); try { for (LocalSession candidate : localSessionCache.values()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { toDelete.add(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { toDelete.add(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } toDelete.forEach(this::deleteLocalSession); } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } private void 
ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (configCurator.exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); addLocalSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { 
synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { RemoteSession session = getRemoteSession(sessionId); applicationSet = Optional.ofNullable(ensureApplicationLoaded(session)); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. 
*/ void createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); createLocalSession(sessionId, applicationPackage); } void createLocalSession(long sessionId, ApplicationPackage applicationPackage) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); addLocalSession(session); } /** * Returns a new local session for the given session id if it does not already exist. * Will also add the session to the local session cache if necessary */ public void createLocalSessionFromDistributedApplicationPackage(long sessionId) { if (applicationRepo.sessionExistsInFileSystem(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); createSessionFromId(sessionId); return; } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return; } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); createLocalSession(sessionDir, 
applicationId, sessionId); } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return sessionCounter.nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = configserverConfig.serverId(); return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(remoteSession); } } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public void close() { deleteAllSessions(); 
tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> existingSessions) { for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) { long sessionId = it.next().sessionId; if (existingSessions.contains(sessionId)) continue; SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); it.remove(); metricUpdater.incRemovedSessions(); } } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.get(sessionId) == null) sessionAdded(sessionId); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } public Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ 
private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
This should be performed after closing the output stream
private static void runCompressionGzip(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { byte[] buffer = new byte[0x400000]; long totalBytesRead = 0; NativeIO nativeIO = new NativeIO(); for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false); totalBytesRead += read; } compressor.finish(); compressor.flush(); oldFile.delete(); nativeIO.dropFileFromCache(gzippedFile); } catch (IOException e) { logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } }
nativeIO.dropFileFromCache(gzippedFile);
private static void runCompressionGzip(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); NativeIO nativeIO = new NativeIO(); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { byte[] buffer = new byte[0x400000]; long totalBytesRead = 0; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false); totalBytesRead += read; } compressor.finish(); compressor.flush(); } catch (IOException e) { logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } oldFile.delete(); nativeIO.dropFileFromCache(gzippedFile); }
/**
 * Background thread owning the log file: drains the operation queue, writes
 * records, flushes, rotates files on schedule and triggers compression of
 * rotated files. All file state is confined to this thread.
 */
class LogThread<LOGTYPE> extends Thread {
    long lastFlush = 0; // nanoTime of the last flush, used to throttle periodic flushing
    private FileOutputStream currentOutputStream = null; // stream of the file currently logged to
    private long nextRotationTime = 0; // wall-clock millis of next rotation; <= 0 means "not computed yet"
    private final String filePattern; // file name pattern with date placeholders (see LogFormatter.insertDate)
    private String fileName; // resolved name of the file currently logged to
    private long lastDropPosition = 0; // last file position already dropped from the OS page cache
    private final LogWriter<LOGTYPE> logWriter; // serializes records onto the output stream
    private final ArrayBlockingQueue<Operation<LOGTYPE>> logQueue = new ArrayBlockingQueue<>(100000); // producer/consumer hand-off
    private final Compression compression;
    private final long[] rotationTimes; // rotation points as millis-of-day, ascending
    private final String symlinkName; // constant-name symlink to the newest file, or null for none
    private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); // runs compression of rotated files
    private final NativeIO nativeIO = new NativeIO(); // used to drop written data from the OS page cache

    LogThread(LogWriter<LOGTYPE> logWriter,
              String filePattern,
              Compression compression,
              long[] rotationTimes,
              String symlinkName) {
        super("Logger");
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // A blank symlink name is treated as "no symlink"
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
    }

    @Override
    public void run() {
        try {
            storeLogRecords();
        } catch (InterruptedException e) {
        } catch (Exception e) {
            com.yahoo.protect.Process.logAndDie("Failed storing log records", e);
        }
        // Best-effort flush of anything still buffered before the thread exits
        internalFlush();
    }

    // Main loop: polls queued operations and dispatches on their type;
    // on an empty poll, flushes if the last flush is older than 100 ms.
    private void storeLogRecords() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = logQueue.poll(100, TimeUnit.MILLISECONDS);
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    flushIfOld(3, TimeUnit.SECONDS);
                }
                r.countDownLatch.countDown(); // unblock a producer waiting on this operation
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);
            }
        }
    }

    // Flushes only when the last flush is older than the given age.
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    // For GZIP output, drops already-written pages from the OS page cache
    // (in chunks of at least 100 KiB); otherwise just flushes the stream.
    private synchronized void internalFlush() {
        try {
            FileOutputStream currentOut = this.currentOutputStream;
            if (currentOut != null) {
                if (compression == Compression.GZIP) {
                    long newPos = currentOut.getChannel().position();
                    if (newPos > lastDropPosition + 102400) {
                        nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true);
                        lastDropPosition = newPos;
                    }
                } else {
                    currentOut.flush();
                }
            }
        } catch (IOException e) {
            logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e));
        }
    }

    // Flushes and closes the current output stream, if any.
    private void internalClose() {
        try {
            internalFlush();
            FileOutputStream currentOut = this.currentOutputStream;
            if (currentOut != null) currentOut.close();
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file", e);
        }
    }

    // Writes one record, rotating the file first if the schedule says so.
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now); // lazy initialization
        }
        if (now > nextRotationTime || currentOutputStream == null) {
            internalRotateNow();
        }
        try {
            FileOutputStream out = this.currentOutputStream;
            logWriter.write(r, out);
            out.write('\n'); // one record per line
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        if (next == 0) {
            // No rotation left today: use the first rotation time tomorrow
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    // Creates the parent directories of pathname if they do not exist.
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    // Rotates to a freshly named file now, then hands the old file to the
    // compression executor (or just drops it from the page cache).
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalFlush();
        try {
            checkAndCreateDir(fileName);
            FileOutputStream os = new FileOutputStream(fileName, true); // append if it already exists
            currentOutputStream = os;
            lastDropPosition = 0;
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        nextRotationTime = 0; // recomputed on the next publish
        if ((oldFileName != null)) {
            File oldFile = new File(oldFileName);
            if (oldFile.exists()) {
                if (compression != Compression.NONE) {
                    executor.execute(() -> runCompression(oldFile, compression));
                } else {
                    nativeIO.dropFileFromCache(oldFile);
                }
            }
        }
    }

    // Dispatches to the compressor matching the configured algorithm.
    private static void runCompression(File oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(oldFile.toPath());
                break;
            case GZIP:
                runCompressionGzip(oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    // Compresses oldFile to "<name>.zst" and deletes the original on success.
    private static void runCompressionZstd(Path oldFile) {
        try {
            Path compressedFile = Paths.get(oldFile.toString() + ".zst");
            Files.createFile(compressedFile);
            int bufferSize = 0x400000;
            byte[] buffer = new byte[bufferSize];
            try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize);
                 InputStream in = Files.newInputStream(oldFile)) {
                int read;
                while ((read = in.read(buffer)) >= 0) {
                    out.write(buffer, 0, read);
                }
                out.flush();
            }
            Files.delete(oldFile); // only reached when compression succeeded
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e);
        }
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private void createSymlinkToCurrentFile() {
        if (symlinkName == null) return;
        File f = new File(fileName);
        File f2 = new File(f.getParent(), symlinkName);
        String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()};
        try {
            int retval = new ProcessExecuter().exec(cmd).getFirst();
            if (retval != 0) {
                logger.warning("Command '" + Arrays.toString(cmd) + "' + failed with exitcode=" + retval);
            }
        } catch (IOException e) {
            logger.warning("Got '" + e + "' while doing'" + Arrays.toString(cmd) + "'.");
        }
    }

    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    // Millis since the start of the epoch-based (UTC) day.
    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
/**
 * Background thread owning the log file: polls the injected operation
 * provider, writes records, flushes, rotates files on schedule and triggers
 * compression of rotated files. All file state is confined to this thread.
 */
class LogThread<LOGTYPE> extends Thread {
    private final Pollable<LOGTYPE> operationProvider; // source of queued log operations
    long lastFlush = 0; // nanoTime of the last flush, used to throttle periodic flushing
    private FileOutputStream currentOutputStream = null; // stream of the file currently logged to
    private long nextRotationTime = 0; // wall-clock millis of next rotation; <= 0 means "not computed yet"
    private final String filePattern; // file name pattern with date placeholders (see LogFormatter.insertDate)
    private volatile String fileName; // volatile: presumably read by other threads — TODO confirm against callers
    private long lastDropPosition = 0; // last file position already dropped from the OS page cache
    private final LogWriter<LOGTYPE> logWriter; // serializes records onto the output stream
    private final Compression compression;
    private final long[] rotationTimes; // rotation points as millis-of-day, ascending
    private final String symlinkName; // constant-name symlink to the newest file, or null for none
    private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); // runs compression of rotated files
    private final NativeIO nativeIO = new NativeIO(); // used to drop written data from the OS page cache

    LogThread(LogWriter<LOGTYPE> logWriter,
              String filePattern,
              Compression compression,
              long[] rotationTimes,
              String symlinkName,
              Pollable<LOGTYPE> operationProvider) {
        super("Logger");
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // A blank symlink name is treated as "no symlink"
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
        this.operationProvider = operationProvider;
    }

    @Override
    public void run() {
        try {
            handleLogOperations();
        } catch (InterruptedException e) {
        } catch (Exception e) {
            Process.logAndDie("Failed storing log records", e);
        }
        // Best-effort flush of anything still buffered before the thread exits
        internalFlush();
    }

    // Main loop: polls the provider and dispatches operations by type;
    // on an empty poll, flushes if the last flush is older than 100 ms.
    private void handleLogOperations() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = operationProvider.poll();
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    flushIfOld(3, TimeUnit.SECONDS);
                }
                r.countDownLatch.countDown(); // unblock a producer waiting on this operation
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);
            }
        }
    }

    // Flushes only when the last flush is older than the given age.
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    // For GZIP output, drops already-written pages from the OS page cache
    // (in chunks of at least 100 KiB); otherwise just flushes the stream.
    private synchronized void internalFlush() {
        try {
            FileOutputStream currentOut = this.currentOutputStream;
            if (currentOut != null) {
                if (compression == Compression.GZIP) {
                    long newPos = currentOut.getChannel().position();
                    if (newPos > lastDropPosition + 102400) {
                        nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true);
                        lastDropPosition = newPos;
                    }
                } else {
                    currentOut.flush();
                }
            }
        } catch (IOException e) {
            logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e));
        }
    }

    // Flushes and closes the current output stream, if any.
    private void internalClose() {
        try {
            internalFlush();
            FileOutputStream currentOut = this.currentOutputStream;
            if (currentOut != null) currentOut.close();
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file", e);
        }
    }

    // Writes one record, rotating the file first if the schedule says so.
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now); // lazy initialization
        }
        if (now > nextRotationTime || currentOutputStream == null) {
            internalRotateNow();
        }
        try {
            FileOutputStream out = this.currentOutputStream;
            logWriter.write(r, out);
            out.write('\n'); // one record per line
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        if (next == 0) {
            // No rotation left today: use the first rotation time tomorrow
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    // Creates the parent directories of pathname if they do not exist.
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    // Rotates to a freshly named file now, then hands the old file to the
    // compression executor (or just drops it from the page cache).
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalFlush();
        try {
            checkAndCreateDir(fileName);
            FileOutputStream os = new FileOutputStream(fileName, true); // append if it already exists
            currentOutputStream = os;
            lastDropPosition = 0;
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        nextRotationTime = 0; // recomputed on the next publish
        if ((oldFileName != null)) {
            File oldFile = new File(oldFileName);
            if (oldFile.exists()) {
                if (compression != Compression.NONE) {
                    executor.execute(() -> runCompression(oldFile, compression));
                } else {
                    nativeIO.dropFileFromCache(oldFile);
                }
            }
        }
    }

    // Dispatches to the compressor matching the configured algorithm.
    private static void runCompression(File oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(oldFile.toPath());
                break;
            case GZIP:
                runCompressionGzip(oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    // Compresses oldFile to "<name>.zst" and deletes the original on success.
    private static void runCompressionZstd(Path oldFile) {
        try {
            Path compressedFile = Paths.get(oldFile.toString() + ".zst");
            Files.createFile(compressedFile);
            int bufferSize = 0x400000;
            byte[] buffer = new byte[bufferSize];
            try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize);
                 InputStream in = Files.newInputStream(oldFile)) {
                int read;
                while ((read = in.read(buffer)) >= 0) {
                    out.write(buffer, 0, read);
                }
                out.flush();
            }
            Files.delete(oldFile); // only reached when compression succeeded
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e);
        }
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private void createSymlinkToCurrentFile() {
        if (symlinkName == null) return;
        File f = new File(fileName);
        File f2 = new File(f.getParent(), symlinkName);
        String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()};
        try {
            int retval = new ProcessExecuter().exec(cmd).getFirst();
            if (retval != 0) {
                logger.warning("Command '" + Arrays.toString(cmd) + "' + failed with exitcode=" + retval);
            }
        } catch (IOException e) {
            logger.warning("Got '" + e + "' while doing'" + Arrays.toString(cmd) + "'.");
        }
    }

    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    // Millis since the start of the epoch-based (UTC) day.
    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
Consider rewriting this using `java.nio.file.Files` (e.g. `createSymbolicLink`) instead of shelling out to `/bin/ln`
/**
 * Name files by date - create a symlink with a constant name to the newest file.
 *
 * Rewritten to use java.nio.file.Files instead of shelling out to /bin/ln:
 * any existing link is removed first (matching the -f of "ln -sf"), then a
 * relative symlink pointing at the newest file is created in the same
 * directory.
 */
private void createSymlinkToCurrentFile() {
    if (symlinkName == null) return;
    File f = new File(fileName);
    Path symlink = new File(f.getParent(), symlinkName).toPath();
    try {
        Files.deleteIfExists(symlink); // replace any stale link, as /bin/ln -sf did
        Files.createSymbolicLink(symlink, Paths.get(f.getName()));
    } catch (IOException e) {
        logger.warning("Got '" + e + "' while creating symlink '" + symlink + "' -> '" + f.getName() + "'.");
    }
}
int retval = new ProcessExecuter().exec(cmd).getFirst();
/**
 * Name files by date - create a symlink with a constant name to the newest
 * file by invoking "/bin/ln -sf" in the log directory.
 */
private void createSymlinkToCurrentFile() {
    if (symlinkName == null) return;
    File current = new File(fileName);
    File link = new File(current.getParent(), symlinkName);
    String[] command = new String[]{"/bin/ln", "-sf", current.getName(), link.getPath()};
    try {
        int exitCode = new ProcessExecuter().exec(command).getFirst();
        if (exitCode != 0) {
            logger.warning("Command '" + Arrays.toString(command) + "' + failed with exitcode=" + exitCode);
        }
    } catch (IOException e) {
        logger.warning("Got '" + e + "' while doing'" + Arrays.toString(command) + "'.");
    }
}
/**
 * Background thread owning the log file: drains the operation queue, writes
 * records, flushes, rotates files on schedule and triggers compression of
 * rotated files. All file state is confined to this thread.
 */
class LogThread<LOGTYPE> extends Thread {
    long lastFlush = 0; // nanoTime of the last flush, used to throttle periodic flushing
    private FileOutputStream currentOutputStream = null; // stream of the file currently logged to
    private long nextRotationTime = 0; // wall-clock millis of next rotation; <= 0 means "not computed yet"
    private final String filePattern; // file name pattern with date placeholders (see LogFormatter.insertDate)
    private String fileName; // resolved name of the file currently logged to
    private long lastDropPosition = 0; // last file position already dropped from the OS page cache
    private final LogWriter<LOGTYPE> logWriter; // serializes records onto the output stream
    private final ArrayBlockingQueue<Operation<LOGTYPE>> logQueue = new ArrayBlockingQueue<>(100000); // producer/consumer hand-off
    private final Compression compression;
    private final long[] rotationTimes; // rotation points as millis-of-day, ascending
    private final String symlinkName; // constant-name symlink to the newest file, or null for none
    private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); // runs compression of rotated files
    private final NativeIO nativeIO = new NativeIO(); // used to drop written data from the OS page cache

    LogThread(LogWriter<LOGTYPE> logWriter,
              String filePattern,
              Compression compression,
              long[] rotationTimes,
              String symlinkName) {
        super("Logger");
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // A blank symlink name is treated as "no symlink"
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
    }

    @Override
    public void run() {
        try {
            storeLogRecords();
        } catch (InterruptedException e) {
        } catch (Exception e) {
            com.yahoo.protect.Process.logAndDie("Failed storing log records", e);
        }
        // Best-effort flush of anything still buffered before the thread exits
        internalFlush();
    }

    // Main loop: polls queued operations and dispatches on their type;
    // on an empty poll, flushes if the last flush is older than 100 ms.
    private void storeLogRecords() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = logQueue.poll(100, TimeUnit.MILLISECONDS);
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    flushIfOld(3, TimeUnit.SECONDS);
                }
                r.countDownLatch.countDown(); // unblock a producer waiting on this operation
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);
            }
        }
    }

    // Flushes only when the last flush is older than the given age.
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    // For GZIP output, drops already-written pages from the OS page cache
    // (in chunks of at least 100 KiB); otherwise just flushes the stream.
    private synchronized void internalFlush() {
        try {
            FileOutputStream currentOut = this.currentOutputStream;
            if (currentOut != null) {
                if (compression == Compression.GZIP) {
                    long newPos = currentOut.getChannel().position();
                    if (newPos > lastDropPosition + 102400) {
                        nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true);
                        lastDropPosition = newPos;
                    }
                } else {
                    currentOut.flush();
                }
            }
        } catch (IOException e) {
            logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e));
        }
    }

    // Flushes and closes the current output stream, if any.
    private void internalClose() {
        try {
            internalFlush();
            FileOutputStream currentOut = this.currentOutputStream;
            if (currentOut != null) currentOut.close();
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file", e);
        }
    }

    // Writes one record, rotating the file first if the schedule says so.
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now); // lazy initialization
        }
        if (now > nextRotationTime || currentOutputStream == null) {
            internalRotateNow();
        }
        try {
            FileOutputStream out = this.currentOutputStream;
            logWriter.write(r, out);
            out.write('\n'); // one record per line
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        if (next == 0) {
            // No rotation left today: use the first rotation time tomorrow
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    // Creates the parent directories of pathname if they do not exist.
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    // Rotates to a freshly named file now, then hands the old file to the
    // compression executor (or just drops it from the page cache).
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalFlush();
        try {
            checkAndCreateDir(fileName);
            FileOutputStream os = new FileOutputStream(fileName, true); // append if it already exists
            currentOutputStream = os;
            lastDropPosition = 0;
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        nextRotationTime = 0; // recomputed on the next publish
        if ((oldFileName != null)) {
            File oldFile = new File(oldFileName);
            if (oldFile.exists()) {
                if (compression != Compression.NONE) {
                    executor.execute(() -> runCompression(oldFile, compression));
                } else {
                    nativeIO.dropFileFromCache(oldFile);
                }
            }
        }
    }

    // Dispatches to the compressor matching the configured algorithm.
    private static void runCompression(File oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(oldFile.toPath());
                break;
            case GZIP:
                runCompressionGzip(oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    // Compresses oldFile to "<name>.zst" and deletes the original on success.
    private static void runCompressionZstd(Path oldFile) {
        try {
            Path compressedFile = Paths.get(oldFile.toString() + ".zst");
            Files.createFile(compressedFile);
            int bufferSize = 0x400000;
            byte[] buffer = new byte[bufferSize];
            try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize);
                 InputStream in = Files.newInputStream(oldFile)) {
                int read;
                while ((read = in.read(buffer)) >= 0) {
                    out.write(buffer, 0, read);
                }
                out.flush();
            }
            Files.delete(oldFile); // only reached when compression succeeded
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e);
        }
    }

    // Compresses oldFile to "<name>.gz".
    private static void runCompressionGzip(File oldFile) {
        File gzippedFile = new File(oldFile.getPath() + ".gz");
        try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000);
             FileInputStream inputStream = new FileInputStream(oldFile)) {
            byte[] buffer = new byte[0x400000];
            long totalBytesRead = 0;
            NativeIO nativeIO = new NativeIO();
            for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) {
                compressor.write(buffer, 0, read);
                // Drop already-copied source pages from the page cache as we go
                nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false);
                totalBytesRead += read;
            }
            compressor.finish();
            compressor.flush();
            // NOTE(review): the delete and cache drop below run while the streams
            // are still open; they should be performed after the try-with-resources
            // has closed them.
            oldFile.delete();
            nativeIO.dropFileFromCache(gzippedFile);
        } catch (IOException e) {
            logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'.");
        }
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    // Millis since the start of the epoch-based (UTC) day.
    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
/**
 * Background thread owning the log file: polls the injected operation
 * provider, writes records, flushes, rotates files on schedule and triggers
 * compression of rotated files. All file state is confined to this thread.
 */
class LogThread<LOGTYPE> extends Thread {
    private final Pollable<LOGTYPE> operationProvider; // source of queued log operations
    long lastFlush = 0; // nanoTime of the last flush, used to throttle periodic flushing
    private FileOutputStream currentOutputStream = null; // stream of the file currently logged to
    private long nextRotationTime = 0; // wall-clock millis of next rotation; <= 0 means "not computed yet"
    private final String filePattern; // file name pattern with date placeholders (see LogFormatter.insertDate)
    private volatile String fileName; // volatile: presumably read by other threads — TODO confirm against callers
    private long lastDropPosition = 0; // last file position already dropped from the OS page cache
    private final LogWriter<LOGTYPE> logWriter; // serializes records onto the output stream
    private final Compression compression;
    private final long[] rotationTimes; // rotation points as millis-of-day, ascending
    private final String symlinkName; // constant-name symlink to the newest file, or null for none
    private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); // runs compression of rotated files
    private final NativeIO nativeIO = new NativeIO(); // used to drop written data from the OS page cache

    LogThread(LogWriter<LOGTYPE> logWriter,
              String filePattern,
              Compression compression,
              long[] rotationTimes,
              String symlinkName,
              Pollable<LOGTYPE> operationProvider) {
        super("Logger");
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // A blank symlink name is treated as "no symlink"
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
        this.operationProvider = operationProvider;
    }

    @Override
    public void run() {
        try {
            handleLogOperations();
        } catch (InterruptedException e) {
        } catch (Exception e) {
            Process.logAndDie("Failed storing log records", e);
        }
        // Best-effort flush of anything still buffered before the thread exits
        internalFlush();
    }

    // Main loop: polls the provider and dispatches operations by type;
    // on an empty poll, flushes if the last flush is older than 100 ms.
    private void handleLogOperations() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = operationProvider.poll();
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    flushIfOld(3, TimeUnit.SECONDS);
                }
                r.countDownLatch.countDown(); // unblock a producer waiting on this operation
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);
            }
        }
    }

    // Flushes only when the last flush is older than the given age.
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    // For GZIP output, drops already-written pages from the OS page cache
    // (in chunks of at least 100 KiB); otherwise just flushes the stream.
    private synchronized void internalFlush() {
        try {
            FileOutputStream currentOut = this.currentOutputStream;
            if (currentOut != null) {
                if (compression == Compression.GZIP) {
                    long newPos = currentOut.getChannel().position();
                    if (newPos > lastDropPosition + 102400) {
                        nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true);
                        lastDropPosition = newPos;
                    }
                } else {
                    currentOut.flush();
                }
            }
        } catch (IOException e) {
            logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e));
        }
    }

    // Flushes and closes the current output stream, if any.
    private void internalClose() {
        try {
            internalFlush();
            FileOutputStream currentOut = this.currentOutputStream;
            if (currentOut != null) currentOut.close();
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file", e);
        }
    }

    // Writes one record, rotating the file first if the schedule says so.
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now); // lazy initialization
        }
        if (now > nextRotationTime || currentOutputStream == null) {
            internalRotateNow();
        }
        try {
            FileOutputStream out = this.currentOutputStream;
            logWriter.write(r, out);
            out.write('\n'); // one record per line
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        if (next == 0) {
            // No rotation left today: use the first rotation time tomorrow
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    // Creates the parent directories of pathname if they do not exist.
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    // Rotates to a freshly named file now, then hands the old file to the
    // compression executor (or just drops it from the page cache).
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalFlush();
        try {
            checkAndCreateDir(fileName);
            FileOutputStream os = new FileOutputStream(fileName, true); // append if it already exists
            currentOutputStream = os;
            lastDropPosition = 0;
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        nextRotationTime = 0; // recomputed on the next publish
        if ((oldFileName != null)) {
            File oldFile = new File(oldFileName);
            if (oldFile.exists()) {
                if (compression != Compression.NONE) {
                    executor.execute(() -> runCompression(oldFile, compression));
                } else {
                    nativeIO.dropFileFromCache(oldFile);
                }
            }
        }
    }

    // Dispatches to the compressor matching the configured algorithm.
    private static void runCompression(File oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(oldFile.toPath());
                break;
            case GZIP:
                runCompressionGzip(oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    // Compresses oldFile to "<name>.zst" and deletes the original on success.
    private static void runCompressionZstd(Path oldFile) {
        try {
            Path compressedFile = Paths.get(oldFile.toString() + ".zst");
            Files.createFile(compressedFile);
            int bufferSize = 0x400000;
            byte[] buffer = new byte[bufferSize];
            try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize);
                 InputStream in = Files.newInputStream(oldFile)) {
                int read;
                while ((read = in.read(buffer)) >= 0) {
                    out.write(buffer, 0, read);
                }
                out.flush();
            }
            Files.delete(oldFile); // only reached when compression succeeded
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e);
        }
    }

    // Compresses oldFile to "<name>.gz"; deletes the original and drops the
    // result from the page cache only after the streams have been closed.
    private static void runCompressionGzip(File oldFile) {
        File gzippedFile = new File(oldFile.getPath() + ".gz");
        NativeIO nativeIO = new NativeIO();
        try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000);
             FileInputStream inputStream = new FileInputStream(oldFile)) {
            byte[] buffer = new byte[0x400000];
            long totalBytesRead = 0;
            for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) {
                compressor.write(buffer, 0, read);
                // Drop already-copied source pages from the page cache as we go
                nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false);
                totalBytesRead += read;
            }
            compressor.finish();
            compressor.flush();
        } catch (IOException e) {
            logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'.");
        }
        oldFile.delete();
        nativeIO.dropFileFromCache(gzippedFile);
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    // Millis since the start of the epoch-based (UTC) day.
    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
Looks like leftover debug output — you forgot to remove these `System.out.println` calls :)
/**
 * Stops the background logger: interrupts the log thread, shuts down the
 * compression executor and waits for both to finish (up to 600 seconds for
 * the executor).
 *
 * Fix: removed leftover System.out.println debug statements; the interrupt
 * status is now restored if the wait itself is interrupted.
 */
void shutdown() {
    logThread.interrupt();
    try {
        logThread.executor.shutdownNow();
        logThread.executor.awaitTermination(600, TimeUnit.SECONDS);
        logThread.join();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve the caller's interrupt status
    }
}
System.out.println(1);
/**
 * Interrupts the log thread, stops the compression executor and waits for
 * both to terminate (up to 600 seconds for the executor).
 */
void shutdown() {
    logThread.interrupt();
    try {
        ExecutorService compressionExecutor = logThread.executor;
        compressionExecutor.shutdownNow();
        compressionExecutor.awaitTermination(600, TimeUnit.SECONDS);
        logThread.join();
    } catch (InterruptedException e) {
        // Intentionally ignored, preserving previous behavior
    }
}
// Asynchronous, rotating log file handler. Records are queued on a bounded
// ArrayBlockingQueue (capacity 100000) and consumed by a dedicated LogThread
// which writes, flushes on a 3s/100ms cadence, rotates on a schedule parsed
// from a "0 60 ..." minutes string, optionally compresses rotated files
// (gzip or zstd) on a daemon cached thread pool, and maintains a symlink to
// the current file via /bin/ln.
// NOTE(review): several catch (InterruptedException e) blocks below swallow
// the interrupt without restoring the thread's interrupt flag.
class LogFileHandler <LOGTYPE> { enum Compression {NONE, GZIP, ZSTD} private final static Logger logger = Logger.getLogger(LogFileHandler.class.getName()); private final ArrayBlockingQueue<Operation<LOGTYPE>> logQueue = new ArrayBlockingQueue<>(100000); final LogThread<LOGTYPE> logThread; LogFileHandler(Compression compression, String filePattern, String rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this(compression, filePattern, calcTimesMinutes(rotationTimes), symlinkName, logWriter); } LogFileHandler( Compression compression, String filePattern, long[] rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this.logThread = new LogThread<>(logWriter, filePattern, compression, rotationTimes, symlinkName, this::poll); this.logThread.start(); } private Operation<LOGTYPE> poll() { try { return logQueue.poll(100, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { return null; } } /** * Sends logrecord to file, first rotating file if needed. * * @param r logrecord to publish */ public void publish(LOGTYPE r) { addOperation(new Operation<>(r)); } public void flush() { addOperationAndWait(new Operation<>(Operation.Type.flush)); } /** * Force file rotation now, independent of schedule. */ void rotateNow() { addOperationAndWait(new Operation<>(Operation.Type.rotate)); } public void close() { addOperationAndWait(new Operation<>(Operation.Type.close)); } private void addOperation(Operation<LOGTYPE> op) { try { logQueue.put(op); } catch (InterruptedException e) { } } private void addOperationAndWait(Operation<LOGTYPE> op) { try { logQueue.put(op); op.countDownLatch.await(); } catch (InterruptedException e) { } } /** * Flushes all queued messages, interrupts the log thread in this and * waits for it to end before returning */ /** * Calculate rotation times array, given times in minutes, as "0 60 ..." 
*/ private static long[] calcTimesMinutes(String times) { ArrayList<Long> list = new ArrayList<>(50); int i = 0; boolean etc = false; while (i < times.length()) { if (times.charAt(i) == ' ') { i++; continue; } int j = i; i = times.indexOf(' ', i); if (i == -1) i = times.length(); if (times.charAt(j) == '.' && times.substring(j, i).equals("...")) { etc = true; break; } list.add(Long.valueOf(times.substring(j, i))); } int size = list.size(); long[] longtimes = new long[size]; for (i = 0; i < size; i++) { longtimes[i] = list.get(i) * 60000; } if (etc) { long endOfDay = 24 * 60 * 60 * 1000; long lasttime = longtimes[size - 1]; long interval = lasttime - longtimes[size - 2]; long moreneeded = (endOfDay - lasttime) / interval; if (moreneeded > 0) { int newsize = size + (int) moreneeded; long[] temp = new long[newsize]; for (i = 0; i < size; i++) { temp[i] = longtimes[i]; } while (size < newsize) { lasttime += interval; temp[size++] = lasttime; } longtimes = temp; } } return longtimes; } /** * Only for unit testing. Do not use. 
*/ String getFileName() { return logThread.fileName; } /** * Handle logging and file operations */ static class LogThread<LOGTYPE> extends Thread { private final Supplier<Operation<LOGTYPE>> operationProvider; long lastFlush = 0; private FileOutputStream currentOutputStream = null; private long nextRotationTime = 0; private final String filePattern; private volatile String fileName; private long lastDropPosition = 0; private final LogWriter<LOGTYPE> logWriter; private final Compression compression; private final long[] rotationTimes; private final String symlinkName; private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); private final NativeIO nativeIO = new NativeIO(); LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName, Supplier<Operation<LOGTYPE>> operationProvider) { super("Logger"); setDaemon(true); this.logWriter = logWriter; this.filePattern = filePattern; this.compression = compression; this.rotationTimes = rotationTimes; this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? 
symlinkName : null; this.operationProvider = operationProvider; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } internalFlush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { Operation<LOGTYPE> r = operationProvider.get(); if (r != null) { if (r.type == Operation.Type.flush) { internalFlush(); } else if (r.type == Operation.Type.close) { internalClose(); } else if (r.type == Operation.Type.rotate) { internalRotateNow(); lastFlush = System.nanoTime(); } else if (r.type == Operation.Type.log) { internalPublish(r.log.get()); flushIfOld(3, TimeUnit.SECONDS); } r.countDownLatch.countDown(); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { internalFlush(); lastFlush = now; } } private synchronized void internalFlush() { try { FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) { if (compression == Compression.GZIP) { long newPos = currentOut.getChannel().position(); if (newPos > lastDropPosition + 102400) { nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true); lastDropPosition = newPos; } } else { currentOut.flush(); } } } catch (IOException e) { logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e)); } } private void internalClose() { try { internalFlush(); FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) currentOut.close(); } catch (Exception e) { logger.log(Level.WARNING, "Got error while closing log file", e); } } private void internalPublish(LOGTYPE r) { long now = System.currentTimeMillis(); if (nextRotationTime <= 0) { nextRotationTime = getNextRotationTime(now); } if (now > nextRotationTime || 
currentOutputStream == null) { internalRotateNow(); } try { FileOutputStream out = this.currentOutputStream; logWriter.write(r, out); out.write('\n'); } catch (IOException e) { logger.warning("Failed writing log record: " + Exceptions.toMessageString(e)); } } /** * Find next rotation after specified time. * * @param now the specified time; if zero, current time is used. * @return the next rotation time */ long getNextRotationTime(long now) { if (now <= 0) { now = System.currentTimeMillis(); } long nowTod = timeOfDayMillis(now); long next = 0; for (long rotationTime : rotationTimes) { if (nowTod < rotationTime) { next = rotationTime - nowTod + now; break; } } if (next == 0) { next = rotationTimes[0] + lengthOfDayMillis - nowTod + now; } return next; } private void checkAndCreateDir(String pathname) { int lastSlash = pathname.lastIndexOf("/"); if (lastSlash > -1) { String pathExcludingFilename = pathname.substring(0, lastSlash); File filepath = new File(pathExcludingFilename); if (!filepath.exists()) { filepath.mkdirs(); } } } private void internalRotateNow() { String oldFileName = fileName; long now = System.currentTimeMillis(); fileName = LogFormatter.insertDate(filePattern, now); internalFlush(); try { checkAndCreateDir(fileName); FileOutputStream os = new FileOutputStream(fileName, true); currentOutputStream = os; lastDropPosition = 0; LogFileDb.nowLoggingTo(fileName); } catch (IOException e) { throw new RuntimeException("Couldn't open log file '" + fileName + "'", e); } createSymlinkToCurrentFile(); nextRotationTime = 0; if ((oldFileName != null)) { File oldFile = new File(oldFileName); if (oldFile.exists()) { if (compression != Compression.NONE) { executor.execute(() -> runCompression(oldFile, compression)); } else { nativeIO.dropFileFromCache(oldFile); } } } } private static void runCompression(File oldFile, Compression compression) { switch (compression) { case ZSTD: runCompressionZstd(oldFile.toPath()); break; case GZIP: runCompressionGzip(oldFile); break; 
default: throw new IllegalArgumentException("Unknown compression " + compression); } } private static void runCompressionZstd(Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); Files.createFile(compressedFile); int bufferSize = 0x400000; byte[] buffer = new byte[bufferSize]; try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize); InputStream in = Files.newInputStream(oldFile)) { int read; while ((read = in.read(buffer)) >= 0) { out.write(buffer, 0, read); } out.flush(); } Files.delete(oldFile); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } } private static void runCompressionGzip(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); NativeIO nativeIO = new NativeIO(); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { byte[] buffer = new byte[0x400000]; long totalBytesRead = 0; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false); totalBytesRead += read; } compressor.finish(); compressor.flush(); } catch (IOException e) { logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } oldFile.delete(); nativeIO.dropFileFromCache(gzippedFile); } /** * Name files by date - create a symlink with a constant name to the newest file */ private void createSymlinkToCurrentFile() { if (symlinkName == null) return; File f = new File(fileName); File f2 = new File(f.getParent(), symlinkName); String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()}; try { int retval = new ProcessExecuter().exec(cmd).getFirst(); if (retval != 0) { logger.warning("Command '" + Arrays.toString(cmd) + "' + failed with exitcode=" + retval); } } 
catch (IOException e) { logger.warning("Got '" + e + "' while doing'" + Arrays.toString(cmd) + "'."); } } private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000; private static long timeOfDayMillis(long time) { return time % lengthOfDayMillis; } } private static class Operation<LOGTYPE> { enum Type {log, flush, close, rotate} ; final Type type; final Optional<LOGTYPE> log; final CountDownLatch countDownLatch = new CountDownLatch(1); Operation(Type type) { this(type, Optional.empty()); } Operation(LOGTYPE log) { this(Type.log, Optional.of(log)); } private Operation(Type type, Optional<LOGTYPE> log) { this.type = type; this.log = log; } } }
// Asynchronous, rotating log file handler (revised variant). Records are
// queued on a bounded ArrayBlockingQueue (capacity 10000) and consumed by a
// dedicated LogThread. Differs from the other variant in this file by a
// custom @FunctionalInterface Pollable whose poll() may throw
// InterruptedException, propagating interruption to the log thread instead
// of mapping it to null inside poll().
// NOTE(review): several catch (InterruptedException e) blocks below still
// swallow the interrupt without restoring the thread's interrupt flag.
class LogFileHandler <LOGTYPE> { enum Compression {NONE, GZIP, ZSTD} private final static Logger logger = Logger.getLogger(LogFileHandler.class.getName()); private final ArrayBlockingQueue<Operation<LOGTYPE>> logQueue = new ArrayBlockingQueue<>(10000); final LogThread<LOGTYPE> logThread; @FunctionalInterface private interface Pollable<T> { Operation<T> poll() throws InterruptedException; } LogFileHandler(Compression compression, String filePattern, String rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this(compression, filePattern, calcTimesMinutes(rotationTimes), symlinkName, logWriter); } LogFileHandler( Compression compression, String filePattern, long[] rotationTimes, String symlinkName, LogWriter<LOGTYPE> logWriter) { this.logThread = new LogThread<LOGTYPE>(logWriter, filePattern, compression, rotationTimes, symlinkName, this::poll); this.logThread.start(); } private Operation<LOGTYPE> poll() throws InterruptedException { return logQueue.poll(100, TimeUnit.MILLISECONDS); } /** * Sends logrecord to file, first rotating file if needed. * * @param r logrecord to publish */ public void publish(LOGTYPE r) { addOperation(new Operation<>(r)); } public void flush() { addOperationAndWait(new Operation<>(Operation.Type.flush)); } /** * Force file rotation now, independent of schedule. */ void rotateNow() { addOperationAndWait(new Operation<>(Operation.Type.rotate)); } public void close() { addOperationAndWait(new Operation<>(Operation.Type.close)); } private void addOperation(Operation<LOGTYPE> op) { try { logQueue.put(op); } catch (InterruptedException e) { } } private void addOperationAndWait(Operation<LOGTYPE> op) { try { logQueue.put(op); op.countDownLatch.await(); } catch (InterruptedException e) { } } /** * Flushes all queued messages, interrupts the log thread in this and * waits for it to end before returning */ /** * Calculate rotation times array, given times in minutes, as "0 60 ..." 
*/ private static long[] calcTimesMinutes(String times) { ArrayList<Long> list = new ArrayList<>(50); int i = 0; boolean etc = false; while (i < times.length()) { if (times.charAt(i) == ' ') { i++; continue; } int j = i; i = times.indexOf(' ', i); if (i == -1) i = times.length(); if (times.charAt(j) == '.' && times.substring(j, i).equals("...")) { etc = true; break; } list.add(Long.valueOf(times.substring(j, i))); } int size = list.size(); long[] longtimes = new long[size]; for (i = 0; i < size; i++) { longtimes[i] = list.get(i) * 60000; } if (etc) { long endOfDay = 24 * 60 * 60 * 1000; long lasttime = longtimes[size - 1]; long interval = lasttime - longtimes[size - 2]; long moreneeded = (endOfDay - lasttime) / interval; if (moreneeded > 0) { int newsize = size + (int) moreneeded; long[] temp = new long[newsize]; for (i = 0; i < size; i++) { temp[i] = longtimes[i]; } while (size < newsize) { lasttime += interval; temp[size++] = lasttime; } longtimes = temp; } } return longtimes; } /** * Only for unit testing. Do not use. 
*/ String getFileName() { return logThread.fileName; } /** * Handle logging and file operations */ static class LogThread<LOGTYPE> extends Thread { private final Pollable<LOGTYPE> operationProvider; long lastFlush = 0; private FileOutputStream currentOutputStream = null; private long nextRotationTime = 0; private final String filePattern; private volatile String fileName; private long lastDropPosition = 0; private final LogWriter<LOGTYPE> logWriter; private final Compression compression; private final long[] rotationTimes; private final String symlinkName; private final ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression")); private final NativeIO nativeIO = new NativeIO(); LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName, Pollable<LOGTYPE> operationProvider) { super("Logger"); setDaemon(true); this.logWriter = logWriter; this.filePattern = filePattern; this.compression = compression; this.rotationTimes = rotationTimes; this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? 
symlinkName : null; this.operationProvider = operationProvider; } @Override public void run() { try { handleLogOperations(); } catch (InterruptedException e) { } catch (Exception e) { Process.logAndDie("Failed storing log records", e); } internalFlush(); } private void handleLogOperations() throws InterruptedException { while (!isInterrupted()) { Operation<LOGTYPE> r = operationProvider.poll(); if (r != null) { if (r.type == Operation.Type.flush) { internalFlush(); } else if (r.type == Operation.Type.close) { internalClose(); } else if (r.type == Operation.Type.rotate) { internalRotateNow(); lastFlush = System.nanoTime(); } else if (r.type == Operation.Type.log) { internalPublish(r.log.get()); flushIfOld(3, TimeUnit.SECONDS); } r.countDownLatch.countDown(); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { internalFlush(); lastFlush = now; } } private synchronized void internalFlush() { try { FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) { if (compression == Compression.GZIP) { long newPos = currentOut.getChannel().position(); if (newPos > lastDropPosition + 102400) { nativeIO.dropPartialFileFromCache(currentOut.getFD(), lastDropPosition, newPos, true); lastDropPosition = newPos; } } else { currentOut.flush(); } } } catch (IOException e) { logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e)); } } private void internalClose() { try { internalFlush(); FileOutputStream currentOut = this.currentOutputStream; if (currentOut != null) currentOut.close(); } catch (Exception e) { logger.log(Level.WARNING, "Got error while closing log file", e); } } private void internalPublish(LOGTYPE r) { long now = System.currentTimeMillis(); if (nextRotationTime <= 0) { nextRotationTime = getNextRotationTime(now); } if (now > nextRotationTime || currentOutputStream == 
null) { internalRotateNow(); } try { FileOutputStream out = this.currentOutputStream; logWriter.write(r, out); out.write('\n'); } catch (IOException e) { logger.warning("Failed writing log record: " + Exceptions.toMessageString(e)); } } /** * Find next rotation after specified time. * * @param now the specified time; if zero, current time is used. * @return the next rotation time */ long getNextRotationTime(long now) { if (now <= 0) { now = System.currentTimeMillis(); } long nowTod = timeOfDayMillis(now); long next = 0; for (long rotationTime : rotationTimes) { if (nowTod < rotationTime) { next = rotationTime - nowTod + now; break; } } if (next == 0) { next = rotationTimes[0] + lengthOfDayMillis - nowTod + now; } return next; } private void checkAndCreateDir(String pathname) { int lastSlash = pathname.lastIndexOf("/"); if (lastSlash > -1) { String pathExcludingFilename = pathname.substring(0, lastSlash); File filepath = new File(pathExcludingFilename); if (!filepath.exists()) { filepath.mkdirs(); } } } private void internalRotateNow() { String oldFileName = fileName; long now = System.currentTimeMillis(); fileName = LogFormatter.insertDate(filePattern, now); internalFlush(); try { checkAndCreateDir(fileName); FileOutputStream os = new FileOutputStream(fileName, true); currentOutputStream = os; lastDropPosition = 0; LogFileDb.nowLoggingTo(fileName); } catch (IOException e) { throw new RuntimeException("Couldn't open log file '" + fileName + "'", e); } createSymlinkToCurrentFile(); nextRotationTime = 0; if ((oldFileName != null)) { File oldFile = new File(oldFileName); if (oldFile.exists()) { if (compression != Compression.NONE) { executor.execute(() -> runCompression(oldFile, compression)); } else { nativeIO.dropFileFromCache(oldFile); } } } } private static void runCompression(File oldFile, Compression compression) { switch (compression) { case ZSTD: runCompressionZstd(oldFile.toPath()); break; case GZIP: runCompressionGzip(oldFile); break; default: throw new 
IllegalArgumentException("Unknown compression " + compression); } } private static void runCompressionZstd(Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); Files.createFile(compressedFile); int bufferSize = 0x400000; byte[] buffer = new byte[bufferSize]; try (ZstdOuputStream out = new ZstdOuputStream(Files.newOutputStream(compressedFile), bufferSize); InputStream in = Files.newInputStream(oldFile)) { int read; while ((read = in.read(buffer)) >= 0) { out.write(buffer, 0, read); } out.flush(); } Files.delete(oldFile); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } } private static void runCompressionGzip(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); NativeIO nativeIO = new NativeIO(); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { byte[] buffer = new byte[0x400000]; long totalBytesRead = 0; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false); totalBytesRead += read; } compressor.finish(); compressor.flush(); } catch (IOException e) { logger.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } oldFile.delete(); nativeIO.dropFileFromCache(gzippedFile); } /** * Name files by date - create a symlink with a constant name to the newest file */ private void createSymlinkToCurrentFile() { if (symlinkName == null) return; File f = new File(fileName); File f2 = new File(f.getParent(), symlinkName); String[] cmd = new String[]{"/bin/ln", "-sf", f.getName(), f2.getPath()}; try { int retval = new ProcessExecuter().exec(cmd).getFirst(); if (retval != 0) { logger.warning("Command '" + Arrays.toString(cmd) + "' + failed with exitcode=" + retval); } } catch (IOException 
e) { logger.warning("Got '" + e + "' while doing'" + Arrays.toString(cmd) + "'."); } } private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000; private static long timeOfDayMillis(long time) { return time % lengthOfDayMillis; } } private static class Operation<LOGTYPE> { enum Type {log, flush, close, rotate} ; final Type type; final Optional<LOGTYPE> log; final CountDownLatch countDownLatch = new CountDownLatch(1); Operation(Type type) { this(type, Optional.empty()); } Operation(LOGTYPE log) { this(Type.log, Optional.of(log)); } private Operation(Type type, Optional<LOGTYPE> log) { this.type = type; this.log = log; } } }
Should this use the logger instead of printStackTrace()?
/**
 * Loads all local and remote sessions concurrently on a temporary daemon
 * thread pool, then waits (up to 1 minute) for the loading to complete.
 */
private void loadSessions() {
    ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
                                                            new DaemonThreadFactory("load-sessions-"));
    loadLocalSessions(executor);
    loadRemoteSessions(executor);
    try {
        executor.shutdown();
        if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
            log.log(Level.INFO, "Executor did not terminate");
    } catch (InterruptedException e) {
        // Use the class logger rather than printStackTrace(), and restore
        // the interrupt flag so callers can observe the interruption.
        log.log(Level.WARNING, "Interrupted while waiting for sessions to be loaded", e);
        Thread.currentThread().interrupt();
    }
}
e.printStackTrace();
/**
 * Loads all local and remote sessions concurrently on a temporary daemon
 * thread pool, then waits (up to 1 minute) for the loading to complete.
 */
private void loadSessions() {
    ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
                                                            new DaemonThreadFactory("load-sessions-"));
    loadLocalSessions(executor);
    loadRemoteSessions(executor);
    try {
        executor.shutdown();
        if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
            log.log(Level.INFO, "Executor did not terminate");
    } catch (InterruptedException e) {
        log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e));
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
    }
}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Object monitor = new Object(); private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>()); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final PermanentApplicationPackage permanentApplicationPackage; private final FlagSource flagSource; private final TenantFileSystemDirs tenantFileSystemDirs; private final Metrics metrics; private final MetricUpdater metricUpdater; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final ConfigCurator configCurator; private final SessionCounter sessionCounter; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final TenantListener tenantListener; public SessionRepository(TenantName tenantName, TenantApplications applicationRepo, SessionPreparer sessionPreparer, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, PermanentApplicationPackage permanentApplicationPackage, FlagSource flagSource, 
ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, TenantListener tenantListener) { this.tenantName = tenantName; this.configCurator = ConfigCurator.create(curator); sessionCounter = new SessionCounter(configCurator, tenantName); this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = clock; this.curator = curator; this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime()); this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command); this.permanentApplicationPackage = permanentApplicationPackage; this.flagSource = flagSource; this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.metrics = metrics; this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configserverConfig = configserverConfig; this.configServerDB = configServerDB; this.zone = zone; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.tenantListener = tenantListener; loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } public void addLocalSession(LocalSession session) { long sessionId = session.getSessionId(); localSessionCache.put(sessionId, session); remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); } public Collection<LocalSession> 
getLocalSessions() { return localSessionCache.values(); } private void loadLocalSessions(ExecutorService executor) { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; List<Future<Long>> futures = new ArrayList<>(); for (File session : sessions) { long sessionId = Long.parseLong(session.getName()); futures.add(executor.submit(() -> createSessionFromId(sessionId))); } futures.forEach(f -> { try { long sessionId = f.get(); log.log(Level.INFO, () -> "Local session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session"); } }); } public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter(); Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) { ApplicationId existingApplicationId = existingSession.getApplicationId(); File existingApp = getSessionAppDir(existingSession.getSessionId()); LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } /** * Creates a local session based on a remote session and the distributed application package. * Does not wait for session being created on other servers. 
*/ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.values()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); remoteSessionCache.put(sessionId, session); loadSessionIfActive(session); updateSessionStateWatcher(sessionId, session); return session; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessionsFromZooKeeper()) { Session session = remoteSessionCache.get(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { 
log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException { List<Future<Long>> futures = new ArrayList<>(); for (long sessionId : getRemoteSessionsFromZooKeeper()) { futures.add(executor.submit(() -> sessionAdded(sessionId))); } futures.forEach(f -> { try { long sessionId = f.get(); log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session"); } }); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. 
*
 * @param sessionId session id for the new session
 * @return the given session id
 */
    public long sessionAdded(long sessionId) {
        // Sessions already marked DELETE in ZooKeeper are not resurrected
        if (hasStatusDeleted(sessionId)) return sessionId;

        log.log(Level.FINE, () -> "Adding remote session " + sessionId);
        Session session = createRemoteSession(sessionId);
        if (session.getStatus() == Session.Status.NEW) {
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
        createLocalSessionFromDistributedApplicationPackage(sessionId);
        return sessionId;
    }

    // Reads the status from ZooKeeper through a fresh RemoteSession wrapper (does not touch the cache)
    private boolean hasStatusDeleted(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        return session.getStatus() == Session.Status.DELETE;
    }

    /** Activates the session's application and notifies the completion waiter shared with other servers. */
    void activate(RemoteSession session) {
        long sessionId = session.getSessionId();
        Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId);
        applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter, session);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    /** Marks the session DELETE, removes it from ZooKeeper and the caches, and deletes any local session. */
    public void delete(Session remoteSession) {
        long sessionId = remoteSession.getSessionId();
        log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId);
        createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit();
        deleteRemoteSessionFromZooKeeper(remoteSession);
        remoteSessionCache.remove(sessionId);
        LocalSession localSession = getLocalSession(sessionId);
        if (localSession != null) {
            log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId);
            deleteLocalSession(localSession);
        }
    }

    // Loads and activates the session's application if this session is the active one for some application
    private void loadSessionIfActive(RemoteSession session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications())
{ if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo, tenantListener); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (Session session : remoteSessionCache.values()) { sessionMetrics.add(session.getStatus()); } metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); 
metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    // Reacts to changes in the sessions directory in ZooKeeper (runs on the tenant's watcher executor)
    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
                default:
                    break;
            }
        });
    }

    /**
     * Deletes local sessions that have expired and are not active, and local sessions older than
     * one day which are no longer the active session of their application.
     *
     * @param activeSessions map from application id to its currently active session id
     */
    public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
        log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
        Set<LocalSession> toDelete = new HashSet<>();
        try {
            for (LocalSession candidate : localSessionCache.values()) {
                Instant createTime = candidate.getCreateTime();
                log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);

                // Expired sessions in a state other than ACTIVATE
                if (hasExpired(candidate) && !isActiveSession(candidate)) {
                    toDelete.add(candidate);
                } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                    // Old sessions that are no longer the active session of their application
                    Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                    if (applicationId.isEmpty()) continue;
                    Long activeSession = activeSessions.get(applicationId.get());
                    if (activeSession == null || activeSession != candidate.getSessionId()) {
                        toDelete.add(candidate);
                        log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                            createTime + " for '" + applicationId + "'");
                    }
                }
            }
            toDelete.forEach(this::deleteLocalSession);
        } catch (Throwable e) {
            // Deliberately broad: a failure to purge must never propagate to the caller
            log.log(Level.WARNING, "Error when purging old sessions ", e);
        }
        log.log(Level.FINE, () -> "Done purging old sessions");
    }

    private boolean hasExpired(LocalSession candidate) {
        return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant());
    }

    private boolean isActiveSession(LocalSession candidate) {
        return candidate.getStatus() == Session.Status.ACTIVATE;
    }

    private void
ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (configCurator.exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); addLocalSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { 
synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { RemoteSession session = getRemoteSession(sessionId); applicationSet = Optional.ofNullable(ensureApplicationLoaded(session)); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. 
*/
    long createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        createLocalSession(sessionId, applicationPackage);
        return sessionId;
    }

    // Creates and caches a local session object for an application package already on disk
    void createLocalSession(long sessionId, ApplicationPackage applicationPackage) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
        addLocalSession(session);
    }

    /**
     * Returns a new local session for the given session id if it does not already exist.
     * Will also add the session to the local session cache if necessary
     */
    public void createLocalSessionFromDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
            log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
            createSessionFromId(sessionId);
            return;
        }

        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // The file reference may not have been downloaded to this server yet; give up for now
                log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return;
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
            createLocalSession(sessionDir, applicationId, sessionId);
        }
    }

    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    private long getNextSessionId() {
        return sessionCounter.nextSessionId();
    }

    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
    }

    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        String serverId = configserverConfig.serverId();
        return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId);
    }

    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId);
    }

    // Creates a session state watcher if none exists, otherwise points the existing watcher
    // at the (possibly new) remote session object
    private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) {
        SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId);
        if (sessionStateWatcher == null) {
            Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
            fileCache.addListener(this::nodeChanged);
            sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this));
        } else {
            sessionStateWatcher.updateRemoteSession(remoteSession);
        }
    }

    @Override
    public String toString() {
        return getLocalSessions().toString();
    }

    public Clock clock() { return clock; }

    public void close() {
deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            // Remove all watchers: an empty "existing" list means none of the cached sessions remain
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    // Reconciles the remote session cache with the current content of the sessions directory
    private void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    // Removes (and stops watching) cached remote sessions that are not in the given list
    private void checkForRemovedSessions(List<Long> existingSessions) {
        for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) {
            long sessionId = it.next().sessionId;
            if (existingSessions.contains(sessionId)) continue;

            SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
            if (watcher != null) watcher.close();
            it.remove();
            metricUpdater.incRemovedSessions();
        }
    }

    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (remoteSessionCache.get(sessionId) == null)
                sessionAdded(sessionId);
    }

    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    public Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) {
        session.setStatus(Session.Status.PREPARE);
    }

    // Transaction whose operations perform file system changes; all work happens on commit
    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }

    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }

    }

    private interface FileOperation extends Transaction.Operation {

        void commit();

    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }

    }

}
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session application dirs on disk are named by their numeric session id
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    private static final long nonExistingActiveSessionId = 0;

    // Guards application package creation (see createApplicationPackage)
    private final Object monitor = new Object();
    private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>());
    private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>());
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>());
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final PermanentApplicationPackage permanentApplicationPackage;
    private final FlagSource flagSource;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final Metrics metrics;
    private final MetricUpdater metricUpdater;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final ConfigCurator configCurator;
    private final SessionCounter sessionCounter;
    private final SecretStore secretStore;
    private final HostProvisionerProvider hostProvisionerProvider;
    private final ConfigserverConfig configserverConfig;
    private final ConfigServerDB configServerDB;
    private final Zone zone;
    private final ModelFactoryRegistry modelFactoryRegistry;
    private final ConfigDefinitionRepo configDefinitionRepo;
    private final TenantListener tenantListener;

    public SessionRepository(TenantName tenantName,
                             TenantApplications applicationRepo,
                             SessionPreparer sessionPreparer,
                             Curator curator,
                             Metrics metrics,
                             StripedExecutor<TenantName> zkWatcherExecutor,
                             PermanentApplicationPackage permanentApplicationPackage,
                             FlagSource flagSource,
                             ExecutorService zkCacheExecutor,
                             SecretStore secretStore,
                             HostProvisionerProvider hostProvisionerProvider,
                             ConfigserverConfig configserverConfig,
                             ConfigServerDB configServerDB,
                             Zone zone,
                             Clock clock,
                             ModelFactoryRegistry modelFactoryRegistry,
                             ConfigDefinitionRepo configDefinitionRepo,
                             TenantListener tenantListener) {
        this.tenantName = tenantName;
        this.configCurator = ConfigCurator.create(curator);
        sessionCounter = new SessionCounter(configCurator, tenantName);
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = clock;
        this.curator = curator;
        this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime());
        // All ZooKeeper watcher callbacks for this tenant run serialized on the striped executor
        this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command);
        this.permanentApplicationPackage = permanentApplicationPackage;
        this.flagSource = flagSource;
        this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.metrics = metrics;
        this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        this.secretStore = secretStore;
        this.hostProvisionerProvider = hostProvisionerProvider;
        this.configserverConfig = configserverConfig;
        this.configServerDB = configServerDB;
        this.zone = zone;
        this.modelFactoryRegistry = modelFactoryRegistry;
        this.configDefinitionRepo = configDefinitionRepo;
        this.tenantListener = tenantListener;

        // Sessions are loaded before the directory cache starts delivering child events,
        // presumably so existing sessions are in place first — TODO confirm
        loadSessions();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    // Adds a local session and makes sure a corresponding remote session exists in the cache
    public void addLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        localSessionCache.put(sessionId, session);
        remoteSessionCache.putIfAbsent(sessionId, createRemoteSession(sessionId));
    }

    public LocalSession getLocalSession(long sessionId) {
        return localSessionCache.get(sessionId);
    }

    public Collection<LocalSession>
getLocalSessions() {
        return localSessionCache.values();
    }

    // Loads all local sessions found on disk, in parallel on the given executor
    private void loadLocalSessions(ExecutorService executor) {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return;

        Map<Long, Future<?>> futures = new HashMap<>();
        for (File session : sessions) {
            long sessionId = Long.parseLong(session.getName());
            futures.put(sessionId, executor.submit(() -> createSessionFromId(sessionId)));
        }
        futures.forEach((sessionId, future) -> {
            try {
                future.get();
                log.log(Level.INFO, () -> "Local session " + sessionId + " loaded");
            } catch (ExecutionException | InterruptedException e) {
                log.log(Level.WARNING, "Could not load session " + sessionId, e);
            }
        });
    }

    // Prepares the session (builds models, writes prepare state) and waits for the prepare
    // to complete on the other config servers
    public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter();
        Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              activeApplicationSet, now, getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                                                     .getConfigChangeActions();
        setPrepared(session);
        waiter.awaitCompletion(params.getTimeoutBudget().timeLeft());
        return actions;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromExisting(Session existingSession,
                                                  boolean internalRedeploy,
                                                  TimeoutBudget timeoutBudget) {
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget);
        // Carry deployment-relevant settings over from the base session
        session.setApplicationId(existingApplicationId);
        session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) {
        applicationRepo.createApplication(applicationId);
        return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget);
    }

    /**
     * Creates a local session based on a remote session and the distributed application package.
     * Does not wait for session being created on other servers.
*/ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.values()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); remoteSessionCache.put(sessionId, session); loadSessionIfActive(session); updateSessionStateWatcher(sessionId, session); return session; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessionsFromZooKeeper()) { Session session = remoteSessionCache.get(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { 
log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException { Map<Long, Future<?>> futures = new HashMap<>(); for (long sessionId : getRemoteSessionsFromZooKeeper()) { futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId))); } futures.forEach((sessionId, future) -> { try { future.get(); log.log(Level.INFO, () -> "Remote session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { log.log(Level.WARNING, "Could not load session " + sessionId, e); } }); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. 
* * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { if (hasStatusDeleted(sessionId)) return; log.log(Level.FINE, () -> "Adding remote session " + sessionId); Session session = createRemoteSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } createLocalSessionFromDistributedApplicationPackage(sessionId); } private boolean hasStatusDeleted(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); return session.getStatus() == Session.Status.DELETE; } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId); applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void delete(Session remoteSession) { long sessionId = remoteSession.getSessionId(); log.log(Level.FINE, () -> remoteSession.logPre() + "Deactivating and deleting remote session " + sessionId); createSetStatusTransaction(remoteSession, Session.Status.DELETE).commit(); deleteRemoteSessionFromZooKeeper(remoteSession); remoteSessionCache.remove(sessionId); LocalSession localSession = getLocalSession(sessionId); if (localSession != null) { log.log(Level.FINE, () -> localSession.logPre() + "Deleting local session " + sessionId); deleteLocalSession(localSession); } } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if 
(applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, Session session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo, tenantListener); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (Session session : remoteSessionCache.values()) { sessionMetrics.add(session.getStatus()); } metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); 
metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; default: break; } }); } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); Set<LocalSession> toDelete = new HashSet<>(); try { for (LocalSession candidate : localSessionCache.values()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { toDelete.add(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { toDelete.add(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } toDelete.forEach(this::deleteLocalSession); } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } private void 
ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (configCurator.exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); addLocalSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { 
synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { RemoteSession session = getRemoteSession(sessionId); applicationSet = Optional.ofNullable(ensureApplicationLoaded(session)); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. 
*/ void createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); createLocalSession(sessionId, applicationPackage); } void createLocalSession(long sessionId, ApplicationPackage applicationPackage) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); addLocalSession(session); } /** * Returns a new local session for the given session id if it does not already exist. * Will also add the session to the local session cache if necessary */ public void createLocalSessionFromDistributedApplicationPackage(long sessionId) { if (applicationRepo.sessionExistsInFileSystem(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); createSessionFromId(sessionId); return; } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return; } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); createLocalSession(sessionDir, 
applicationId, sessionId); } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return sessionCounter.nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = configserverConfig.serverId(); return new SessionZooKeeperClient(curator, configCurator, tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(remoteSession); } } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public void close() { deleteAllSessions(); 
tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> existingSessions) { for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) { long sessionId = it.next().sessionId; if (existingSessions.contains(sessionId)) continue; SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); it.remove(); metricUpdater.incRemovedSessions(); } } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.get(sessionId) == null) sessionAdded(sessionId); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } public Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ 
private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
Have you tried this? I'm pretty sure 2-4 is too low.
private void configureJettyThreadpool(ServerConfig.Builder builder) { if (cluster == null) return; if (cluster instanceof ApplicationContainerCluster) { configureApplicationClusterJettyThreadPool(builder); } else { builder.minWorkerThreads(2).maxWorkerThreads(4); } }
builder.minWorkerThreads(2).maxWorkerThreads(4);
private void configureJettyThreadpool(ServerConfig.Builder builder) { if (cluster == null) return; if (cluster instanceof ApplicationContainerCluster) { configureApplicationClusterJettyThreadPool(builder); } else { builder.minWorkerThreads(2).maxWorkerThreads(4); } }
class JettyHttpServer extends SimpleComponent implements ServerConfig.Producer { private final ContainerCluster<?> cluster; private final boolean isHostedVespa; private final List<ConnectorFactory> connectorFactories = new ArrayList<>(); private final boolean enableJdiscConnectionLog; public JettyHttpServer(ComponentId id, ContainerCluster<?> cluster, ModelContext.FeatureFlags featureFlags, boolean isHostedVespa) { super(new ComponentModel( new BundleInstantiationSpecification(id, fromString("com.yahoo.jdisc.http.server.jetty.JettyHttpServer"), fromString("jdisc_http_service")) )); this.isHostedVespa = isHostedVespa; this.cluster = cluster; final FilterBindingsProviderComponent filterBindingsProviderComponent = new FilterBindingsProviderComponent(id); addChild(filterBindingsProviderComponent); inject(filterBindingsProviderComponent); this.enableJdiscConnectionLog = featureFlags.enableJdiscConnectionLog(); } public void addConnector(ConnectorFactory connectorFactory) { connectorFactories.add(connectorFactory); addChild(connectorFactory); } public List<ConnectorFactory> getConnectorFactories() { return Collections.unmodifiableList(connectorFactories); } @Override public void getConfig(ServerConfig.Builder builder) { builder.metric(new ServerConfig.Metric.Builder() .monitoringHandlerPaths(List.of("/state/v1", "/status.html")) .searchHandlerPaths(List.of("/search")) ); if (isHostedVespa) { builder.accessLog(new ServerConfig.AccessLog.Builder() .remoteAddressHeaders(List.of()) .remotePortHeaders(List.of())); } else { builder.accessLog(new ServerConfig.AccessLog.Builder() .remoteAddressHeaders(List.of("x-forwarded-for", "y-ra", "yahooremoteip", "client-ip")) .remotePortHeaders(List.of("X-Forwarded-Port", "y-rp"))); } configureJettyThreadpool(builder); builder.connectionLog(new ServerConfig.ConnectionLog.Builder() .enabled(enableJdiscConnectionLog)); } private void configureApplicationClusterJettyThreadPool(ServerConfig.Builder builder) { double vcpu = 
cluster.vcpu().orElse(0); if (vcpu > 0) { int threads = 16 + (int) Math.ceil(vcpu); builder.minWorkerThreads(threads).maxWorkerThreads(threads); } } static ComponentModel providerComponentModel(final ComponentId parentId, String className) { final ComponentSpecification classNameSpec = new ComponentSpecification( className); return new ComponentModel(new BundleInstantiationSpecification( classNameSpec.nestInNamespace(parentId), classNameSpec, null)); } public static final class FilterBindingsProviderComponent extends SimpleComponent { public FilterBindingsProviderComponent(final ComponentId parentId) { super(providerComponentModel(parentId, "com.yahoo.container.jdisc.FilterBindingsProvider")); } } }
class JettyHttpServer extends SimpleComponent implements ServerConfig.Producer { private final ContainerCluster<?> cluster; private final boolean isHostedVespa; private final List<ConnectorFactory> connectorFactories = new ArrayList<>(); private final boolean enableJdiscConnectionLog; public JettyHttpServer(ComponentId id, ContainerCluster<?> cluster, ModelContext.FeatureFlags featureFlags, boolean isHostedVespa) { super(new ComponentModel( new BundleInstantiationSpecification(id, fromString("com.yahoo.jdisc.http.server.jetty.JettyHttpServer"), fromString("jdisc_http_service")) )); this.isHostedVespa = isHostedVespa; this.cluster = cluster; final FilterBindingsProviderComponent filterBindingsProviderComponent = new FilterBindingsProviderComponent(id); addChild(filterBindingsProviderComponent); inject(filterBindingsProviderComponent); this.enableJdiscConnectionLog = featureFlags.enableJdiscConnectionLog(); } public void addConnector(ConnectorFactory connectorFactory) { connectorFactories.add(connectorFactory); addChild(connectorFactory); } public List<ConnectorFactory> getConnectorFactories() { return Collections.unmodifiableList(connectorFactories); } @Override public void getConfig(ServerConfig.Builder builder) { builder.metric(new ServerConfig.Metric.Builder() .monitoringHandlerPaths(List.of("/state/v1", "/status.html")) .searchHandlerPaths(List.of("/search")) ); if (isHostedVespa) { builder.accessLog(new ServerConfig.AccessLog.Builder() .remoteAddressHeaders(List.of()) .remotePortHeaders(List.of())); } else { builder.accessLog(new ServerConfig.AccessLog.Builder() .remoteAddressHeaders(List.of("x-forwarded-for", "y-ra", "yahooremoteip", "client-ip")) .remotePortHeaders(List.of("X-Forwarded-Port", "y-rp"))); } configureJettyThreadpool(builder); builder.connectionLog(new ServerConfig.ConnectionLog.Builder() .enabled(enableJdiscConnectionLog)); } private void configureApplicationClusterJettyThreadPool(ServerConfig.Builder builder) { double vcpu = 
cluster.vcpu().orElse(0); if (vcpu > 0) { int threads = 16 + (int) Math.ceil(vcpu); builder.minWorkerThreads(threads).maxWorkerThreads(threads); } } static ComponentModel providerComponentModel(final ComponentId parentId, String className) { final ComponentSpecification classNameSpec = new ComponentSpecification( className); return new ComponentModel(new BundleInstantiationSpecification( classNameSpec.nestInNamespace(parentId), classNameSpec, null)); } public static final class FilterBindingsProviderComponent extends SimpleComponent { public FilterBindingsProviderComponent(final ComponentId parentId) { super(providerComponentModel(parentId, "com.yahoo.container.jdisc.FilterBindingsProvider")); } } }
No, not tried. Will run a few system tests.
private void configureJettyThreadpool(ServerConfig.Builder builder) { if (cluster == null) return; if (cluster instanceof ApplicationContainerCluster) { configureApplicationClusterJettyThreadPool(builder); } else { builder.minWorkerThreads(2).maxWorkerThreads(4); } }
builder.minWorkerThreads(2).maxWorkerThreads(4);
private void configureJettyThreadpool(ServerConfig.Builder builder) { if (cluster == null) return; if (cluster instanceof ApplicationContainerCluster) { configureApplicationClusterJettyThreadPool(builder); } else { builder.minWorkerThreads(2).maxWorkerThreads(4); } }
class JettyHttpServer extends SimpleComponent implements ServerConfig.Producer { private final ContainerCluster<?> cluster; private final boolean isHostedVespa; private final List<ConnectorFactory> connectorFactories = new ArrayList<>(); private final boolean enableJdiscConnectionLog; public JettyHttpServer(ComponentId id, ContainerCluster<?> cluster, ModelContext.FeatureFlags featureFlags, boolean isHostedVespa) { super(new ComponentModel( new BundleInstantiationSpecification(id, fromString("com.yahoo.jdisc.http.server.jetty.JettyHttpServer"), fromString("jdisc_http_service")) )); this.isHostedVespa = isHostedVespa; this.cluster = cluster; final FilterBindingsProviderComponent filterBindingsProviderComponent = new FilterBindingsProviderComponent(id); addChild(filterBindingsProviderComponent); inject(filterBindingsProviderComponent); this.enableJdiscConnectionLog = featureFlags.enableJdiscConnectionLog(); } public void addConnector(ConnectorFactory connectorFactory) { connectorFactories.add(connectorFactory); addChild(connectorFactory); } public List<ConnectorFactory> getConnectorFactories() { return Collections.unmodifiableList(connectorFactories); } @Override public void getConfig(ServerConfig.Builder builder) { builder.metric(new ServerConfig.Metric.Builder() .monitoringHandlerPaths(List.of("/state/v1", "/status.html")) .searchHandlerPaths(List.of("/search")) ); if (isHostedVespa) { builder.accessLog(new ServerConfig.AccessLog.Builder() .remoteAddressHeaders(List.of()) .remotePortHeaders(List.of())); } else { builder.accessLog(new ServerConfig.AccessLog.Builder() .remoteAddressHeaders(List.of("x-forwarded-for", "y-ra", "yahooremoteip", "client-ip")) .remotePortHeaders(List.of("X-Forwarded-Port", "y-rp"))); } configureJettyThreadpool(builder); builder.connectionLog(new ServerConfig.ConnectionLog.Builder() .enabled(enableJdiscConnectionLog)); } private void configureApplicationClusterJettyThreadPool(ServerConfig.Builder builder) { double vcpu = 
cluster.vcpu().orElse(0); if (vcpu > 0) { int threads = 16 + (int) Math.ceil(vcpu); builder.minWorkerThreads(threads).maxWorkerThreads(threads); } } static ComponentModel providerComponentModel(final ComponentId parentId, String className) { final ComponentSpecification classNameSpec = new ComponentSpecification( className); return new ComponentModel(new BundleInstantiationSpecification( classNameSpec.nestInNamespace(parentId), classNameSpec, null)); } public static final class FilterBindingsProviderComponent extends SimpleComponent { public FilterBindingsProviderComponent(final ComponentId parentId) { super(providerComponentModel(parentId, "com.yahoo.container.jdisc.FilterBindingsProvider")); } } }
class JettyHttpServer extends SimpleComponent implements ServerConfig.Producer { private final ContainerCluster<?> cluster; private final boolean isHostedVespa; private final List<ConnectorFactory> connectorFactories = new ArrayList<>(); private final boolean enableJdiscConnectionLog; public JettyHttpServer(ComponentId id, ContainerCluster<?> cluster, ModelContext.FeatureFlags featureFlags, boolean isHostedVespa) { super(new ComponentModel( new BundleInstantiationSpecification(id, fromString("com.yahoo.jdisc.http.server.jetty.JettyHttpServer"), fromString("jdisc_http_service")) )); this.isHostedVespa = isHostedVespa; this.cluster = cluster; final FilterBindingsProviderComponent filterBindingsProviderComponent = new FilterBindingsProviderComponent(id); addChild(filterBindingsProviderComponent); inject(filterBindingsProviderComponent); this.enableJdiscConnectionLog = featureFlags.enableJdiscConnectionLog(); } public void addConnector(ConnectorFactory connectorFactory) { connectorFactories.add(connectorFactory); addChild(connectorFactory); } public List<ConnectorFactory> getConnectorFactories() { return Collections.unmodifiableList(connectorFactories); } @Override public void getConfig(ServerConfig.Builder builder) { builder.metric(new ServerConfig.Metric.Builder() .monitoringHandlerPaths(List.of("/state/v1", "/status.html")) .searchHandlerPaths(List.of("/search")) ); if (isHostedVespa) { builder.accessLog(new ServerConfig.AccessLog.Builder() .remoteAddressHeaders(List.of()) .remotePortHeaders(List.of())); } else { builder.accessLog(new ServerConfig.AccessLog.Builder() .remoteAddressHeaders(List.of("x-forwarded-for", "y-ra", "yahooremoteip", "client-ip")) .remotePortHeaders(List.of("X-Forwarded-Port", "y-rp"))); } configureJettyThreadpool(builder); builder.connectionLog(new ServerConfig.ConnectionLog.Builder() .enabled(enableJdiscConnectionLog)); } private void configureApplicationClusterJettyThreadPool(ServerConfig.Builder builder) { double vcpu = 
cluster.vcpu().orElse(0); if (vcpu > 0) { int threads = 16 + (int) Math.ceil(vcpu); builder.minWorkerThreads(threads).maxWorkerThreads(threads); } } static ComponentModel providerComponentModel(final ComponentId parentId, String className) { final ComponentSpecification classNameSpec = new ComponentSpecification( className); return new ComponentModel(new BundleInstantiationSpecification( classNameSpec.nestInNamespace(parentId), classNameSpec, null)); } public static final class FilterBindingsProviderComponent extends SimpleComponent { public FilterBindingsProviderComponent(final ComponentId parentId) { super(providerComponentModel(parentId, "com.yahoo.container.jdisc.FilterBindingsProvider")); } } }
is ... this a valid externalId?
void toSlime(Cursor slime) { slime.setString("name", name); slime.setString("role", role); slime.setString("awsId", awsId); slime.setString("externalId", "*****"); }
slime.setString("externalId", "*****");
void toSlime(Cursor slime) { slime.setString("name", name); slime.setString("role", role); slime.setString("awsId", awsId); slime.setString("externalId", "*****"); }
class AwsSettings { String name; String role; String awsId; String externalId; AwsSettings(String name, String role, String awsId, String externalId) { this.name = name; this.role = role; this.awsId = awsId; this.externalId = externalId; } static AwsSettings fromSlime(Slime slime) { var json = slime.get(); return new AwsSettings( json.field("name").asString(), json.field("role").asString(), json.field("awsId").asString(), json.field("externalId").asString() ); } }
class AwsSettings { String name; String role; String awsId; String externalId; AwsSettings(String name, String role, String awsId, String externalId) { this.name = name; this.role = role; this.awsId = awsId; this.externalId = externalId; } static AwsSettings fromSlime(Slime slime) { var json = slime.get(); return new AwsSettings( json.field("name").asString(), json.field("role").asString(), json.field("awsId").asString(), json.field("externalId").asString() ); } }
```suggestion this.jobRunner = duration(90, SECONDS); ``` This one probably times out sometimes because it does something synchronously that very seldomly takes a long time — it's just supposed to dispatch to an executor. I'll fix that instead.
public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(6, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(90, SECONDS); this.jobRunner = duration(105, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(30, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); this.endpointCertificateMaintainer = duration(12, HOURS); }
this.jobRunner = duration(105, SECONDS);
public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(10, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(90, SECONDS); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(30, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); this.endpointCertificateMaintainer = duration(12, HOURS); }
class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; private final Duration endpointCertificateMaintainer; private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } }
class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; private final Duration endpointCertificateMaintainer; private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } }
```suggestion this.deploymentMetricsMaintainer = duration(10, MINUTES); ``` With so many collisions already, and at such a low rate, this one probably needs more bump?
public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(6, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(90, SECONDS); this.jobRunner = duration(105, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(30, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); this.endpointCertificateMaintainer = duration(12, HOURS); }
this.deploymentMetricsMaintainer = duration(6, MINUTES);
public Intervals(SystemName system) { this.system = Objects.requireNonNull(system); this.defaultInterval = duration(system.isCd() || system == SystemName.dev ? 1 : 5, MINUTES); this.outstandingChangeDeployer = duration(3, MINUTES); this.versionStatusUpdater = duration(3, MINUTES); this.readyJobsTrigger = duration(1, MINUTES); this.deploymentMetricsMaintainer = duration(10, MINUTES); this.applicationOwnershipConfirmer = duration(12, HOURS); this.systemUpgrader = duration(90, SECONDS); this.jobRunner = duration(90, SECONDS); this.osUpgrader = duration(1, MINUTES); this.contactInformationMaintainer = duration(12, HOURS); this.nameServiceDispatcher = duration(30, SECONDS); this.costReportMaintainer = duration(2, HOURS); this.resourceMeterMaintainer = duration(1, MINUTES); this.cloudEventReporter = duration(30, MINUTES); this.resourceTagMaintainer = duration(30, MINUTES); this.systemRoutingPolicyMaintainer = duration(10, MINUTES); this.applicationMetaDataGarbageCollector = duration(12, HOURS); this.containerImageExpirer = duration(2, HOURS); this.hostSwitchUpdater = duration(12, HOURS); this.reindexingTriggerer = duration(1, HOURS); this.endpointCertificateMaintainer = duration(12, HOURS); }
class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; private final Duration endpointCertificateMaintainer; private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } }
class Intervals { private static final Duration MAX_CD_INTERVAL = Duration.ofHours(1); private final SystemName system; private final Duration defaultInterval; private final Duration outstandingChangeDeployer; private final Duration versionStatusUpdater; private final Duration readyJobsTrigger; private final Duration deploymentMetricsMaintainer; private final Duration applicationOwnershipConfirmer; private final Duration systemUpgrader; private final Duration jobRunner; private final Duration osUpgrader; private final Duration contactInformationMaintainer; private final Duration nameServiceDispatcher; private final Duration costReportMaintainer; private final Duration resourceMeterMaintainer; private final Duration cloudEventReporter; private final Duration resourceTagMaintainer; private final Duration systemRoutingPolicyMaintainer; private final Duration applicationMetaDataGarbageCollector; private final Duration containerImageExpirer; private final Duration hostSwitchUpdater; private final Duration reindexingTriggerer; private final Duration endpointCertificateMaintainer; private Duration duration(long amount, TemporalUnit unit) { Duration duration = Duration.of(amount, unit); if (system.isCd() && duration.compareTo(MAX_CD_INTERVAL) > 0) { return MAX_CD_INTERVAL; } return duration; } }
Nitpick: Should _wine_ be kept at 0.3 here? It doesn't affect the result, but the reader might wonder why it was changed.
public void cluster_feed_block_state_is_not_recomputed_when_only_resource_usage_levels_differ() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); var bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.9), usage("wine", 0.4))); bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); }
reportResourceUsageFromNode(1, setOf(usage("cheese", 0.9), usage("wine", 0.4)));
public void cluster_feed_block_state_is_not_recomputed_when_only_resource_usage_levels_differ() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); var bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.9), usage("wine", 0.4))); bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); }
class ClusterFeedBlockTest extends FleetControllerTest { private static final int NODE_COUNT = 3; private Supervisor supervisor; private FleetController ctrl; private DummyCommunicator communicator; private EventLog eventLog; private int dummyConfigGeneration = 2; @Before public void setUp() { supervisor = new Supervisor(new Transport()); } private void initialize(FleetControllerOptions options) throws Exception { List<Node> nodes = new ArrayList<>(); for (int i = 0; i < options.nodes.size(); ++i) { nodes.add(new Node(NodeType.STORAGE, i)); nodes.add(new Node(NodeType.DISTRIBUTOR, i)); } communicator = new DummyCommunicator(nodes, timer); MetricUpdater metricUpdater = new MetricUpdater(new NoMetricReporter(), options.fleetControllerIndex); eventLog = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster(options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, eventLog); DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, eventLog, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); ctrl = new FleetController(timer, eventLog, cluster, stateGatherer, communicator, null, null, communicator, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); ctrl.tick(); markAllNodesAsUp(options); ctrl.tick(); } private void markAllNodesAsUp(FleetControllerOptions options) throws Exception { for (int i = 0; i < options.nodes.size(); ++i) { communicator.setNodeState(new Node(NodeType.STORAGE, i), State.UP, ""); 
communicator.setNodeState(new Node(NodeType.DISTRIBUTOR, i), State.UP, ""); } ctrl.tick(); } public void tearDown() throws Exception { if (supervisor != null) { supervisor.transport().shutdown().join(); supervisor = null; } super.tearDown(); } private static FleetControllerOptions createOptions(Map<String, Double> feedBlockLimits) { FleetControllerOptions options = defaultOptions("mycluster"); options.setStorageDistribution(DistributionBuilder.forFlatCluster(NODE_COUNT)); options.nodes = new HashSet<>(DistributionBuilder.buildConfiguredNodes(NODE_COUNT)); options.clusterFeedBlockEnabled = true; options.clusterFeedBlockLimit = Map.copyOf(feedBlockLimits); return options; } private void reportResourceUsageFromNode(int nodeIndex, Set<FeedBlockUtil.UsageDetails> resourceUsages) throws Exception { String hostInfo = createResourceUsageJson(resourceUsages); communicator.setNodeState(new Node(NodeType.STORAGE, nodeIndex), new NodeState(NodeType.STORAGE, State.UP), hostInfo); ctrl.tick(); } @Test public void cluster_feed_can_be_blocked_and_unblocked_by_single_node() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.5))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.6), usage("wine", 0.3))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); } @Test public void cluster_feed_block_state_is_recomputed_when_options_are_updated() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 
0.3))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); ctrl.updateOptions(createOptions(mapOf(usage("cheese", 0.9), usage("wine", 0.4))), dummyConfigGeneration); ctrl.tick(); ctrl.tick(); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); } @Test public void cluster_feed_block_state_is_recomputed_when_resource_block_set_differs() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); var bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.5))); bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700), " + "wine on node 1 [unknown hostname] (0.500 > 0.400)", bundle.getFeedBlock().get().getDescription()); } @Test }
class ClusterFeedBlockTest extends FleetControllerTest { private static final int NODE_COUNT = 3; private Supervisor supervisor; private FleetController ctrl; private DummyCommunicator communicator; private EventLog eventLog; private int dummyConfigGeneration = 2; @Before public void setUp() { supervisor = new Supervisor(new Transport()); } private void initialize(FleetControllerOptions options) throws Exception { List<Node> nodes = new ArrayList<>(); for (int i = 0; i < options.nodes.size(); ++i) { nodes.add(new Node(NodeType.STORAGE, i)); nodes.add(new Node(NodeType.DISTRIBUTOR, i)); } communicator = new DummyCommunicator(nodes, timer); MetricUpdater metricUpdater = new MetricUpdater(new NoMetricReporter(), options.fleetControllerIndex); eventLog = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster(options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, eventLog); DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, eventLog, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); ctrl = new FleetController(timer, eventLog, cluster, stateGatherer, communicator, null, null, communicator, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); ctrl.tick(); markAllNodesAsUp(options); ctrl.tick(); } private void markAllNodesAsUp(FleetControllerOptions options) throws Exception { for (int i = 0; i < options.nodes.size(); ++i) { communicator.setNodeState(new Node(NodeType.STORAGE, i), State.UP, ""); 
communicator.setNodeState(new Node(NodeType.DISTRIBUTOR, i), State.UP, ""); } ctrl.tick(); } public void tearDown() throws Exception { if (supervisor != null) { supervisor.transport().shutdown().join(); supervisor = null; } super.tearDown(); } private static FleetControllerOptions createOptions(Map<String, Double> feedBlockLimits) { FleetControllerOptions options = defaultOptions("mycluster"); options.setStorageDistribution(DistributionBuilder.forFlatCluster(NODE_COUNT)); options.nodes = new HashSet<>(DistributionBuilder.buildConfiguredNodes(NODE_COUNT)); options.clusterFeedBlockEnabled = true; options.clusterFeedBlockLimit = Map.copyOf(feedBlockLimits); return options; } private void reportResourceUsageFromNode(int nodeIndex, Set<FeedBlockUtil.UsageDetails> resourceUsages) throws Exception { String hostInfo = createResourceUsageJson(resourceUsages); communicator.setNodeState(new Node(NodeType.STORAGE, nodeIndex), new NodeState(NodeType.STORAGE, State.UP), hostInfo); ctrl.tick(); } @Test public void cluster_feed_can_be_blocked_and_unblocked_by_single_node() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.5))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.6), usage("wine", 0.3))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); } @Test public void cluster_feed_block_state_is_recomputed_when_options_are_updated() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 
0.3))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); ctrl.updateOptions(createOptions(mapOf(usage("cheese", 0.9), usage("wine", 0.4))), dummyConfigGeneration); ctrl.tick(); ctrl.tick(); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); } @Test public void cluster_feed_block_state_is_recomputed_when_resource_block_set_differs() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); var bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.5))); bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700), " + "wine on node 1 [unknown hostname] (0.500 > 0.400)", bundle.getFeedBlock().get().getDescription()); } @Test }
Will change it back in a follow-up
public void cluster_feed_block_state_is_not_recomputed_when_only_resource_usage_levels_differ() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); var bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.9), usage("wine", 0.4))); bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); }
reportResourceUsageFromNode(1, setOf(usage("cheese", 0.9), usage("wine", 0.4)));
public void cluster_feed_block_state_is_not_recomputed_when_only_resource_usage_levels_differ() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); var bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.9), usage("wine", 0.4))); bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); }
class ClusterFeedBlockTest extends FleetControllerTest { private static final int NODE_COUNT = 3; private Supervisor supervisor; private FleetController ctrl; private DummyCommunicator communicator; private EventLog eventLog; private int dummyConfigGeneration = 2; @Before public void setUp() { supervisor = new Supervisor(new Transport()); } private void initialize(FleetControllerOptions options) throws Exception { List<Node> nodes = new ArrayList<>(); for (int i = 0; i < options.nodes.size(); ++i) { nodes.add(new Node(NodeType.STORAGE, i)); nodes.add(new Node(NodeType.DISTRIBUTOR, i)); } communicator = new DummyCommunicator(nodes, timer); MetricUpdater metricUpdater = new MetricUpdater(new NoMetricReporter(), options.fleetControllerIndex); eventLog = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster(options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, eventLog); DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, eventLog, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); ctrl = new FleetController(timer, eventLog, cluster, stateGatherer, communicator, null, null, communicator, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); ctrl.tick(); markAllNodesAsUp(options); ctrl.tick(); } private void markAllNodesAsUp(FleetControllerOptions options) throws Exception { for (int i = 0; i < options.nodes.size(); ++i) { communicator.setNodeState(new Node(NodeType.STORAGE, i), State.UP, ""); 
communicator.setNodeState(new Node(NodeType.DISTRIBUTOR, i), State.UP, ""); } ctrl.tick(); } public void tearDown() throws Exception { if (supervisor != null) { supervisor.transport().shutdown().join(); supervisor = null; } super.tearDown(); } private static FleetControllerOptions createOptions(Map<String, Double> feedBlockLimits) { FleetControllerOptions options = defaultOptions("mycluster"); options.setStorageDistribution(DistributionBuilder.forFlatCluster(NODE_COUNT)); options.nodes = new HashSet<>(DistributionBuilder.buildConfiguredNodes(NODE_COUNT)); options.clusterFeedBlockEnabled = true; options.clusterFeedBlockLimit = Map.copyOf(feedBlockLimits); return options; } private void reportResourceUsageFromNode(int nodeIndex, Set<FeedBlockUtil.UsageDetails> resourceUsages) throws Exception { String hostInfo = createResourceUsageJson(resourceUsages); communicator.setNodeState(new Node(NodeType.STORAGE, nodeIndex), new NodeState(NodeType.STORAGE, State.UP), hostInfo); ctrl.tick(); } @Test public void cluster_feed_can_be_blocked_and_unblocked_by_single_node() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.5))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.6), usage("wine", 0.3))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); } @Test public void cluster_feed_block_state_is_recomputed_when_options_are_updated() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 
0.3))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); ctrl.updateOptions(createOptions(mapOf(usage("cheese", 0.9), usage("wine", 0.4))), dummyConfigGeneration); ctrl.tick(); ctrl.tick(); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); } @Test public void cluster_feed_block_state_is_recomputed_when_resource_block_set_differs() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); var bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.5))); bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700), " + "wine on node 1 [unknown hostname] (0.500 > 0.400)", bundle.getFeedBlock().get().getDescription()); } @Test }
class ClusterFeedBlockTest extends FleetControllerTest { private static final int NODE_COUNT = 3; private Supervisor supervisor; private FleetController ctrl; private DummyCommunicator communicator; private EventLog eventLog; private int dummyConfigGeneration = 2; @Before public void setUp() { supervisor = new Supervisor(new Transport()); } private void initialize(FleetControllerOptions options) throws Exception { List<Node> nodes = new ArrayList<>(); for (int i = 0; i < options.nodes.size(); ++i) { nodes.add(new Node(NodeType.STORAGE, i)); nodes.add(new Node(NodeType.DISTRIBUTOR, i)); } communicator = new DummyCommunicator(nodes, timer); MetricUpdater metricUpdater = new MetricUpdater(new NoMetricReporter(), options.fleetControllerIndex); eventLog = new EventLog(timer, metricUpdater); ContentCluster cluster = new ContentCluster(options.clusterName, options.nodes, options.storageDistribution, options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, eventLog); DatabaseHandler database = new DatabaseHandler(new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); StateChangeHandler stateGenerator = new StateChangeHandler(timer, eventLog, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); ctrl = new FleetController(timer, eventLog, cluster, stateGatherer, communicator, null, null, communicator, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); ctrl.tick(); markAllNodesAsUp(options); ctrl.tick(); } private void markAllNodesAsUp(FleetControllerOptions options) throws Exception { for (int i = 0; i < options.nodes.size(); ++i) { communicator.setNodeState(new Node(NodeType.STORAGE, i), State.UP, ""); 
communicator.setNodeState(new Node(NodeType.DISTRIBUTOR, i), State.UP, ""); } ctrl.tick(); } public void tearDown() throws Exception { if (supervisor != null) { supervisor.transport().shutdown().join(); supervisor = null; } super.tearDown(); } private static FleetControllerOptions createOptions(Map<String, Double> feedBlockLimits) { FleetControllerOptions options = defaultOptions("mycluster"); options.setStorageDistribution(DistributionBuilder.forFlatCluster(NODE_COUNT)); options.nodes = new HashSet<>(DistributionBuilder.buildConfiguredNodes(NODE_COUNT)); options.clusterFeedBlockEnabled = true; options.clusterFeedBlockLimit = Map.copyOf(feedBlockLimits); return options; } private void reportResourceUsageFromNode(int nodeIndex, Set<FeedBlockUtil.UsageDetails> resourceUsages) throws Exception { String hostInfo = createResourceUsageJson(resourceUsages); communicator.setNodeState(new Node(NodeType.STORAGE, nodeIndex), new NodeState(NodeType.STORAGE, State.UP), hostInfo); ctrl.tick(); } @Test public void cluster_feed_can_be_blocked_and_unblocked_by_single_node() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.5))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.6), usage("wine", 0.3))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); } @Test public void cluster_feed_block_state_is_recomputed_when_options_are_updated() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 
0.3))); assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); ctrl.updateOptions(createOptions(mapOf(usage("cheese", 0.9), usage("wine", 0.4))), dummyConfigGeneration); ctrl.tick(); ctrl.tick(); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); } @Test public void cluster_feed_block_state_is_recomputed_when_resource_block_set_differs() throws Exception { initialize(createOptions(mapOf(usage("cheese", 0.7), usage("wine", 0.4)))); assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.3))); var bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700)", bundle.getFeedBlock().get().getDescription()); reportResourceUsageFromNode(1, setOf(usage("cheese", 0.8), usage("wine", 0.5))); bundle = ctrl.getClusterStateBundle(); assertTrue(bundle.clusterFeedIsBlocked()); assertEquals("cheese on node 1 [unknown hostname] (0.800 > 0.700), " + "wine on node 1 [unknown hostname] (0.500 > 0.400)", bundle.getFeedBlock().get().getDescription()); } @Test }
Nit: Consider copying once.
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(db.writeTo(State.inactive, nodes, Agent.application, Optional.empty(), transaction.nested())); return written; }
var stateful = NodeList.copyOf(nodes).stateful();
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; }
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; } /** Returns the status of firmware 
checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. * * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... 
inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } public NodeList list(ApplicationId application, State ... inState) { return NodeList.copyOf(getNodes(application, inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } /** Returns a filterable list of all load balancers in this repository */ public LoadBalancerList loadBalancers() { return loadBalancers((ignored) -> true); } /** Returns a filterable list of load balancers belonging to given application */ public LoadBalancerList loadBalancers(ApplicationId application) { return loadBalancers((id) -> id.application().equals(application)); } private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) { return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values()); } public List<Node> getNodes(ApplicationId id, State ... 
inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns the ACL for the node (trusted nodes, networks and ports) */ private NodeAcl getNodeAcl(Node node, NodeList candidates) { Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname)); Set<Integer> trustedPorts = new LinkedHashSet<>(); Set<String> trustedNetworks = new LinkedHashSet<>(); trustedPorts.add(22); candidates.parentOf(node).ifPresent(trustedNodes::add); node.allocation().ifPresent(allocation -> { trustedNodes.addAll(candidates.owner(allocation.owner()).asList()); loadBalancers(allocation.owner()).asList().stream() .map(LoadBalancer::instance) .map(LoadBalancerInstance::networks) .forEach(trustedNetworks::addAll); }); switch (node.type()) { case tenant: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList()); node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList())); if (node.state() == State.ready) { trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList()); } break; case config: trustedNodes.addAll(candidates.asList()); trustedPorts.add(4443); break; case proxy: trustedNodes.addAll(candidates.nodeType(NodeType.config).asList()); trustedPorts.add(443); trustedPorts.add(4080); trustedPorts.add(4443); break; case controller: trustedPorts.add(4443); trustedPorts.add(443); trustedPorts.add(80); break; default: illegal("Don't know how to create ACL for " + node + " of type " + node.type()); } return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts); } /** * Creates a list of node ACLs which identify which nodes the given node should trust * * @param node Node for which to generate ACLs * @param children Return ACLs for the children of the given node (e.g. 
containers on a Docker host) * @return List of node ACLs */ public List<NodeAcl> getNodeAcls(Node node, boolean children) { NodeList candidates = list(); if (children) { return candidates.childrenOf(node).asList().stream() .map(childNode -> getNodeAcl(childNode, candidates)) .collect(Collectors.toUnmodifiableList()); } return List.of(getNodeAcl(node, candidates)); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty()) illegal("Can not set host " + node + " ready. 
Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. 
*/ public void remove(ApplicationTransaction transaction) { NodeList applicationNodes = list(transaction.application()); NodeList activeNodes = applicationNodes.state(State.active); deactivate(activeNodes.asList(), transaction); db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system, Optional.of("Application is removed"), transaction.nested()); applications.remove(transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. 
* * @throws IllegalArgumentException if the node has hardware failure */ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (node.state() != State.parked && agent != Agent.operator && (node.status().wantToDeprovision() || retiredByOperator(node))) return park(node.hostname(), false, agent, reason, transaction); else return db.writeTo(State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } private static boolean retiredByOperator(Node node) { return node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(agent -> agent == Agent.operator) .orElse(false); } /** * Fails this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, keepAllocation, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason, transaction); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(node, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(Node node, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. 
It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. 
* * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. 
* * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! 
host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (dynamicProvisioning) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. 
*/ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = getNode(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname())); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname)); } private Mutex lock(Node node) { return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
/**
 * The single entry point for reading and changing node state in this system.
 * All persistent state lives in the {@link CuratorDatabaseClient}; this class layers
 * node lifecycle transitions, locking, ACL generation and various per-zone policy
 * checks on top of it.
 */
class NodeRepository extends AbstractComponent {

    private static final Logger log = Logger.getLogger(NodeRepository.class.getName());

    private final CuratorDatabaseClient db;
    private final Clock clock;
    private final Zone zone;
    private final NodeFlavors flavors;
    private final HostResourcesCalculator resourcesCalculator;
    private final NameResolver nameResolver;
    private final OsVersions osVersions;
    private final InfrastructureVersions infrastructureVersions;
    private final FirmwareChecks firmwareChecks;
    private final ContainerImages containerImages;
    private final JobControl jobControl;
    private final Applications applications;
    private final int spareCount;

    /**
     * Creates a node repository from a zookeeper provider.
     * This will use the system time to make time-sensitive decisions
     */
    @Inject
    public NodeRepository(NodeRepositoryConfig config,
                          NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Zone zone,
                          FlagSource flagSource) {
        this(flavors,
             provisionServiceProvider,
             curator,
             Clock.systemUTC(),
             zone,
             new DnsNameResolver(),
             DockerImage.fromString(config.containerImage())
                        .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())),
             flagSource,
             config.useCuratorClientCache(),
             // One spare in production zones without dynamic provisioning, otherwise none
             zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0,
             config.nodeCacheSize());
    }

    /**
     * Creates a node repository from a zookeeper provider and a clock instance
     * which will be used for time-sensitive decisions.
     */
    public NodeRepository(NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Clock clock,
                          Zone zone,
                          NameResolver nameResolver,
                          DockerImage containerImage,
                          FlagSource flagSource,
                          boolean useCuratorClientCache,
                          int spareCount,
                          long nodeCacheSize) {
        this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
        this.zone = zone;
        this.clock = clock;
        this.flavors = flavors;
        this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator();
        this.nameResolver = nameResolver;
        this.osVersions = new OsVersions(this);
        this.infrastructureVersions = new InfrastructureVersions(db);
        this.firmwareChecks = new FirmwareChecks(db, clock);
        this.containerImages = new ContainerImages(db, containerImage);
        this.jobControl = new JobControl(new JobControlFlags(db, flagSource));
        this.applications = new Applications(db);
        this.spareCount = spareCount;
        rewriteNodes();
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    private void rewriteNodes() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (State state : State.values()) {
            List<Node> nodes = db.readNodes(state);
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /** Returns the curator database client used by this */
    public CuratorDatabaseClient database() { return db; }

    /** @return The name resolver used to resolve hostname and ip addresses */
    public NameResolver nameResolver() { return nameResolver; }

    /** Returns the OS versions to use for nodes in this */
    public OsVersions osVersions() { return osVersions; }

    /** Returns the infrastructure versions to use for nodes in this */
    public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }

    /** Returns the status of firmware checks for hosts managed by this. */
    public FirmwareChecks firmwareChecks() { return firmwareChecks; }

    /** Returns the docker images to use for nodes in this. */
    public ContainerImages containerImages() { return containerImages; }

    /** Returns the status of maintenance jobs managed by this. */
    public JobControl jobControl() { return jobControl; }

    /** Returns this node repo's view of the applications deployed to it */
    public Applications applications() { return applications; }

    public NodeFlavors flavors() { return flavors; }

    public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; }

    /** The number of nodes we should ensure has free capacity for node failures whenever possible */
    public int spareCount() { return spareCount; }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> getNode(String hostname, State ... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns all nodes in any of the given states.
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the node, or empty if it was not found in any of the given states
     */
    public List<Node> getNodes(State ... inState) {
        return new ArrayList<>(db.readNodes(inState));
    }

    /**
     * Finds and returns the nodes of the given type in any of the given states.
     *
     * @param type the node type to return
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the node, or empty if it was not found in any of the given states
     */
    public List<Node> getNodes(NodeType type, State ... inState) {
        return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
    }

    /** Returns a filterable list of nodes in this repository in any of the given states */
    public NodeList list(State ... inState) {
        return NodeList.copyOf(getNodes(inState));
    }

    public NodeList list(ApplicationId application, State ... inState) {
        return NodeList.copyOf(getNodes(application, inState));
    }

    /** Returns a filterable list of all nodes of an application */
    public NodeList list(ApplicationId application) {
        return NodeList.copyOf(getNodes(application));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(getNodes(), lock);
    }

    /** Returns a filterable list of all load balancers in this repository */
    public LoadBalancerList loadBalancers() {
        return loadBalancers((ignored) -> true);
    }

    /** Returns a filterable list of load balancers belonging to given application */
    public LoadBalancerList loadBalancers(ApplicationId application) {
        return loadBalancers((id) -> id.application().equals(application));
    }

    private LoadBalancerList loadBalancers(Predicate<LoadBalancerId> predicate) {
        return LoadBalancerList.copyOf(db.readLoadBalancers(predicate).values());
    }

    public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); }

    public List<Node> getInactive() { return db.readNodes(State.inactive); }

    public List<Node> getFailed() { return db.readNodes(State.failed); }

    /**
     * Returns the ACL for the node (trusted nodes, networks and ports)
     */
    private NodeAcl getNodeAcl(Node node, NodeList candidates) {
        Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
        Set<Integer> trustedPorts = new LinkedHashSet<>();
        Set<String> trustedNetworks = new LinkedHashSet<>();

        // In all cases, trust:
        trustedPorts.add(22);                                   // port 22
        candidates.parentOf(node).ifPresent(trustedNodes::add); // the parent host, if any
        node.allocation().ifPresent(allocation -> {             // all nodes and load balancer networks of the owning application
            trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
            loadBalancers(allocation.owner()).asList().stream()
                                             .map(LoadBalancer::instance)
                                             .map(LoadBalancerInstance::networks)
                                             .forEach(trustedNetworks::addAll);
        });

        // Then add type-specific trust
        switch (node.type()) {
            case tenant:
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
                node.allocation().ifPresent(allocation ->
                        trustedNodes.addAll(candidates.parentsOf(candidates.owner(allocation.owner())).asList()));
                if (node.state() == State.ready) {
                    // Ready tenant nodes trust all other tenant nodes
                    trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
                }
                break;
            case config:
                trustedNodes.addAll(candidates.asList());
                trustedPorts.add(4443);
                break;
            case proxy:
                trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
                trustedPorts.add(443);
                trustedPorts.add(4080);
                trustedPorts.add(4443);
                break;
            case controller:
                trustedPorts.add(4443);
                trustedPorts.add(443);
                trustedPorts.add(80);
                break;
            default:
                illegal("Don't know how to create ACL for " + node + " of type " + node.type());
        }
        return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
    }

    /**
     * Creates a list of node ACLs which identify which nodes the given node should trust
     *
     * @param node Node for which to generate ACLs
     * @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
     * @return List of node ACLs
     */
    public List<NodeAcl> getNodeAcls(Node node, boolean children) {
        NodeList candidates = list();
        if (children) {
            return candidates.childrenOf(node).asList().stream()
                             .map(childNode -> getNodeAcl(childNode, candidates))
                             .collect(Collectors.toUnmodifiableList());
        }
        return List.of(getNodeAcl(node, candidates));
    }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(State.active);
        if (activeNodes.size() <= 5) return true; // Too few nodes to decide from; assume the zone works
        NodeList downNodes = activeNodes.down();
        return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); // working iff at most 20% are down
    }

    /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
    public List<Node> addDockerNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
                illegal("Cannot add " + node + ": This is not a docker node");
            if (node.allocation().isEmpty())
                illegal("Cannot add " + node + ": Docker containers needs to be allocated");
            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd = new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates in the argument list itself
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = getNode(node.hostname());
                if (existing.isPresent()) {
                    if (existing.get().state() != State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Carry history, reports, fail count and firmware check over from the deprovisioned node
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    nodesToRemove.add(existing.get());
                }
                nodesToAdd.add(node);
            }
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent);
            db.removeNodes(nodesToRemove);
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != State.provisioned && node.state() != State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty())
                            illegal("Can not set host " + node + " ready. Its IP address pool is empty.");
                        // Clear any retire request when (re)readying the node
                        return node.withWantToRetire(false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());
            return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));

        if (nodeToReady.state() == State.ready) return nodeToReady; // already ready: no-op
        return setReady(List.of(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream()
                                             .map(node -> node.with(node.allocation().get().removable(true)))
                                             .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    // NOTE(review): this Javadoc appears orphaned — it describes deactivate(...), which is defined elsewhere.
    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */

    /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */
    public void remove(ApplicationTransaction transaction) {
        NodeList applicationNodes = list(transaction.application());
        NodeList activeNodes = applicationNodes.state(State.active);
        deactivate(activeNodes.asList(), transaction);
        db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system,
                   Optional.of("Application is removed"), transaction.nested());
        applications.remove(transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeListFilter.from(nodes), (node, lock) -> deallocate(node, agent, reason));
    }

    public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = getNode(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

        // For hosts, also deallocate all children
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                        Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                        Stream.of(nodeToDirty))
                .filter(node -> node.state() != State.dirty)
                .collect(Collectors.toList());

        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                .filter(node -> node.state() != State.provisioned)
                .filter(node -> node.state() != State.failed)
                .filter(node -> node.state() != State.parked)
                .filter(node -> node.state() != State.breakfixed)
                .map(Node::hostname)
                .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

        return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     */
    public Node deallocate(Node node, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node deallocated = deallocate(node, agent, reason, transaction);
        transaction.commit();
        return deallocated;
    }

    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
    }

    public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
        // Nodes marked for deprovisioning, or retired by an operator, go to parked instead of dirty
        if (node.state() != State.parked && agent != Agent.operator
            && (node.status().wantToDeprovision() || retiredByOperator(node)))
            return park(node.hostname(), false, agent, reason, transaction);
        else
            return db.writeTo(State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
    }

    /** Returns whether the node wants to retire and that request was made by an operator */
    private static boolean retiredByOperator(Node node) {
        return node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire)
                                                  .map(History.Event::agent)
                                                  .map(agent -> agent == Agent.operator)
                                                  .orElse(false);
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.failed, agent, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     *
     * @return List of all the failed nodes in their new state
     */
    public List<Node> failRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node parked = park(hostname, keepAllocation, agent, reason, transaction);
        transaction.commit();
        return parked;
    }

    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
        return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason), transaction);
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.active, agent, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found"));

        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            List<Node> removed = removeChildren(node, false);
            removed.add(move(node, State.breakfixed, agent, Optional.of(reason)));
            return removed;
        }
    }

    private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
        // Children first, then the node itself
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child, toState, agent, reason))
                                 .collect(Collectors.toList());

        moved.add(move(hostname, true, toState, agent, reason));
        return moved;
    }

    private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(hostname, keepAllocation, toState, agent, reason, transaction);
        transaction.commit();
        return moved;
    }

    private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason,
                      NestedTransaction transaction) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));

        if (!keepAllocation && node.allocation().isPresent()) {
            node = node.withoutAllocation();
        }

        return move(node, toState, agent, reason, transaction);
    }

    private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(node, toState, agent, reason, transaction);
        transaction.commit();
        return moved;
    }

    private Node move(Node node, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) {
        if (toState == Node.State.active && node.allocation().isEmpty())
            illegal("Could not set " + node + " active. It has no allocation.");

        try (Mutex lock = lock(node)) {
            if (toState == State.active) {
                // Guard against another active node of the same application having the same cluster and index
                for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
        }
    }

    /*
     * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == State.ready) return node;

        // Refuse to ready a node whose parent host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);

            if (node.type().isHost()) {
                List<Node> removed = removeChildren(node, force);
                // Statically provisioned hosts are kept as deprovisioned (with IP config cleared), others are deleted
                if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
                    db.removeNodes(List.of(node));
                else {
                    node = node.with(IP.Config.EMPTY);
                    move(node, State.deprovisioned, Agent.system, Optional.empty());
                }
                removed.add(node);
                return removed;
            }
            else {
                List<Node> removed = List.of(node);
                db.removeNodes(removed);
                return removed;
            }
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        db.removeNodes(List.of(node));
    }

    private List<Node> removeChildren(Node node, boolean force) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node: node is unallocated
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *      If only removing the container node: node in state ready
     *      If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent())
            illegal(node + " is currently allocated and cannot be removed");

        if (!node.type().isHost() && !removingAsChild) {
            if (node.state() != State.ready)
                illegal(node + " can not be removed as it is not in the state " + State.ready);
        }
        else if (!node.type().isHost()) { // removing a child together with its parent
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
            if ( ! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
        else { // a host
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
            if (! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Throws if given node cannot be breakfixed.
     * Breakfix is allowed if the following is true:
     *  - Node is tenant host
     *  - Node is in zone without dynamic provisioning
     *  - Node is in parked or failed state
     */
    private void requireBreakfixable(Node node) {
        if (zone().getCloud().dynamicProvisioning()) {
            illegal("Can not breakfix in zone: " + zone());
        }

        if (node.type() != NodeType.host) {
            illegal(node + " can not be breakfixed as it is not a tenant host");
        }

        Set<State> legalStates = EnumSet.of(State.failed, State.parked);
        if (! legalStates.contains(node.state())) {
            illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(NodeFilter filter) {
        return performOn(StateFilter.from(State.active, filter),
                         (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                               lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(NodeFilter filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param filter the filter determining the set of nodes where the operation will be performed
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();

        // Partition matching nodes by which lock must be held while acting on them
        for (Node node : db.readNodes()) {
            if ( ! filter.matches(node)) continue;
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        // Perform the operation while holding the appropriate lock
        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                Optional<Node> currentNode = db.readNode(node.hostname()); // re-read under lock
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname()); // re-read under lock
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
        if ( ! host.type().canRun(NodeType.tenant)) return false;
        if (host.status().wantToRetire()) return false;
        if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;

        if (dynamicProvisioning)
            return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
        else
            return host.state() == State.active;
    }

    /** Returns the time keeper of this system */
    public Clock clock() { return clock; }

    /** Returns the zone of this system */
    public Zone zone() { return zone; }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(Node node) {
        Node staleNode = node;

        final int maxRetries = 4;
        for (int i = 0; i < maxRetries; ++i) {
            Mutex lockToClose = lock(staleNode);
            try {
                Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state());
                if (freshNode.isEmpty()) {
                    freshNode = getNode(staleNode.hostname());
                    if (freshNode.isEmpty()) {
                        return Optional.empty();
                    }
                }

                // If the owner is unchanged the lock we hold is the right one; hand it to the NodeMutex
                if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                                   staleNode.allocation().map(Allocation::owner))) {
                    NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                    lockToClose = null; // ownership transferred to nodeMutex; must not be closed in finally
                    return Optional.of(nodeMutex);
                }

                // Wrong lock was held: retry with the fresh node
                staleNode = freshNode.get();
            } finally {
                if (lockToClose != null) lockToClose.close();
            }
        }

        throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                        "fetching an up to date node under lock: " + node.hostname());
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) {
        return getNode(hostname).flatMap(this::lockAndGet);
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname()));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname));
    }

    private Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private void illegal(String message) { throw new IllegalArgumentException(message); }

}
Message bus guarantees a response to each successfully sent message. This counter is therefore incremented whenever an operation is accepted by the document access — i.e., when its message has been successfully sent — and counts the responses still outstanding.
/**
 * Shuts down this handler: destroys all visitor sessions, stops both dispatch executors,
 * drains the request and visitor-operation queues on this thread, then waits (up to a total
 * 30 second deadline) for outstanding document operations before destroying the async session.
 */
public void destroy() {
    Instant doom = clock.instant().plus(Duration.ofSeconds(30));

    visits.values().forEach(VisitorSession::destroy);

    // Stop the periodic dispatchers, then drain whatever remains on this thread instead.
    dispatcher.shutdown();
    visitDispatcher.shutdown();
    while ( ! (operations.isEmpty() && visitOperations.isEmpty()) && clock.instant().isBefore(doom)) {
        dispatchEnqueued();
        dispatchVisitEnqueued();
    }

    if ( ! operations.isEmpty())
        log.log(WARNING, "Failed to empty request queue before shutdown timeout — " +
                         operations.size() + " requests left");

    if ( ! visitOperations.isEmpty())
        log.log(WARNING, "Failed to empty visitor operations queue before shutdown timeout — " +
                         visitOperations.size() + " operations left"); // Fix: previously reported operations.size(), i.e. the wrong queue

    try {
        while (outstanding.get() > 0 && clock.instant().isBefore(doom))
            Thread.sleep(Math.max(1, Duration.between(clock.instant(), doom).toMillis()));

        if ( ! dispatcher.awaitTermination(Duration.between(clock.instant(), doom).toMillis(), TimeUnit.MILLISECONDS))
            dispatcher.shutdownNow();

        if ( ! visitDispatcher.awaitTermination(Duration.between(clock.instant(), doom).toMillis(), TimeUnit.MILLISECONDS))
            visitDispatcher.shutdownNow();
    }
    catch (InterruptedException e) {
        log.log(WARNING, "Interrupted waiting for /document/v1 executor to shut down");
        Thread.currentThread().interrupt(); // Restore interrupt status for callers further up the stack.
    }
    finally {
        asyncSession.destroy();
        if (outstanding.get() != 0)
            log.log(WARNING, "Failed to receive a response to " + outstanding.get() +
                             " outstanding document operations during shutdown");
    }
}
while (outstanding.get() > 0 && clock.instant().isBefore(doom))
/**
 * Shuts down this handler: destroys all visitor sessions, stops both dispatch executors,
 * drains the request and visitor-operation queues on this thread, then waits (up to a total
 * 30 second deadline) for outstanding document operations before destroying the async session.
 */
public void destroy() {
    Instant doom = clock.instant().plus(Duration.ofSeconds(30));

    visits.values().forEach(VisitorSession::destroy);

    // Stop the periodic dispatchers, then drain whatever remains on this thread instead.
    dispatcher.shutdown();
    visitDispatcher.shutdown();
    while ( ! (operations.isEmpty() && visitOperations.isEmpty()) && clock.instant().isBefore(doom)) {
        dispatchEnqueued();
        dispatchVisitEnqueued();
    }

    if ( ! operations.isEmpty())
        log.log(WARNING, "Failed to empty request queue before shutdown timeout — " +
                         operations.size() + " requests left");

    if ( ! visitOperations.isEmpty())
        log.log(WARNING, "Failed to empty visitor operations queue before shutdown timeout — " +
                         visitOperations.size() + " operations left"); // Fix: previously reported operations.size(), i.e. the wrong queue

    try {
        while (outstanding.get() > 0 && clock.instant().isBefore(doom))
            Thread.sleep(Math.max(1, Duration.between(clock.instant(), doom).toMillis()));

        if ( ! dispatcher.awaitTermination(Duration.between(clock.instant(), doom).toMillis(), TimeUnit.MILLISECONDS))
            dispatcher.shutdownNow();

        if ( ! visitDispatcher.awaitTermination(Duration.between(clock.instant(), doom).toMillis(), TimeUnit.MILLISECONDS))
            visitDispatcher.shutdownNow();
    }
    catch (InterruptedException e) {
        log.log(WARNING, "Interrupted waiting for /document/v1 executor to shut down");
        Thread.currentThread().interrupt(); // Restore interrupt status for callers further up the stack.
    }
    finally {
        asyncSession.destroy();
        if (outstanding.get() != 0)
            log.log(WARNING, "Failed to receive a response to " + outstanding.get() +
                             " outstanding document operations during shutdown");
    }
}
/**
 * Asynchronous HTTP handler for the /document/v1 REST API: maps request paths and methods to
 * document operations against a document access, and renders their responses as JSON.
 */
class DocumentV1ApiHandler extends AbstractRequestHandler {

    private static final Duration defaultTimeout = Duration.ofSeconds(175);
    private static final Logger log = Logger.getLogger(DocumentV1ApiHandler.class.getName());

    // Parsers for typed request properties.
    private static final Parser<Integer> integerParser = Integer::parseInt;
    private static final Parser<Long> timeoutMillisParser = value -> ParameterParser.asMilliSeconds(value, defaultTimeout.toMillis());
    private static final Parser<Boolean> booleanParser = Boolean::parseBoolean;

    // Completion handler which only logs failures, for fire-and-forget channel operations.
    private static final CompletionHandler logException = new CompletionHandler() {
        @Override public void completed() { }
        @Override public void failed(Throwable t) { log.log(FINE, "Exception writing or closing response data", t); }
    };

    // Content channel which accepts and discards all request content.
    private static final ContentChannel ignoredContent = new ContentChannel() {
        @Override public void write(ByteBuffer buf, CompletionHandler handler) { handler.completed(); }
        @Override public void close(CompletionHandler handler) { handler.completed(); }
    };

    private static final JsonFactory jsonFactory = new JsonFactory();

    // Names of recognized request properties.
    private static final String CREATE = "create";
    private static final String CONDITION = "condition";
    private static final String ROUTE = "route";
    private static final String FIELD_SET = "fieldSet";
    private static final String SELECTION = "selection";
    private static final String CLUSTER = "cluster";
    private static final String CONTINUATION = "continuation";
    private static final String WANTED_DOCUMENT_COUNT = "wantedDocumentCount";
    private static final String CONCURRENCY = "concurrency";
    private static final String BUCKET_SPACE = "bucketSpace";
    private static final String TIMEOUT = "timeout";
    private static final String TRACELEVEL = "tracelevel";
    private static final String DESTINATION = "destination";

    private final Clock clock;
    private final Metric metric;
    private final DocumentApiMetrics metrics;
    private final DocumentOperationParser parser;
    private final long maxThrottled; // Max number of enqueued operations before new ones are rejected as overload.
    private final DocumentAccess access;
    private final AsyncSession asyncSession;
    private final Map<String, StorageCluster> clusters;
    private final Deque<Operation> operations;
    private final Deque<BooleanSupplier> visitOperations = new ConcurrentLinkedDeque<>();
    private final AtomicLong enqueued = new AtomicLong();
    private final AtomicLong outstanding = new AtomicLong(); // Operations sent to the document access, awaiting a response.
    private final Map<VisitorControlHandler, VisitorSession> visits = new ConcurrentHashMap<>();
    private final ScheduledExecutorService dispatcher = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("document-api-handler-"));
    private final ScheduledExecutorService visitDispatcher = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("document-api-handler-visit-"));
    private final Map<String, Map<Method, Handler>> handlers = defineApi();

    @Inject
    public DocumentV1ApiHandler(Metric metric,
                                MetricReceiver metricReceiver,
                                VespaDocumentAccess documentAccess,
                                DocumentmanagerConfig documentManagerConfig,
                                ClusterListConfig clusterListConfig,
                                AllClustersBucketSpacesConfig bucketSpacesConfig,
                                DocumentOperationExecutorConfig executorConfig) {
        this(Clock.systemUTC(), metric, metricReceiver, documentAccess, documentManagerConfig,
             executorConfig, clusterListConfig, bucketSpacesConfig);
    }

    DocumentV1ApiHandler(Clock clock, Metric metric, MetricReceiver metricReceiver,
                         DocumentAccess access, DocumentmanagerConfig documentmanagerConfig,
                         DocumentOperationExecutorConfig executorConfig,
                         ClusterListConfig clusterListConfig, AllClustersBucketSpacesConfig bucketSpacesConfig) {
        this.clock = clock;
        this.parser = new DocumentOperationParser(documentmanagerConfig);
        this.metric = metric;
        this.metrics = new DocumentApiMetrics(metricReceiver, "documentV1");
        this.maxThrottled = executorConfig.maxThrottled();
        this.access = access;
        this.asyncSession = access.createAsyncSession(new AsyncParameters());
        this.clusters = parseClusters(clusterListConfig, bucketSpacesConfig);
        this.operations = new ConcurrentLinkedDeque<>();
        // Periodically retry enqueued operations which could not be dispatched immediately.
        this.dispatcher.scheduleWithFixedDelay(this::dispatchEnqueued,
                                               executorConfig.resendDelayMillis(),
                                               executorConfig.resendDelayMillis(),
                                               TimeUnit.MILLISECONDS);
        this.visitDispatcher.scheduleWithFixedDelay(this::dispatchVisitEnqueued,
                                                    executorConfig.resendDelayMillis(),
                                                    executorConfig.resendDelayMillis(),
                                                    TimeUnit.MILLISECONDS);
    }

    @Override
    public ContentChannel handleRequest(Request rawRequest, ResponseHandler rawResponseHandler) {
        HandlerMetricContextUtil.onHandle(rawRequest, metric, getClass());
        // Wrap the response handler so handled-metrics are reported when the response is produced.
        ResponseHandler responseHandler = response -> {
            HandlerMetricContextUtil.onHandled(rawRequest, metric, getClass());
            return rawResponseHandler.handleResponse(response);
        };
        HttpRequest request = (HttpRequest) rawRequest;
        try {
            // Apply the client-requested timeout, or the default, to the HTTP request.
            request.setTimeout(getProperty(request, TIMEOUT, timeoutMillisParser)
                                       .orElse(defaultTimeout.toMillis()),
                               TimeUnit.MILLISECONDS);
            Path requestPath = new Path(request.getUri());
            for (String path : handlers.keySet())
                if (requestPath.matches(path)) {
                    Map<Method, Handler> methods = handlers.get(path);
                    if (methods.containsKey(request.getMethod()))
                        return methods.get(request.getMethod()).handle(request, new DocumentPath(requestPath), responseHandler);
                    if (request.getMethod() == OPTIONS)
                        options(methods.keySet(), responseHandler);
                    // NOTE(review): there is no return after options()/methodNotAllowed(), so notFound()
                    // below may also respond for a matched path — looks like missing returns; confirm.
                    methodNotAllowed(request, methods.keySet(), responseHandler);
                }
            notFound(request, handlers.keySet(), responseHandler);
        }
        catch (IllegalArgumentException e) {
            badRequest(request, e, responseHandler);
        }
        catch (RuntimeException e) {
            serverError(request, e, responseHandler);
        }
        return ignoredContent;
    }

    @Override
    public void handleTimeout(Request request, ResponseHandler responseHandler) {
        timeout((HttpRequest) request, "Request timeout after " + request.getTimeout(TimeUnit.MILLISECONDS) + "ms", responseHandler);
    }

    // NOTE(review): stray annotation — the destroy() implementation this @Override belongs to
    // appears elsewhere in the file; confirm placement when merging.
    @Override

    /** A handler for a single HTTP method on a matched document path. */
    @FunctionalInterface
    interface Handler {
        ContentChannel handle(HttpRequest request, DocumentPath path, ResponseHandler handler);
    }

    /** Defines all paths/methods handled by this handler.
     * Paths are matched in insertion order (LinkedHashMap), so more specific paths must be added
     * before less specific ones if they could otherwise shadow each other. */
    private Map<String, Map<Method, Handler>> defineApi() {
        Map<String, Map<Method, Handler>> handlers = new LinkedHashMap<>();
        handlers.put("/document/v1/",
                     Map.of(GET, this::getDocuments,
                            POST, this::postDocuments,
                            DELETE, this::deleteDocuments));
        handlers.put("/document/v1/{namespace}/{documentType}/docid/",
                     Map.of(GET, this::getDocuments,
                            POST, this::postDocuments,
                            PUT, this::putDocuments,
                            DELETE, this::deleteDocuments));
        handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/",
                     Map.of(GET, this::getDocuments,
                            POST, this::postDocuments,
                            PUT, this::putDocuments,
                            DELETE, this::deleteDocuments));
        handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/",
                     Map.of(GET, this::getDocuments,
                            POST, this::postDocuments,
                            PUT, this::putDocuments,
                            DELETE, this::deleteDocuments));
        handlers.put("/document/v1/{namespace}/{documentType}/docid/{*}",
                     Map.of(GET, this::getDocument,
                            POST, this::postDocument,
                            PUT, this::putDocument,
                            DELETE, this::deleteDocument));
        handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/{*}",
                     Map.of(GET, this::getDocument,
                            POST, this::postDocument,
                            PUT, this::putDocument,
                            DELETE, this::deleteDocument));
        handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/{*}",
                     Map.of(GET, this::getDocument,
                            POST, this::postDocument,
                            PUT, this::putDocument,
                            DELETE, this::deleteDocument));
        return Collections.unmodifiableMap(handlers);
    }

    /** GET at a collection level: visit documents matching the given selection, and stream them back. */
    private ContentChannel getDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) {
        enqueueAndDispatch(request, handler, () -> {
            VisitorParameters parameters = parseParameters(request, path);
            return () -> {
                visitAndWrite(request, parameters, handler);
                return true;
            };
        });
        return ignoredContent;
    }

    /** POST at a collection level: visit documents and send them to the given remote 'destination' route. */
    private ContentChannel postDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) {
        enqueueAndDispatch(request, handler, () -> {
            VisitorParameters parameters = parseParameters(request, path);
            parameters.setRemoteDataHandler(getProperty(request, DESTINATION).orElseThrow(() -> new IllegalArgumentException("Missing required property '" + DESTINATION + "'")));
            return () -> {
                visitWithRemote(request, parameters, handler);
                return true;
            };
        });
        return ignoredContent;
    }

    /** PUT at a collection level: apply the request-body update to each document matching the required selection. */
    private ContentChannel putDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) {
        if (getProperty(request, SELECTION).isEmpty())
            throw new IllegalArgumentException("Missing required property '" + SELECTION + "'");
        return new ForwardingContentChannel(in -> {
            enqueueAndDispatch(request, handler, () -> {
                String type = path.documentType().orElseThrow(() -> new IllegalStateException("Document type must be specified for mass updates"));
                // The update is parsed against a placeholder id; the real id is set per visited document.
                IdIdString dummyId = new IdIdString("dummy", type, "", "");
                VisitorParameters parameters = parseParameters(request, path);
                parameters.setFieldSet(DocIdOnly.NAME); // Only ids are needed; the update carries the content.
                DocumentUpdate update = parser.parseUpdate(in, dummyId.toString());
                update.setCondition(new TestAndSetCondition(parameters.getDocumentSelection()));
                return () -> {
                    visitAndUpdate(request, parameters, handler, update, getProperty(request, DESTINATION));
                    return true;
                };
            });
        });
    }

    /** DELETE at a collection level: remove each document matching the required selection. */
    private ContentChannel deleteDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) {
        enqueueAndDispatch(request, handler, () -> {
            if (getProperty(request, SELECTION).isEmpty())
                throw new IllegalArgumentException("Missing required property '" + SELECTION + "'");
            VisitorParameters parameters = parseParameters(request, path);
            parameters.setFieldSet(DocIdOnly.NAME); // Only ids are needed for removes.
            TestAndSetCondition condition = new TestAndSetCondition(parameters.getDocumentSelection());
            return () -> {
                visitAndDelete(request, parameters, handler, condition, getProperty(request, DESTINATION));
                return true;
            };
        });
        return ignoredContent;
    }

    /** GET of a single document: 200 with the document, or 404 when it does not exist. */
    private ContentChannel getDocument(HttpRequest request, DocumentPath path, ResponseHandler handler) {
        enqueueAndDispatch(request, handler, () -> {
            DocumentOperationParameters rawParameters = parametersFromRequest(request, CLUSTER, FIELD_SET);
            if (rawParameters.fieldSet().isEmpty())
                rawParameters = rawParameters.withFieldSet(path.documentType().orElseThrow() + ":[document]");
            DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> {
                outstanding.decrementAndGet();
                handle(path, handler, response, (document, jsonResponse) -> {
                    if (document != null) {
                        jsonResponse.writeSingleDocument(document);
                        jsonResponse.commit(Response.Status.OK);
                    }
                    else
                        jsonResponse.commit(Response.Status.NOT_FOUND);
                });
            });
            return () -> dispatchOperation(() -> asyncSession.get(path.id(), parameters));
        });
        return ignoredContent;
    }

    /** POST of a single document: parse the request body as a put and dispatch it. */
    private ContentChannel postDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) {
        ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.PUT, clock.instant());
        return new ForwardingContentChannel(in -> {
            enqueueAndDispatch(request, handler, () -> {
                DocumentPut put = parser.parsePut(in, path.id().toString());
                getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(put::setCondition);
                DocumentOperationParameters parameters = parametersFromRequest(request, ROUTE)
                        .withResponseHandler(response -> {
                            outstanding.decrementAndGet();
                            handle(path, handler, response);
                        });
                return () -> dispatchOperation(() -> asyncSession.put(put, parameters));
            });
        });
    }

    /** PUT of a single document: parse the request body as an update and dispatch it. */
    private ContentChannel putDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) {
        ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.UPDATE, clock.instant());
        return new ForwardingContentChannel(in -> {
            enqueueAndDispatch(request, handler, () -> {
                DocumentUpdate update = parser.parseUpdate(in, path.id().toString());
                getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(update::setCondition);
                getProperty(request, CREATE, booleanParser).ifPresent(update::setCreateIfNonExistent);
                DocumentOperationParameters parameters = parametersFromRequest(request, ROUTE)
                        .withResponseHandler(response -> {
                            outstanding.decrementAndGet();
                            handle(path, handler, response);
                        });
                return () -> dispatchOperation(() -> asyncSession.update(update, parameters));
            });
        });
    }

    /** DELETE of a single document: dispatch a remove, honoring an optional test-and-set condition. */
    private ContentChannel deleteDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) {
        ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.REMOVE, clock.instant());
        enqueueAndDispatch(request, handler, () -> {
            DocumentRemove remove = new DocumentRemove(path.id());
            getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(remove::setCondition);
            DocumentOperationParameters parameters = parametersFromRequest(request, ROUTE)
                    .withResponseHandler(response -> {
                        outstanding.decrementAndGet();
                        handle(path, handler, response);
                    });
            return () -> dispatchOperation(() -> asyncSession.remove(remove, parameters));
        });
        return ignoredContent;
    }

    /** Builds document operation parameters from the request's tracelevel plus the named properties. */
    private DocumentOperationParameters parametersFromRequest(HttpRequest request, String... names) {
        DocumentOperationParameters parameters = getProperty(request, TRACELEVEL, integerParser).map(parameters()::withTraceLevel)
                                                                                               .orElse(parameters());
        for (String name : names)
            switch (name) {
                case CLUSTER:
                    parameters = getProperty(request, CLUSTER).map(cluster -> resolveCluster(Optional.of(cluster), clusters).route())
                                                              .map(parameters::withRoute)
                                                              .orElse(parameters);
                    break;
                case FIELD_SET:
                    parameters = getProperty(request, FIELD_SET).map(parameters::withFieldSet)
                                                                .orElse(parameters);
                    break;
                case ROUTE:
                    parameters = getProperty(request, ROUTE).map(parameters::withRoute)
                                                            .orElse(parameters);
                    break;
                default:
                    throw new IllegalArgumentException("Unrecognized document operation parameter name '" + name + "'");
            }
        return parameters;
    }

    /** Dispatches enqueued requests until one is blocked.
 */
    void dispatchEnqueued() {
        try {
            while (dispatchFirst());
        }
        catch (Exception e) {
            log.log(WARNING, "Uncaught exception in /document/v1 dispatch thread", e);
        }
    }

    /** Attempts to dispatch the first enqueued operations, and returns whether this was successful. */
    private boolean dispatchFirst() {
        Operation operation = operations.poll();
        if (operation == null)
            return false;
        if (operation.dispatch()) {
            enqueued.decrementAndGet();
            return true;
        }
        // Could not be dispatched now — put it back at the head of the queue to preserve order.
        operations.push(operation);
        return false;
    }

    /** Dispatches enqueued requests until one is blocked. */
    void dispatchVisitEnqueued() {
        try {
            while (dispatchFirstVisit());
        }
        catch (Exception e) {
            log.log(WARNING, "Uncaught exception in /document/v1 dispatch thread", e);
        }
    }

    /** Attempts to dispatch the first enqueued visit operations, and returns whether this was successful. */
    private boolean dispatchFirstVisit() {
        BooleanSupplier operation = visitOperations.poll();
        if (operation == null)
            return false;
        if (operation.getAsBoolean())
            return true;
        // Could not be dispatched now — put it back at the head of the queue to preserve order.
        visitOperations.push(operation);
        return false;
    }

    /**
     * Enqueues the given request and operation, or responds with "overload" if the queue is full,
     * and then attempts to dispatch an enqueued operation from the head of the queue.
     */
    private void enqueueAndDispatch(HttpRequest request, ResponseHandler handler, Supplier<BooleanSupplier> operationParser) {
        if (enqueued.incrementAndGet() > maxThrottled) {
            enqueued.decrementAndGet();
            overload(request, "Rejecting execution due to overload: " + maxThrottled + " requests already enqueued", handler);
            return;
        }
        operations.offer(new Operation(request, handler) {
            @Override BooleanSupplier parse() { return operationParser.get(); }
        });
        dispatchFirst();
    }

    /** Class for writing and returning JSON responses to document operations in a thread safe manner.
 */
    private static class JsonResponse implements AutoCloseable {

        private final BufferedContentChannel buffer = new BufferedContentChannel();
        private final OutputStream out = new ContentChannelOutputStream(buffer);
        private final JsonGenerator json = jsonFactory.createGenerator(out);
        private final ResponseHandler handler;
        private ContentChannel channel; // Set once the response is committed; null before that.

        private JsonResponse(ResponseHandler handler) throws IOException {
            this.handler = handler;
            json.writeStartObject();
        }

        /** Creates a new JsonResponse with path and id fields written. */
        static JsonResponse create(DocumentPath path, ResponseHandler handler) throws IOException {
            JsonResponse response = new JsonResponse(handler);
            response.writePathId(path.rawPath());
            response.writeDocId(path.id());
            return response;
        }

        /** Creates a new JsonResponse with path field written. */
        static JsonResponse create(HttpRequest request, ResponseHandler handler) throws IOException {
            JsonResponse response = new JsonResponse(handler);
            response.writePathId(request.getUri().getRawPath());
            return response;
        }

        /** Creates a new JsonResponse with path and message fields written. */
        static JsonResponse create(HttpRequest request, String message, ResponseHandler handler) throws IOException {
            JsonResponse response = new JsonResponse(handler);
            response.writePathId(request.getUri().getRawPath());
            response.writeMessage(message);
            return response;
        }

        /** Commits a response with the given status code and some default headers, and writes whatever content is buffered. */
        synchronized void commit(int status) throws IOException {
            Response response = new Response(status);
            response.headers().addAll(Map.of("Content-Type", List.of("application/json; charset=UTF-8")));
            try {
                channel = handler.handleResponse(response);
                buffer.connectTo(channel);
            }
            catch (RuntimeException e) {
                throw new IOException(e);
            }
        }

        /** Commits a response with the given status code and some default headers, writes buffered content, and closes this.
 */
        synchronized void respond(int status) throws IOException {
            try (this) {
                commit(status);
            }
        }

        /** Closes the JSON and the output content channel of this. */
        @Override
        public synchronized void close() throws IOException {
            try {
                if (channel == null) {
                    // A response must always be sent; fall back to a 500 if none was committed.
                    log.log(WARNING, "Close called before response was committed, in " + getClass().getName());
                    commit(Response.Status.INTERNAL_SERVER_ERROR);
                }
                json.close();
                out.close();
            }
            finally {
                if (channel != null)
                    channel.close(logException);
            }
        }

        synchronized void writePathId(String path) throws IOException {
            json.writeStringField("pathId", path);
        }

        synchronized void writeMessage(String message) throws IOException {
            json.writeStringField("message", message);
        }

        synchronized void writeDocId(DocumentId id) throws IOException {
            json.writeStringField("id", id.toString());
        }

        synchronized void writeTrace(Trace trace) throws IOException {
            if (trace != null && ! trace.getRoot().isEmpty()) {
                writeTrace(trace.getRoot());
            }
        }

        // Recursively renders a trace node and its children as nested JSON objects.
        private void writeTrace(TraceNode node) throws IOException {
            if (node.hasNote())
                json.writeStringField("message", node.getNote());
            if ( ! node.isLeaf()) {
                json.writeArrayFieldStart(node.isStrict() ? "trace" : "fork");
                for (int i = 0; i < node.getNumChildren(); i++) {
                    json.writeStartObject();
                    writeTrace(node.getChild(i));
                    json.writeEndObject();
                }
                json.writeEndArray();
            }
        }

        synchronized void writeSingleDocument(Document document) throws IOException {
            new JsonWriter(json).writeFields(document);
        }

        synchronized void writeDocumentsArrayStart() throws IOException {
            json.writeArrayFieldStart("documents");
        }

        synchronized void writeDocumentValue(Document document) {
            new JsonWriter(json).write(document);
        }

        synchronized void writeArrayEnd() throws IOException {
            json.writeEndArray();
        }

        synchronized void writeContinuation(String token) throws IOException {
            json.writeStringField("continuation", token);
        }

    }

    /** Responds 204 with an Allow header listing the given methods. */
    private static void options(Collection<Method> methods, ResponseHandler handler) {
        loggingException(() -> {
            Response response = new Response(Response.Status.NO_CONTENT);
            response.headers().add("Allow", methods.stream().sorted().map(Method::name).collect(joining(",")));
            handler.handleResponse(response).close(logException);
        });
    }

    private static void badRequest(HttpRequest request, IllegalArgumentException e, ResponseHandler handler) {
        loggingException(() -> {
            String message = Exceptions.toMessageString(e);
            log.log(FINE, () -> "Bad request for " + request.getMethod() + " at " + request.getUri().getRawPath() + ": " + message);
            JsonResponse.create(request, message, handler).respond(Response.Status.BAD_REQUEST);
        });
    }

    private static void notFound(HttpRequest request, Collection<String> paths, ResponseHandler handler) {
        loggingException(() -> {
            JsonResponse.create(request,
                                "Nothing at '" + request.getUri().getRawPath() + "'. " +
                                "Available paths are:\n" + String.join("\n", paths),
                                handler)
                        .respond(Response.Status.NOT_FOUND);
        });
    }

    private static void methodNotAllowed(HttpRequest request, Collection<Method> methods, ResponseHandler handler) {
        loggingException(() -> {
            JsonResponse.create(request,
                                "'" + request.getMethod() + "' not allowed at '" + request.getUri().getRawPath() + "'. " +
                                "Allowed methods are: " + methods.stream().sorted().map(Method::name).collect(joining(", ")),
                                handler)
                        .respond(Response.Status.METHOD_NOT_ALLOWED);
        });
    }

    private static void overload(HttpRequest request, String message, ResponseHandler handler) {
        loggingException(() -> {
            log.log(FINE, () -> "Overload handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message);
            JsonResponse.create(request, message, handler).respond(Response.Status.TOO_MANY_REQUESTS);
        });
    }

    private static void serverError(HttpRequest request, Throwable t, ResponseHandler handler) {
        loggingException(() -> {
            log.log(WARNING, "Uncaught exception handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ":", t);
            JsonResponse.create(request, Exceptions.toMessageString(t), handler).respond(Response.Status.INTERNAL_SERVER_ERROR);
        });
    }

    private static void timeout(HttpRequest request, String message, ResponseHandler handler) {
        loggingException(() -> {
            log.log(FINE, () -> "Timeout handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message);
            JsonResponse.create(request, message, handler).respond(Response.Status.GATEWAY_TIMEOUT);
        });
    }

    /** Runs the given task, logging (at FINE) rather than propagating any exception it throws. */
    private static void loggingException(RunnableThrowingIOException runnable) {
        try {
            runnable.run();
        }
        catch (Exception e) {
            log.log(FINE, "Failed writing response", e);
        }
    }

    /** An enqueued document operation: its parsed form is created lazily, on first dispatch attempt. */
    private static abstract class Operation {

        private final Lock lock = new ReentrantLock();
        private final HttpRequest request;
        private final ResponseHandler handler;
        private BooleanSupplier operation; // Lazily parsed on first dispatch; cached for retries.

        Operation(HttpRequest request, ResponseHandler handler) {
            this.request = request;
            this.handler = handler;
        }

        /**
         * Attempts to dispatch this operation to the document API, and returns whether this completed or not.
         * Returns {@code true} if dispatch was successful, or if it failed fatally; or {@code false} if
         * dispatch should be retried at a later time.
         */
        boolean dispatch() {
            if (request.isCancelled())
                return true; // Nothing to do for a cancelled request; treat as completed.
            if ( ! lock.tryLock())
                throw new IllegalStateException("Concurrent attempts at dispatch — this is a bug");
            try {
                if (operation == null)
                    operation = parse(); // Parse lazily, and only once, even across retries.
                return operation.getAsBoolean();
            }
            catch (IllegalArgumentException e) {
                badRequest(request, e, handler);
            }
            catch (RuntimeException e) {
                serverError(request, e, handler);
            }
            finally {
                lock.unlock();
            }
            return true; // Fatal failure: a response was sent above, so the operation is done.
        }

        abstract BooleanSupplier parse();

    }

    /** Attempts to send the given document operation, returning false if this needs to be retried. */
    private boolean dispatchOperation(Supplier<Result> documentOperation) {
        Result result = documentOperation.get();
        if (result.type() == Result.ResultType.TRANSIENT_ERROR)
            return false; // Session is saturated; retry later.
        if (result.type() == Result.ResultType.FATAL_ERROR)
            throw new RuntimeException(result.getError());
        outstanding.incrementAndGet();
        return true;
    }

    /** Readable content channel which forwards data to a reader when closed. */
    static class ForwardingContentChannel implements ContentChannel {

        private final ReadableContentChannel delegate = new ReadableContentChannel();
        private final Consumer<InputStream> reader;

        public ForwardingContentChannel(Consumer<InputStream> reader) {
            this.reader = reader;
        }

        /** Write is complete when we have stored the buffer — call completion handler. */
        @Override
        public void write(ByteBuffer buf, CompletionHandler handler) {
            try {
                delegate.write(buf, logException);
                handler.completed();
            }
            catch (Exception e) {
                handler.failed(e);
            }
        }

        /** Close is complete when we have closed the buffer.
 */
        @Override
        public void close(CompletionHandler handler) {
            try {
                delegate.close(logException);
                // All content is now buffered; hand the full stream to the reader.
                reader.accept(new UnsafeContentInputStream(delegate));
                handler.completed();
            }
            catch (Exception e) {
                handler.failed(e);
            }
        }

    }

    /** Parses request bodies into document puts and updates, using the configured document types. */
    static class DocumentOperationParser {

        private final DocumentTypeManager manager;

        DocumentOperationParser(DocumentmanagerConfig config) {
            this.manager = new DocumentTypeManager(config);
        }

        DocumentPut parsePut(InputStream inputStream, String docId) {
            return (DocumentPut) parse(inputStream, docId, DocumentOperationType.PUT);
        }

        DocumentUpdate parseUpdate(InputStream inputStream, String docId) {
            return (DocumentUpdate) parse(inputStream, docId, DocumentOperationType.UPDATE);
        }

        private DocumentOperation parse(InputStream inputStream, String docId, DocumentOperationType operation) {
            return new JsonReader(manager, inputStream, jsonFactory).readSingleDocument(operation, docId);
        }

    }

    /** Callback invoked with the document (possibly null) of a successful document API response. */
    interface SuccessCallback {
        void onSuccess(Document document, JsonResponse response) throws IOException;
    }

    /** Renders a document API response as JSON, mapping its outcome to an HTTP status code. */
    private static void handle(DocumentPath path, ResponseHandler handler, com.yahoo.documentapi.Response response, SuccessCallback callback) {
        try (JsonResponse jsonResponse = JsonResponse.create(path, handler)) {
            jsonResponse.writeTrace(response.getTrace());
            if (response.isSuccess())
                callback.onSuccess((response instanceof DocumentResponse) ?
((DocumentResponse) response).getDocument() : null, jsonResponse);
            else {
                jsonResponse.writeMessage(response.getTextMessage());
                switch (response.outcome()) {
                    case NOT_FOUND:
                        jsonResponse.commit(Response.Status.NOT_FOUND);
                        break;
                    case CONDITION_FAILED:
                        jsonResponse.commit(Response.Status.PRECONDITION_FAILED);
                        break;
                    case INSUFFICIENT_STORAGE:
                        jsonResponse.commit(Response.Status.INSUFFICIENT_STORAGE);
                        break;
                    default:
                        log.log(WARNING, "Unexpected document API operation outcome '" + response.outcome() + "'");
                        // Intentional fall-through: unexpected outcomes are treated as errors.
                    case ERROR:
                        log.log(FINE, () -> "Exception performing document operation: " + response.getTextMessage());
                        jsonResponse.commit(Response.Status.INTERNAL_SERVER_ERROR);
                }
            }
        }
        catch (Exception e) {
            log.log(FINE, "Failed writing response", e);
        }
    }

    /** As above, but committing 200 OK on success without writing a document. */
    private static void handle(DocumentPath path, ResponseHandler handler, com.yahoo.documentapi.Response response) {
        handle(path, handler, response, (document, jsonResponse) -> jsonResponse.commit(Response.Status.OK));
    }

    /** Builds visitor parameters from the request's properties and the matched document path. */
    private VisitorParameters parseParameters(HttpRequest request, DocumentPath path) {
        int wantedDocumentCount = Math.min(1 << 10, getProperty(request, WANTED_DOCUMENT_COUNT, integerParser).orElse(1));
        if (wantedDocumentCount <= 0)
            throw new IllegalArgumentException("wantedDocumentCount must be positive");
        int concurrency = Math.min(100, getProperty(request, CONCURRENCY, integerParser).orElse(1));
        if (concurrency <= 0)
            throw new IllegalArgumentException("concurrency must be positive");
        Optional<String> cluster = getProperty(request, CLUSTER);
        if (cluster.isEmpty() && path.documentType().isEmpty())
            throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level");
        // And-combine the user selection with the constraints implied by the path (type, namespace, group).
        VisitorParameters parameters = new VisitorParameters(Stream.of(getProperty(request, SELECTION),
                                                                       path.documentType(),
                                                                       path.namespace().map(value -> "id.namespace=='" + value + "'"),
                                                                       path.group().map(Group::selection))
                                                                   .flatMap(Optional::stream)
                                                                   .reduce(new StringJoiner(") and (", "(", ")").setEmptyValue(""),
                                                                           StringJoiner::add,
                                                                           StringJoiner::merge)
                                                                   .toString());
        getProperty(request, CONTINUATION).map(ProgressToken::fromSerializedString).ifPresent(parameters::setResumeToken);
        parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME)));
        parameters.setMaxTotalHits(wantedDocumentCount);
        parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(concurrency));
        // Give the visitor session slightly less time than the request, so it can time out first.
        parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - 5000));
        parameters.visitInconsistentBuckets(true);
        parameters.setPriority(DocumentProtocol.Priority.NORMAL_4);
        StorageCluster storageCluster = resolveCluster(cluster, clusters);
        parameters.setRoute(storageCluster.route());
        parameters.setBucketSpace(resolveBucket(storageCluster,
                                                path.documentType(),
                                                List.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()),
                                                getProperty(request, BUCKET_SPACE)));
        return parameters;
    }

    private interface VisitCallback {
        /** Called at the start of response rendering. */
        default void onStart(JsonResponse response) throws IOException { }
        /** Called for every document received from backend visitors — must call the ack for these to proceed. */
        default void onDocument(JsonResponse response, Document document, Runnable ack, Consumer<String> onError) { }
        /** Called at the end of response rendering, before generic status data is written. */
        default void onEnd(JsonResponse response) throws IOException { }
    }

    /** Visits documents and issues a conditioned remove for each visited document id. */
    private void visitAndDelete(HttpRequest request, VisitorParameters parameters, ResponseHandler handler,
                                TestAndSetCondition condition, Optional<String> route) {
        visitAndProcess(request, parameters, handler, route, (id, operationParameters) -> {
            DocumentRemove remove = new DocumentRemove(id);
            remove.setCondition(condition);
            return asyncSession.remove(remove, operationParameters);
        });
    }

    /** Visits documents and applies a copy of the prototype update to each visited document id. */
    private void visitAndUpdate(HttpRequest request, VisitorParameters parameters, ResponseHandler handler,
                                DocumentUpdate protoUpdate, Optional<String> route) {
        visitAndProcess(request, parameters, handler, route, (id, operationParameters) -> {
            DocumentUpdate update = new DocumentUpdate(protoUpdate);
            update.setId(id);
            return asyncSession.update(update, operationParameters);
        });
    }

    /** Visits documents and applies the given operation to each, through the visit-operation queue. */
    private void visitAndProcess(HttpRequest request, VisitorParameters parameters, ResponseHandler handler,
                                 Optional<String> route, BiFunction<DocumentId, DocumentOperationParameters, Result> operation) {
        visit(request, parameters, handler, new VisitCallback() {
            @Override public void onDocument(JsonResponse response, Document document, Runnable ack, Consumer<String> onError) {
                DocumentOperationParameters operationParameters = (route.isEmpty() ? parameters() : parameters().withRoute(route.get()))
                        .withResponseHandler(operationResponse -> {
                            outstanding.decrementAndGet();
                            switch (operationResponse.outcome()) {
                                case SUCCESS:
                                case NOT_FOUND:
                                case CONDITION_FAILED:
                                    break; // This is all OK — the latter two are due to mutating changes after the visit.
                                case ERROR:
                                case INSUFFICIENT_STORAGE:
                                    onError.accept(operationResponse.getTextMessage());
                                    break;
                                default:
                                    onError.accept("Unexpected response " + operationResponse);
                            }
                        });
                // Enqueue the actual send, so transient session saturation causes a retry rather than a failure.
                visitOperations.offer(() -> {
                    Result result = operation.apply(document.getId(), operationParameters);
                    if (result.type() == Result.ResultType.TRANSIENT_ERROR)
                        return false;
                    if (result.type() == Result.ResultType.FATAL_ERROR)
                        onError.accept(result.getError().getMessage());
                    else
                        outstanding.incrementAndGet();
                    ack.run();
                    return true;
                });
                dispatchFirstVisit();
            }
        });
    }

    /** Visits documents and streams them back to the client as a JSON array. */
    private void visitAndWrite(HttpRequest request, VisitorParameters parameters, ResponseHandler handler) {
        visit(request, parameters, handler, new VisitCallback() {
            @Override public void onStart(JsonResponse response) throws IOException {
                response.writeDocumentsArrayStart();
            }
            @Override public void onDocument(JsonResponse response, Document document, Runnable ack, Consumer<String> onError) {
                response.writeDocumentValue(document);
                ack.run();
            }
            @Override public void onEnd(JsonResponse response) throws IOException {
                response.writeArrayEnd();
            }
        });
    }

    /** Visits documents, letting the configured remote data handler receive them. */
    private void visitWithRemote(HttpRequest request, VisitorParameters parameters, ResponseHandler handler) {
        visit(request, parameters, handler, new VisitCallback() { });
    }

    /** Runs a visitor session, driving the given callback, and renders the outcome as JSON. */
    private void visit(HttpRequest request, VisitorParameters parameters, ResponseHandler handler, VisitCallback callback) {
        try {
            JsonResponse response = JsonResponse.create(request, handler);
            Phaser phaser = new Phaser(2); // Synchronize this thread and the visitor callback thread on session setup.
            AtomicReference<String> error = new AtomicReference<>(); // Set by callbacks on failure.
            callback.onStart(response);
            VisitorControlHandler controller = new VisitorControlHandler() {
                @Override public void onDone(CompletionCode code, String message) {
                    super.onDone(code, message);
                    loggingException(() -> {
                        callback.onEnd(response);
                        // NOTE: the TIMEOUT and SUCCESS/ABORTED cases fall through to the error
                        // handling below when their guarding conditions do not hold — intentional.
                        switch (code) {
                            case TIMEOUT:
                                if ( ! hasVisitedAnyBuckets()) {
                                    response.writeMessage("No buckets visited within timeout of " + parameters.getSessionTimeoutMs() + "ms (request timeout -5s)");
                                    response.respond(Response.Status.GATEWAY_TIMEOUT);
                                    break;
                                }
                            case SUCCESS:
                            case ABORTED:
                                if (error.get() == null) {
                                    if (getProgress() != null && ! getProgress().isFinished())
                                        response.writeContinuation(getProgress().serializeToString());
                                    response.respond(Response.Status.OK);
                                    break;
                                }
                            default:
                                response.writeMessage(error.get() != null ? error.get() : message != null ? message : "Visiting failed");
                                response.respond(Response.Status.INTERNAL_SERVER_ERROR);
                        }
                        // Destroy the session on the visit dispatch thread, after setup has completed.
                        visitDispatcher.execute(() -> {
                            phaser.arriveAndAwaitAdvance();
                            visits.remove(this).destroy();
                        });
                    });
                }
            };
            if (parameters.getRemoteDataHandler() == null) {
                parameters.setLocalDataHandler(new VisitorDataHandler() {
                    @Override public void onMessage(Message m, AckToken token) {
                        if (m instanceof PutDocumentMessage)
                            callback.onDocument(response,
                                                ((PutDocumentMessage) m).getDocumentPut().getDocument(),
                                                () -> ack(token),
                                                errorMessage -> {
                                                    error.set(errorMessage);
                                                    controller.abort();
                                                });
                        else
                            throw new UnsupportedOperationException("Only PutDocumentMessage is supported, but got a " + m.getClass());
                    }
                });
            }
            parameters.setControlHandler(controller);
            visits.put(controller, access.createVisitorSession(parameters));
            phaser.arriveAndDeregister(); // Release the onDone-triggered destruction above.
        }
        catch (ParseException e) {
            badRequest(request, new IllegalArgumentException(e), handler);
        }
        catch (IOException e) {
            log.log(FINE, "Failed writing response", e);
        }
    }

    /** Returns the last property with the given name, if present, or throws if this is empty or blank. */
    private static Optional<String> getProperty(HttpRequest request, String name) {
        if ( !
request.parameters().containsKey(name)) return Optional.empty(); List<String> values = request.parameters().get(name); String value; if (values == null || values.isEmpty() || (value = values.get(values.size() - 1)) == null || value.isEmpty()) throw new IllegalArgumentException("Expected non-empty value for request property '" + name + "'"); return Optional.of(value); } private static <T> Optional<T> getProperty(HttpRequest request, String name, Parser<T> parser) { return getProperty(request, name).map(parser::parse); } @FunctionalInterface interface Parser<T> extends Function<String, T> { default T parse(String value) { try { return apply(value); } catch (RuntimeException e) { throw new IllegalArgumentException("Failed parsing '" + value + "': " + Exceptions.toMessageString(e)); } } } private class MeasuringResponseHandler implements ResponseHandler { private final ResponseHandler delegate; private final com.yahoo.documentapi.metrics.DocumentOperationType type; private final Instant start; private MeasuringResponseHandler(ResponseHandler delegate, com.yahoo.documentapi.metrics.DocumentOperationType type, Instant start) { this.delegate = delegate; this.type = type; this.start = start; } @Override public ContentChannel handleResponse(Response response) { switch (response.getStatus() / 100) { case 2: metrics.reportSuccessful(type, start); break; case 4: metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR); break; case 5: metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR); break; } return delegate.handleResponse(response); } } static class StorageCluster { private final String name; private final Map<String, String> documentBuckets; StorageCluster(String name, Map<String, String> documentBuckets) { this.name = requireNonNull(name); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Content:cluster=" + name() + "]"; } Optional<String> bucketOf(String documentType) { return 
Optional.ofNullable(documentBuckets.get(documentType)); } } private static Map<String, StorageCluster> parseClusters(ClusterListConfig clusters, AllClustersBucketSpacesConfig buckets) { return clusters.storage().stream() .collect(toUnmodifiableMap(storage -> storage.name(), storage -> new StorageCluster(storage.name(), buckets.cluster(storage.name()) .documentType().entrySet().stream() .collect(toMap(entry -> entry.getKey(), entry -> entry.getValue().bucketSpace()))))); } static StorageCluster resolveCluster(Optional<String> wanted, Map<String, StorageCluster> clusters) { if (clusters.isEmpty()) throw new IllegalArgumentException("Your Vespa deployment has no content clusters, so the document API is not enabled"); return wanted.map(cluster -> { if ( ! clusters.containsKey(cluster)) throw new IllegalArgumentException("Your Vespa deployment has no content cluster '" + cluster + "', only '" + String.join("', '", clusters.keySet()) + "'"); return clusters.get(cluster); }).orElseGet(() -> { if (clusters.size() > 1) throw new IllegalArgumentException("Please specify one of the content clusters in your Vespa deployment: '" + String.join("', '", clusters.keySet()) + "'"); return clusters.values().iterator().next(); }); } static String resolveBucket(StorageCluster cluster, Optional<String> documentType, List<String> bucketSpaces, Optional<String> bucketSpace) { return documentType.map(type -> cluster.bucketOf(type) .orElseThrow(() -> new IllegalArgumentException("Document type '" + type + "' in cluster '" + cluster.name() + "' is not mapped to a known bucket space"))) .or(() -> bucketSpace.map(space -> { if ( ! 
bucketSpaces.contains(space)) throw new IllegalArgumentException("Bucket space '" + space + "' is not a known bucket space; expected one of " + String.join(", ", bucketSpaces)); return space; })) .orElse(FixedBucketSpaces.defaultSpace()); } private static class DocumentPath { private final Path path; private final Optional<Group> group; DocumentPath(Path path) { this.path = requireNonNull(path); this.group = Optional.ofNullable(path.get("number")).map(integerParser::parse).map(Group::of) .or(() -> Optional.ofNullable(path.get("group")).map(Group::of)); } DocumentId id() { return new DocumentId("id:" + requireNonNull(path.get("namespace")) + ":" + requireNonNull(path.get("documentType")) + ":" + group.map(Group::docIdPart).orElse("") + ":" + requireNonNull(path.getRest())); } String rawPath() { return path.asString(); } Optional<String> documentType() { return Optional.ofNullable(path.get("documentType")); } Optional<String> namespace() { return Optional.ofNullable(path.get("namespace")); } Optional<Group> group() { return group; } } static class Group { private final String value; private final String docIdPart; private final String selection; private Group(String value, String docIdPart, String selection) { Text.validateTextString(value) .ifPresent(codePoint -> { throw new IllegalArgumentException(String.format("Illegal code point U%04X in group", codePoint)); }); this.value = value; this.docIdPart = docIdPart; this.selection = selection; } public static Group of(long value) { return new Group(Long.toString(value), "n=" + value, "id.user==" + value); } public static Group of(String value) { return new Group(value, "g=" + value, "id.group=='" + value.replaceAll("'", "\\\\'") + "'"); } public String value() { return value; } public String docIdPart() { return docIdPart; } public String selection() { return selection; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Group group = 
(Group) o; return value.equals(group.value) && docIdPart.equals(group.docIdPart) && selection.equals(group.selection); } @Override public int hashCode() { return Objects.hash(value, docIdPart, selection); } @Override public String toString() { return "Group{" + "value='" + value + '\'' + ", docIdPart='" + docIdPart + '\'' + ", selection='" + selection + '\'' + '}'; } } }
/**
 * Request handler for the /document/v1 REST API: single-document get/put/update/remove,
 * and visiting (read, re-feed, mass update, mass delete) over document selections.
 * Operations are enqueued and dispatched asynchronously, with back-pressure via maxThrottled.
 */
class DocumentV1ApiHandler extends AbstractRequestHandler {

    private static final Duration defaultTimeout = Duration.ofSeconds(175);

    private static final Logger log = Logger.getLogger(DocumentV1ApiHandler.class.getName());
    private static final Parser<Integer> integerParser = Integer::parseInt;
    private static final Parser<Long> timeoutMillisParser = value -> ParameterParser.asMilliSeconds(value, defaultTimeout.toMillis());
    private static final Parser<Boolean> booleanParser = Boolean::parseBoolean;

    // Completion handler which only logs failures — used where nothing else can be done about them.
    private static final CompletionHandler logException = new CompletionHandler() {
        @Override public void completed() { }
        @Override public void failed(Throwable t) {
            log.log(FINE, "Exception writing or closing response data", t);
        }
    };

    // Content channel which accepts and discards all request content.
    private static final ContentChannel ignoredContent = new ContentChannel() {
        @Override public void write(ByteBuffer buf, CompletionHandler handler) { handler.completed(); }
        @Override public void close(CompletionHandler handler) { handler.completed(); }
    };

    private static final JsonFactory jsonFactory = new JsonFactory();

    // Request property names recognised by this API.
    private static final String CREATE = "create";
    private static final String CONDITION = "condition";
    private static final String ROUTE = "route";
    private static final String FIELD_SET = "fieldSet";
    private static final String SELECTION = "selection";
    private static final String CLUSTER = "cluster";
    private static final String CONTINUATION = "continuation";
    private static final String WANTED_DOCUMENT_COUNT = "wantedDocumentCount";
    private static final String CONCURRENCY = "concurrency";
    private static final String BUCKET_SPACE = "bucketSpace";
    private static final String TIMEOUT = "timeout";
    private static final String TRACELEVEL = "tracelevel";
    private static final String DESTINATION = "destination";

    private final Clock clock;
    private final Metric metric;
    private final DocumentApiMetrics metrics;
    private final DocumentOperationParser parser;
    private final long maxThrottled; // Max number of enqueued document operations before rejecting with 429.
    private final DocumentAccess access;
    private final AsyncSession asyncSession;
    private final Map<String, StorageCluster> clusters;
    private final Deque<Operation> operations;                 // Queue of single-document operations.
    private final Deque<BooleanSupplier> visitOperations = new ConcurrentLinkedDeque<>(); // Queue of per-document visitor operations.
    private final AtomicLong enqueued = new AtomicLong();      // Number of operations currently in the queue.
    private final AtomicLong outstanding = new AtomicLong();   // Number of operations sent but not yet answered.
    private final Map<VisitorControlHandler, VisitorSession> visits = new ConcurrentHashMap<>();
    private final ScheduledExecutorService dispatcher = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("document-api-handler-"));
    private final ScheduledExecutorService visitDispatcher = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("document-api-handler-visit-"));
    private final Map<String, Map<Method, Handler>> handlers = defineApi();

    @Inject
    public DocumentV1ApiHandler(Metric metric,
                                MetricReceiver metricReceiver,
                                VespaDocumentAccess documentAccess,
                                DocumentmanagerConfig documentManagerConfig,
                                ClusterListConfig clusterListConfig,
                                AllClustersBucketSpacesConfig bucketSpacesConfig,
                                DocumentOperationExecutorConfig executorConfig) {
        this(Clock.systemUTC(), metric, metricReceiver, documentAccess,
             documentManagerConfig, executorConfig, clusterListConfig, bucketSpacesConfig);
    }

    DocumentV1ApiHandler(Clock clock, Metric metric, MetricReceiver metricReceiver, DocumentAccess access,
                         DocumentmanagerConfig documentmanagerConfig, DocumentOperationExecutorConfig executorConfig,
                         ClusterListConfig clusterListConfig, AllClustersBucketSpacesConfig bucketSpacesConfig) {
        this.clock = clock;
        this.parser = new DocumentOperationParser(documentmanagerConfig);
        this.metric = metric;
        this.metrics = new DocumentApiMetrics(metricReceiver, "documentV1");
        this.maxThrottled = executorConfig.maxThrottled();
        this.access = access;
        this.asyncSession = access.createAsyncSession(new AsyncParameters());
        this.clusters = parseClusters(clusterListConfig, bucketSpacesConfig);
        this.operations = new ConcurrentLinkedDeque<>();
        // Periodically retry queued operations that were previously rejected with transient errors.
        this.dispatcher.scheduleWithFixedDelay(this::dispatchEnqueued,
                                               executorConfig.resendDelayMillis(),
                                               executorConfig.resendDelayMillis(),
                                               TimeUnit.MILLISECONDS);
        this.visitDispatcher.scheduleWithFixedDelay(this::dispatchVisitEnqueued,
                                                    executorConfig.resendDelayMillis(),
                                                    executorConfig.resendDelayMillis(),
                                                    TimeUnit.MILLISECONDS);
    }

    @Override
    public ContentChannel handleRequest(Request rawRequest, ResponseHandler rawResponseHandler) {
        HandlerMetricContextUtil.onHandle(rawRequest, metric, getClass());
        ResponseHandler responseHandler = response -> {
            HandlerMetricContextUtil.onHandled(rawRequest, metric, getClass());
            return rawResponseHandler.handleResponse(response);
        };

        HttpRequest request = (HttpRequest) rawRequest;
        try {
            // Set a timeout so the request will be timed out if inactive for too long,
            // and won't be cancelled by the timeout filter during result rendering.
            request.setTimeout(getProperty(request, TIMEOUT, timeoutMillisParser)
                                       .orElse(defaultTimeout.toMillis()),
                               TimeUnit.MILLISECONDS);

            Path requestPath = new Path(request.getUri());
            // First matching path wins; unknown method on a known path yields 405 (or OPTIONS listing).
            for (String path : handlers.keySet())
                if (requestPath.matches(path)) {
                    Map<Method, Handler> methods = handlers.get(path);
                    if (methods.containsKey(request.getMethod()))
                        return methods.get(request.getMethod()).handle(request, new DocumentPath(requestPath), responseHandler);

                    if (request.getMethod() == OPTIONS)
                        options(methods.keySet(), responseHandler);

                    methodNotAllowed(request, methods.keySet(), responseHandler);
                }
            notFound(request, handlers.keySet(), responseHandler);
        }
        catch (IllegalArgumentException e) {
            badRequest(request, e, responseHandler);
        }
        catch (RuntimeException e) {
            serverError(request, e, responseHandler);
        }
        return ignoredContent;
    }

    @Override
    public void handleTimeout(Request request, ResponseHandler responseHandler) {
        timeout((HttpRequest) request, "Request timeout after " + request.getTimeout(TimeUnit.MILLISECONDS) + "ms", responseHandler);
    }

    @Override
    // NOTE(review): the @Override above is dangling in front of an interface declaration and will not
    // compile as-is — it looks like a member between handleTimeout and Handler was lost when this
    // file was assembled; verify against the original source.
    @FunctionalInterface
    interface Handler {
        ContentChannel handle(HttpRequest request, DocumentPath path, ResponseHandler handler);
    }

    /** Defines all paths/methods handled by this handler. */
    private Map<String, Map<Method, Handler>> defineApi() {
        Map<String, Map<Method, Handler>> handlers = new LinkedHashMap<>();

        handlers.put("/document/v1/",
                     Map.of(GET, this::getDocuments,
                            POST, this::postDocuments,
                            DELETE, this::deleteDocuments));

        handlers.put("/document/v1/{namespace}/{documentType}/docid/",
                     Map.of(GET, this::getDocuments,
                            POST, this::postDocuments,
                            PUT, this::putDocuments,
                            DELETE, this::deleteDocuments));

        handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/",
                     Map.of(GET, this::getDocuments,
                            POST, this::postDocuments,
                            PUT, this::putDocuments,
                            DELETE, this::deleteDocuments));

        handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/",
                     Map.of(GET, this::getDocuments,
                            POST, this::postDocuments,
                            PUT, this::putDocuments,
                            DELETE, this::deleteDocuments));

        handlers.put("/document/v1/{namespace}/{documentType}/docid/{*}",
                     Map.of(GET, this::getDocument,
                            POST, this::postDocument,
                            PUT, this::putDocument,
                            DELETE, this::deleteDocument));

        handlers.put("/document/v1/{namespace}/{documentType}/group/{group}/{*}",
                     Map.of(GET, this::getDocument,
                            POST, this::postDocument,
                            PUT, this::putDocument,
                            DELETE, this::deleteDocument));

        handlers.put("/document/v1/{namespace}/{documentType}/number/{number}/{*}",
                     Map.of(GET, this::getDocument,
                            POST, this::postDocument,
                            PUT, this::putDocument,
                            DELETE, this::deleteDocument));

        return Collections.unmodifiableMap(handlers);
    }

    /** GET on a selection: visits matching documents and streams them into the response. */
    private ContentChannel getDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) {
        enqueueAndDispatch(request, handler, () -> {
            VisitorParameters parameters = parseParameters(request, path);
            return () -> {
                visitAndWrite(request, parameters, handler);
                return true; // VisitorSession has its own throttling.
            };
        });
        return ignoredContent;
    }

    /** POST on a selection: visits matching documents and re-feeds them to the required 'destination' route. */
    private ContentChannel postDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) {
        enqueueAndDispatch(request, handler, () -> {
            VisitorParameters parameters = parseParameters(request, path);
            parameters.setRemoteDataHandler(getProperty(request, DESTINATION)
                    .orElseThrow(() -> new IllegalArgumentException("Missing required property '" + DESTINATION + "'")));
            return () -> {
                visitWithRemote(request, parameters, handler);
                return true; // VisitorSession has its own throttling.
            };
        });
        return ignoredContent;
    }

    /** PUT on a selection: applies the request-body update to all documents matching the required selection. */
    private ContentChannel putDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) {
        if (getProperty(request, SELECTION).isEmpty())
            throw new IllegalArgumentException("Missing required property '" + SELECTION + "'");

        return new ForwardingContentChannel(in -> {
            enqueueAndDispatch(request, handler, () -> {
                String type = path.documentType().orElseThrow(() -> new IllegalStateException("Document type must be specified for mass updates"));
                // Dummy id only to satisfy the JSON parser; the real id is set per visited document.
                IdIdString dummyId = new IdIdString("dummy", type, "", "");
                VisitorParameters parameters = parseParameters(request, path);
                parameters.setFieldSet(DocIdOnly.NAME); // Only ids are needed; updates are applied per id.
                DocumentUpdate update = parser.parseUpdate(in, dummyId.toString());
                // Re-check the selection on each update, as documents may change between visit and update.
                update.setCondition(new TestAndSetCondition(parameters.getDocumentSelection()));
                return () -> {
                    visitAndUpdate(request, parameters, handler, update, getProperty(request, DESTINATION));
                    return true; // VisitorSession has its own throttling.
                };
            });
        });
    }

    /** DELETE on a selection: removes all documents matching the required selection. */
    private ContentChannel deleteDocuments(HttpRequest request, DocumentPath path, ResponseHandler handler) {
        if (getProperty(request, SELECTION).isEmpty())
            throw new IllegalArgumentException("Missing required property '" + SELECTION + "'");

        enqueueAndDispatch(request, handler, () -> {
            VisitorParameters parameters = parseParameters(request, path);
            parameters.setFieldSet(DocIdOnly.NAME); // Only ids are needed; removes are issued per id.
            // Re-check the selection on each remove, as documents may change between visit and remove.
            TestAndSetCondition condition = new TestAndSetCondition(parameters.getDocumentSelection());
            return () -> {
                visitAndDelete(request, parameters, handler, condition, getProperty(request, DESTINATION));
                return true; // VisitorSession has its own throttling.
            };
        });
        return ignoredContent;
    }

    /** GET a single document, rendered in the response, or 404 when absent. */
    private ContentChannel getDocument(HttpRequest request, DocumentPath path, ResponseHandler handler) {
        enqueueAndDispatch(request, handler, () -> {
            DocumentOperationParameters rawParameters = parametersFromRequest(request, CLUSTER, FIELD_SET);
            if (rawParameters.fieldSet().isEmpty())
                rawParameters = rawParameters.withFieldSet(path.documentType().orElseThrow() + ":[document]");
            DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> {
                outstanding.decrementAndGet();
                handle(path, handler, response, (document, jsonResponse) -> {
                    if (document != null) {
                        jsonResponse.writeSingleDocument(document);
                        jsonResponse.commit(Response.Status.OK);
                    }
                    else
                        jsonResponse.commit(Response.Status.NOT_FOUND);
                });
            });
            return () -> dispatchOperation(() -> asyncSession.get(path.id(), parameters));
        });
        return ignoredContent;
    }

    /** POST a single document: feeds the request-body document put. */
    private ContentChannel postDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) {
        ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.PUT, clock.instant());
        return new ForwardingContentChannel(in -> {
            enqueueAndDispatch(request, handler, () -> {
                DocumentPut put = parser.parsePut(in, path.id().toString());
                getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(put::setCondition);
                DocumentOperationParameters parameters = parametersFromRequest(request, ROUTE)
                        .withResponseHandler(response -> {
                            outstanding.decrementAndGet();
                            handle(path, handler, response);
                        });
                return () -> dispatchOperation(() -> asyncSession.put(put, parameters));
            });
        });
    }

    /** PUT a single document: feeds the request-body document update. */
    private ContentChannel putDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) {
        ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.UPDATE, clock.instant());
        return new ForwardingContentChannel(in -> {
            enqueueAndDispatch(request, handler, () -> {
                DocumentUpdate update = parser.parseUpdate(in, path.id().toString());
                getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(update::setCondition);
                getProperty(request, CREATE, booleanParser).ifPresent(update::setCreateIfNonExistent);
                DocumentOperationParameters parameters = parametersFromRequest(request, ROUTE)
                        .withResponseHandler(response -> {
                            outstanding.decrementAndGet();
                            handle(path, handler, response);
                        });
                return () -> dispatchOperation(() -> asyncSession.update(update, parameters));
            });
        });
    }

    /** DELETE a single document, optionally with a test-and-set condition. */
    private ContentChannel deleteDocument(HttpRequest request, DocumentPath path, ResponseHandler rawHandler) {
        ResponseHandler handler = new MeasuringResponseHandler(rawHandler, com.yahoo.documentapi.metrics.DocumentOperationType.REMOVE, clock.instant());
        enqueueAndDispatch(request, handler, () -> {
            DocumentRemove remove = new DocumentRemove(path.id());
            getProperty(request, CONDITION).map(TestAndSetCondition::new).ifPresent(remove::setCondition);
            DocumentOperationParameters parameters = parametersFromRequest(request, ROUTE)
                    .withResponseHandler(response -> {
                        outstanding.decrementAndGet();
                        handle(path, handler, response);
                    });
            return () -> dispatchOperation(() -> asyncSession.remove(remove, parameters));
        });
        return ignoredContent;
    }

    /** Builds DocumentOperationParameters from tracelevel plus the named request properties (CLUSTER, FIELD_SET, ROUTE). */
    private DocumentOperationParameters parametersFromRequest(HttpRequest request, String... names) {
        DocumentOperationParameters parameters = getProperty(request, TRACELEVEL, integerParser).map(parameters()::withTraceLevel)
                                                                                               .orElse(parameters());
        for (String name : names) switch (name) {
            case CLUSTER:
                parameters = getProperty(request, CLUSTER).map(cluster -> resolveCluster(Optional.of(cluster), clusters).route())
                                                          .map(parameters::withRoute)
                                                          .orElse(parameters);
                break;
            case FIELD_SET:
                parameters = getProperty(request, FIELD_SET).map(parameters::withFieldSet)
                                                            .orElse(parameters);
                break;
            case ROUTE:
                parameters = getProperty(request, ROUTE).map(parameters::withRoute)
                                                        .orElse(parameters);
                break;
            default:
                throw new IllegalArgumentException("Unrecognized document operation parameter name '" + name + "'");
        }
        return parameters;
    }

    /** Dispatches enqueued requests until one is blocked. */
    void dispatchEnqueued() {
        try {
            while (dispatchFirst());
        }
        catch (Exception e) {
            log.log(WARNING, "Uncaught exception in /document/v1 dispatch thread", e);
        }
    }

    /** Attempts to dispatch the first enqueued operations, and returns whether this was successful. */
    private boolean dispatchFirst() {
        Operation operation = operations.poll();
        if (operation == null)
            return false;

        if (operation.dispatch()) {
            enqueued.decrementAndGet();
            return true;
        }
        operation
((DocumentResponse) response).getDocument() : null, jsonResponse); else { jsonResponse.writeMessage(response.getTextMessage()); switch (response.outcome()) { case NOT_FOUND: jsonResponse.commit(Response.Status.NOT_FOUND); break; case CONDITION_FAILED: jsonResponse.commit(Response.Status.PRECONDITION_FAILED); break; case INSUFFICIENT_STORAGE: jsonResponse.commit(Response.Status.INSUFFICIENT_STORAGE); break; default: log.log(WARNING, "Unexpected document API operation outcome '" + response.outcome() + "'"); case ERROR: log.log(FINE, () -> "Exception performing document operation: " + response.getTextMessage()); jsonResponse.commit(Response.Status.INTERNAL_SERVER_ERROR); } } } catch (Exception e) { log.log(FINE, "Failed writing response", e); } } private static void handle(DocumentPath path, ResponseHandler handler, com.yahoo.documentapi.Response response) { handle(path, handler, response, (document, jsonResponse) -> jsonResponse.commit(Response.Status.OK)); } private VisitorParameters parseParameters(HttpRequest request, DocumentPath path) { int wantedDocumentCount = Math.min(1 << 10, getProperty(request, WANTED_DOCUMENT_COUNT, integerParser).orElse(1)); if (wantedDocumentCount <= 0) throw new IllegalArgumentException("wantedDocumentCount must be positive"); int concurrency = Math.min(100, getProperty(request, CONCURRENCY, integerParser).orElse(1)); if (concurrency <= 0) throw new IllegalArgumentException("concurrency must be positive"); Optional<String> cluster = getProperty(request, CLUSTER); if (cluster.isEmpty() && path.documentType().isEmpty()) throw new IllegalArgumentException("Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level"); VisitorParameters parameters = new VisitorParameters(Stream.of(getProperty(request, SELECTION), path.documentType(), path.namespace().map(value -> "id.namespace=='" + value + "'"), path.group().map(Group::selection)) .flatMap(Optional::stream) .reduce(new StringJoiner(") and (", 
"(", ")").setEmptyValue(""), StringJoiner::add, StringJoiner::merge) .toString()); getProperty(request, CONTINUATION).map(ProgressToken::fromSerializedString).ifPresent(parameters::setResumeToken); parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(AllFields.NAME))); parameters.setMaxTotalHits(wantedDocumentCount); parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(concurrency)); parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - 5000)); parameters.visitInconsistentBuckets(true); parameters.setPriority(DocumentProtocol.Priority.NORMAL_4); StorageCluster storageCluster = resolveCluster(cluster, clusters); parameters.setRoute(storageCluster.route()); parameters.setBucketSpace(resolveBucket(storageCluster, path.documentType(), List.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()), getProperty(request, BUCKET_SPACE))); return parameters; } private interface VisitCallback { /** Called at the start of response rendering. */ default void onStart(JsonResponse response) throws IOException { } /** Called for every document received from backend visitors — must call the ack for these to proceed. */ default void onDocument(JsonResponse response, Document document, Runnable ack, Consumer<String> onError) { } /** Called at the end of response rendering, before generic status data is written. 
*/ default void onEnd(JsonResponse response) throws IOException { } } private void visitAndDelete(HttpRequest request, VisitorParameters parameters, ResponseHandler handler, TestAndSetCondition condition, Optional<String> route) { visitAndProcess(request, parameters, handler, route, (id, operationParameters) -> { DocumentRemove remove = new DocumentRemove(id); remove.setCondition(condition); return asyncSession.remove(remove, operationParameters); }); } private void visitAndUpdate(HttpRequest request, VisitorParameters parameters, ResponseHandler handler, DocumentUpdate protoUpdate, Optional<String> route) { visitAndProcess(request, parameters, handler, route, (id, operationParameters) -> { DocumentUpdate update = new DocumentUpdate(protoUpdate); update.setId(id); return asyncSession.update(update, operationParameters); }); } private void visitAndProcess(HttpRequest request, VisitorParameters parameters, ResponseHandler handler, Optional<String> route, BiFunction<DocumentId, DocumentOperationParameters, Result> operation) { visit(request, parameters, handler, new VisitCallback() { @Override public void onDocument(JsonResponse response, Document document, Runnable ack, Consumer<String> onError) { DocumentOperationParameters operationParameters = (route.isEmpty() ? 
parameters() : parameters().withRoute(route.get())) .withResponseHandler(operationResponse -> { outstanding.decrementAndGet(); switch (operationResponse.outcome()) { case SUCCESS: case NOT_FOUND: case CONDITION_FAILED: break; case ERROR: case INSUFFICIENT_STORAGE: onError.accept(operationResponse.getTextMessage()); break; default: onError.accept("Unexpected response " + operationResponse); } }); visitOperations.offer(() -> { Result result = operation.apply(document.getId(), operationParameters); if (result.type() == Result.ResultType.TRANSIENT_ERROR) return false; if (result.type() == Result.ResultType.FATAL_ERROR) onError.accept(result.getError().getMessage()); else outstanding.incrementAndGet(); ack.run(); return true; }); dispatchFirstVisit(); } }); } private void visitAndWrite(HttpRequest request, VisitorParameters parameters, ResponseHandler handler) { visit(request, parameters, handler, new VisitCallback() { @Override public void onStart(JsonResponse response) throws IOException { response.writeDocumentsArrayStart(); } @Override public void onDocument(JsonResponse response, Document document, Runnable ack, Consumer<String> onError) { response.writeDocumentValue(document); ack.run(); } @Override public void onEnd(JsonResponse response) throws IOException { response.writeArrayEnd(); } }); } private void visitWithRemote(HttpRequest request, VisitorParameters parameters, ResponseHandler handler) { visit(request, parameters, handler, new VisitCallback() { }); } private void visit(HttpRequest request, VisitorParameters parameters, ResponseHandler handler, VisitCallback callback) { try { JsonResponse response = JsonResponse.create(request, handler); Phaser phaser = new Phaser(2); AtomicReference<String> error = new AtomicReference<>(); callback.onStart(response); VisitorControlHandler controller = new VisitorControlHandler() { @Override public void onDone(CompletionCode code, String message) { super.onDone(code, message); loggingException(() -> { 
callback.onEnd(response); switch (code) { case TIMEOUT: if ( ! hasVisitedAnyBuckets()) { response.writeMessage("No buckets visited within timeout of " + parameters.getSessionTimeoutMs() + "ms (request timeout -5s)"); response.respond(Response.Status.GATEWAY_TIMEOUT); break; } case SUCCESS: case ABORTED: if (error.get() == null) { if (getProgress() != null && ! getProgress().isFinished()) response.writeContinuation(getProgress().serializeToString()); response.respond(Response.Status.OK); break; } default: response.writeMessage(error.get() != null ? error.get() : message != null ? message : "Visiting failed"); response.respond(Response.Status.INTERNAL_SERVER_ERROR); } visitDispatcher.execute(() -> { phaser.arriveAndAwaitAdvance(); visits.remove(this).destroy(); }); }); } }; if (parameters.getRemoteDataHandler() == null) { parameters.setLocalDataHandler(new VisitorDataHandler() { @Override public void onMessage(Message m, AckToken token) { if (m instanceof PutDocumentMessage) callback.onDocument(response, ((PutDocumentMessage) m).getDocumentPut().getDocument(), () -> ack(token), errorMessage -> { error.set(errorMessage); controller.abort(); }); else throw new UnsupportedOperationException("Only PutDocumentMessage is supported, but got a " + m.getClass()); } }); } parameters.setControlHandler(controller); visits.put(controller, access.createVisitorSession(parameters)); phaser.arriveAndDeregister(); } catch (ParseException e) { badRequest(request, new IllegalArgumentException(e), handler); } catch (IOException e) { log.log(FINE, "Failed writing response", e); } } /** Returns the last property with the given name, if present, or throws if this is empty or blank. */ private static Optional<String> getProperty(HttpRequest request, String name) { if ( ! 
request.parameters().containsKey(name)) return Optional.empty(); List<String> values = request.parameters().get(name); String value; if (values == null || values.isEmpty() || (value = values.get(values.size() - 1)) == null || value.isEmpty()) throw new IllegalArgumentException("Expected non-empty value for request property '" + name + "'"); return Optional.of(value); } private static <T> Optional<T> getProperty(HttpRequest request, String name, Parser<T> parser) { return getProperty(request, name).map(parser::parse); } @FunctionalInterface interface Parser<T> extends Function<String, T> { default T parse(String value) { try { return apply(value); } catch (RuntimeException e) { throw new IllegalArgumentException("Failed parsing '" + value + "': " + Exceptions.toMessageString(e)); } } } private class MeasuringResponseHandler implements ResponseHandler { private final ResponseHandler delegate; private final com.yahoo.documentapi.metrics.DocumentOperationType type; private final Instant start; private MeasuringResponseHandler(ResponseHandler delegate, com.yahoo.documentapi.metrics.DocumentOperationType type, Instant start) { this.delegate = delegate; this.type = type; this.start = start; } @Override public ContentChannel handleResponse(Response response) { switch (response.getStatus() / 100) { case 2: metrics.reportSuccessful(type, start); break; case 4: metrics.reportFailure(type, DocumentOperationStatus.REQUEST_ERROR); break; case 5: metrics.reportFailure(type, DocumentOperationStatus.SERVER_ERROR); break; } return delegate.handleResponse(response); } } static class StorageCluster { private final String name; private final Map<String, String> documentBuckets; StorageCluster(String name, Map<String, String> documentBuckets) { this.name = requireNonNull(name); this.documentBuckets = Map.copyOf(documentBuckets); } String name() { return name; } String route() { return "[Content:cluster=" + name() + "]"; } Optional<String> bucketOf(String documentType) { return 
Optional.ofNullable(documentBuckets.get(documentType)); } } private static Map<String, StorageCluster> parseClusters(ClusterListConfig clusters, AllClustersBucketSpacesConfig buckets) { return clusters.storage().stream() .collect(toUnmodifiableMap(storage -> storage.name(), storage -> new StorageCluster(storage.name(), buckets.cluster(storage.name()) .documentType().entrySet().stream() .collect(toMap(entry -> entry.getKey(), entry -> entry.getValue().bucketSpace()))))); } static StorageCluster resolveCluster(Optional<String> wanted, Map<String, StorageCluster> clusters) { if (clusters.isEmpty()) throw new IllegalArgumentException("Your Vespa deployment has no content clusters, so the document API is not enabled"); return wanted.map(cluster -> { if ( ! clusters.containsKey(cluster)) throw new IllegalArgumentException("Your Vespa deployment has no content cluster '" + cluster + "', only '" + String.join("', '", clusters.keySet()) + "'"); return clusters.get(cluster); }).orElseGet(() -> { if (clusters.size() > 1) throw new IllegalArgumentException("Please specify one of the content clusters in your Vespa deployment: '" + String.join("', '", clusters.keySet()) + "'"); return clusters.values().iterator().next(); }); } static String resolveBucket(StorageCluster cluster, Optional<String> documentType, List<String> bucketSpaces, Optional<String> bucketSpace) { return documentType.map(type -> cluster.bucketOf(type) .orElseThrow(() -> new IllegalArgumentException("Document type '" + type + "' in cluster '" + cluster.name() + "' is not mapped to a known bucket space"))) .or(() -> bucketSpace.map(space -> { if ( ! 
bucketSpaces.contains(space)) throw new IllegalArgumentException("Bucket space '" + space + "' is not a known bucket space; expected one of " + String.join(", ", bucketSpaces)); return space; })) .orElse(FixedBucketSpaces.defaultSpace()); } private static class DocumentPath { private final Path path; private final Optional<Group> group; DocumentPath(Path path) { this.path = requireNonNull(path); this.group = Optional.ofNullable(path.get("number")).map(integerParser::parse).map(Group::of) .or(() -> Optional.ofNullable(path.get("group")).map(Group::of)); } DocumentId id() { return new DocumentId("id:" + requireNonNull(path.get("namespace")) + ":" + requireNonNull(path.get("documentType")) + ":" + group.map(Group::docIdPart).orElse("") + ":" + requireNonNull(path.getRest())); } String rawPath() { return path.asString(); } Optional<String> documentType() { return Optional.ofNullable(path.get("documentType")); } Optional<String> namespace() { return Optional.ofNullable(path.get("namespace")); } Optional<Group> group() { return group; } } static class Group { private final String value; private final String docIdPart; private final String selection; private Group(String value, String docIdPart, String selection) { Text.validateTextString(value) .ifPresent(codePoint -> { throw new IllegalArgumentException(String.format("Illegal code point U%04X in group", codePoint)); }); this.value = value; this.docIdPart = docIdPart; this.selection = selection; } public static Group of(long value) { return new Group(Long.toString(value), "n=" + value, "id.user==" + value); } public static Group of(String value) { return new Group(value, "g=" + value, "id.group=='" + value.replaceAll("'", "\\\\'") + "'"); } public String value() { return value; } public String docIdPart() { return docIdPart; } public String selection() { return selection; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Group group = 
(Group) o; return value.equals(group.value) && docIdPart.equals(group.docIdPart) && selection.equals(group.selection); } @Override public int hashCode() { return Objects.hash(value, docIdPart, selection); } @Override public String toString() { return "Group{" + "value='" + value + '\'' + ", docIdPart='" + docIdPart + '\'' + ", selection='" + selection + '\'' + '}'; } } }
```suggestion query.getModel().getQueryTree().setRoot(replaceOrItems(root, hits)); if (root != query.getModel().getQueryTree().getRoot()) query.trace("Replaced OR by WeakAnd", true, 2); ```
private void replaceOrItems(Query query) { Item root = query.getModel().getQueryTree().getRoot(); int hits = query.properties().getInteger("wand.hits", WeakAndItem.defaultN); query.getModel().getQueryTree().setRoot(replaceOrItems(root, hits)); }
query.getModel().getQueryTree().setRoot(replaceOrItems(root, hits));
private void replaceOrItems(Query query) { Item root = query.getModel().getQueryTree().getRoot(); int hits = query.properties().getInteger("wand.hits", WeakAndItem.defaultN); query.getModel().getQueryTree().setRoot(replaceOrItems(root, hits)); if (root != query.getModel().getQueryTree().getRoot()) query.trace("Replaced OR by WeakAnd", true, 2); }
class WeakAndReplacementSearcher extends Searcher { private static final CompoundName WEAKAND_REPLACE = new CompoundName("weakand.replace"); @Override public Result search(Query query, Execution execution) { if (!query.properties().getBoolean(WEAKAND_REPLACE)) { return execution.search(query); } replaceOrItems(query); return execution.search(query); } /** * Extracts the queryTree root and the wand.hits property to send to the recursive replacement function * @param query the search query */ /** * Recursively iterates over an Item to replace all instances of OrItems with WeakAndItems * @param item the current item in the replacement iteration * @param hits the wand.hits property from the request which is assigned to the N value of the new WeakAndItem * @return The original item or a WeakAndItem replacement of an OrItem */ private Item replaceOrItems(Item item, int hits) { if (!(item instanceof CompositeItem)) { return item; } CompositeItem compositeItem = (CompositeItem) item; if (compositeItem instanceof OrItem) { WeakAndItem newItem = new WeakAndItem(hits); newItem.setWeight(compositeItem.getWeight()); compositeItem.items().forEach(newItem::addItem); compositeItem = newItem; } for (int i = 0; i < compositeItem.getItemCount(); i++) { Item subItem = compositeItem.getItem(i); Item replacedItem = replaceOrItems(subItem, hits); if (replacedItem != subItem) { compositeItem.setItem(i, replacedItem); } } return compositeItem; } }
class WeakAndReplacementSearcher extends Searcher { private static final CompoundName WEAKAND_REPLACE = new CompoundName("weakAnd.replace"); @Override public Result search(Query query, Execution execution) { if (!query.properties().getBoolean(WEAKAND_REPLACE)) { return execution.search(query); } replaceOrItems(query); return execution.search(query); } /** * Extracts the queryTree root and the wand.hits property to send to the recursive replacement function * @param query the search query */ /** * Recursively iterates over an Item to replace all instances of OrItems with WeakAndItems * @param item the current item in the replacement iteration * @param hits the wand.hits property from the request which is assigned to the N value of the new WeakAndItem * @return The original item or a WeakAndItem replacement of an OrItem */ private Item replaceOrItems(Item item, int hits) { if (!(item instanceof CompositeItem)) { return item; } CompositeItem compositeItem = (CompositeItem) item; if (compositeItem instanceof OrItem) { WeakAndItem newItem = new WeakAndItem(hits); newItem.setWeight(compositeItem.getWeight()); compositeItem.items().forEach(newItem::addItem); compositeItem = newItem; } for (int i = 0; i < compositeItem.getItemCount(); i++) { Item subItem = compositeItem.getItem(i); Item replacedItem = replaceOrItems(subItem, hits); if (replacedItem != subItem) { compositeItem.setItem(i, replacedItem); } } return compositeItem; } }
Consider adding a test that has multiple exhaustions for one node
public void nodes_above_limit_is_equal_to_node_resource_exhaustions() { var stats = ResourceUsageStats.calculateFrom(Collections.emptyList(), Collections.emptyMap(), createFeedBlock(exhaustion(1, "disk"), exhaustion(2, "memory"))); assertEquals(2, stats.getNodesAboveLimit()); }
}
public void nodes_above_limit_is_equal_to_node_resource_exhaustions() { var stats = ResourceUsageStats.calculateFrom(Collections.emptyList(), Collections.emptyMap(), createFeedBlock(exhaustion(1, "disk"), exhaustion(2, "memory"))); assertEquals(2, stats.getNodesAboveLimit()); }
class ResourceUsageStatsTest { private final double DELTA = 0.00001; @Test public void disk_and_memory_utilization_is_max_among_all_content_nodes() { var stats = ResourceUsageStats.calculateFrom(createNodeInfo( forNode(1, usage("disk", 0.3), usage("memory", 0.6)), forNode(2, usage("disk", 0.4), usage("memory", 0.5))), createFeedBlockLimits(0.8, 0.9), Optional.empty()); assertEquals(0.4 / 0.8, stats.getMaxDiskUtilization(), DELTA); assertEquals(0.6 / 0.9, stats.getMaxMemoryUtilization(), DELTA); } @Test public void disk_and_memory_utilization_is_zero_if_no_samples_are_available() { var stats = ResourceUsageStats.calculateFrom(createNodeInfo( forNode(1), forNode(2)), createFeedBlockLimits(0.8, 0.9), Optional.empty()); assertEquals(0.0, stats.getMaxDiskUtilization(), DELTA); assertEquals(0.0, stats.getMaxMemoryUtilization(), DELTA); } @Test public void nodes_above_limit_is_zero_without_feed_block_status() { var stats = ResourceUsageStats.calculateFrom(Collections.emptyList(), Collections.emptyMap(), Optional.empty()); assertEquals(0, stats.getNodesAboveLimit()); } @Test private static Collection<NodeInfo> createNodeInfo(FeedBlockUtil.NodeAndUsages... nodeAndUsages) { return createFixtureWithReportedUsages(nodeAndUsages).cluster().getNodeInfo(); } private static Map<String, Double> createFeedBlockLimits(double diskLimit, double memoryLimit) { return Map.of("disk", diskLimit, "memory", memoryLimit); } private static Optional<ClusterStateBundle.FeedBlock> createFeedBlock(NodeResourceExhaustion... exhaustions) { return Optional.of(new ClusterStateBundle.FeedBlock(true, "", setOf(exhaustions))); } }
class ResourceUsageStatsTest { private final double DELTA = 0.00001; @Test public void disk_and_memory_utilization_is_max_among_all_content_nodes() { var stats = ResourceUsageStats.calculateFrom(createNodeInfo( forNode(1, usage("disk", 0.3), usage("memory", 0.6)), forNode(2, usage("disk", 0.4), usage("memory", 0.5))), createFeedBlockLimits(0.8, 0.9), Optional.empty()); assertEquals(0.4 / 0.8, stats.getMaxDiskUtilization(), DELTA); assertEquals(0.6 / 0.9, stats.getMaxMemoryUtilization(), DELTA); } @Test public void disk_and_memory_utilization_is_zero_if_no_samples_are_available() { var stats = ResourceUsageStats.calculateFrom(createNodeInfo( forNode(1), forNode(2)), createFeedBlockLimits(0.8, 0.9), Optional.empty()); assertEquals(0.0, stats.getMaxDiskUtilization(), DELTA); assertEquals(0.0, stats.getMaxMemoryUtilization(), DELTA); } @Test public void nodes_above_limit_is_zero_without_feed_block_status() { var stats = ResourceUsageStats.calculateFrom(Collections.emptyList(), Collections.emptyMap(), Optional.empty()); assertEquals(0, stats.getNodesAboveLimit()); } @Test @Test public void nodes_above_limit_counts_each_node_only_once() { var stats = ResourceUsageStats.calculateFrom(Collections.emptyList(), Collections.emptyMap(), createFeedBlock(exhaustion(1, "disk"), exhaustion(1, "memory"))); assertEquals(1, stats.getNodesAboveLimit()); } private static Collection<NodeInfo> createNodeInfo(FeedBlockUtil.NodeAndUsages... nodeAndUsages) { return createFixtureWithReportedUsages(nodeAndUsages).cluster().getNodeInfo(); } private static Map<String, Double> createFeedBlockLimits(double diskLimit, double memoryLimit) { return Map.of("disk", diskLimit, "memory", memoryLimit); } private static Optional<ClusterStateBundle.FeedBlock> createFeedBlock(NodeResourceExhaustion... exhaustions) { return Optional.of(new ClusterStateBundle.FeedBlock(true, "", setOf(exhaustions))); } }
Fixed
public void nodes_above_limit_is_equal_to_node_resource_exhaustions() { var stats = ResourceUsageStats.calculateFrom(Collections.emptyList(), Collections.emptyMap(), createFeedBlock(exhaustion(1, "disk"), exhaustion(2, "memory"))); assertEquals(2, stats.getNodesAboveLimit()); }
}
public void nodes_above_limit_is_equal_to_node_resource_exhaustions() { var stats = ResourceUsageStats.calculateFrom(Collections.emptyList(), Collections.emptyMap(), createFeedBlock(exhaustion(1, "disk"), exhaustion(2, "memory"))); assertEquals(2, stats.getNodesAboveLimit()); }
class ResourceUsageStatsTest { private final double DELTA = 0.00001; @Test public void disk_and_memory_utilization_is_max_among_all_content_nodes() { var stats = ResourceUsageStats.calculateFrom(createNodeInfo( forNode(1, usage("disk", 0.3), usage("memory", 0.6)), forNode(2, usage("disk", 0.4), usage("memory", 0.5))), createFeedBlockLimits(0.8, 0.9), Optional.empty()); assertEquals(0.4 / 0.8, stats.getMaxDiskUtilization(), DELTA); assertEquals(0.6 / 0.9, stats.getMaxMemoryUtilization(), DELTA); } @Test public void disk_and_memory_utilization_is_zero_if_no_samples_are_available() { var stats = ResourceUsageStats.calculateFrom(createNodeInfo( forNode(1), forNode(2)), createFeedBlockLimits(0.8, 0.9), Optional.empty()); assertEquals(0.0, stats.getMaxDiskUtilization(), DELTA); assertEquals(0.0, stats.getMaxMemoryUtilization(), DELTA); } @Test public void nodes_above_limit_is_zero_without_feed_block_status() { var stats = ResourceUsageStats.calculateFrom(Collections.emptyList(), Collections.emptyMap(), Optional.empty()); assertEquals(0, stats.getNodesAboveLimit()); } @Test private static Collection<NodeInfo> createNodeInfo(FeedBlockUtil.NodeAndUsages... nodeAndUsages) { return createFixtureWithReportedUsages(nodeAndUsages).cluster().getNodeInfo(); } private static Map<String, Double> createFeedBlockLimits(double diskLimit, double memoryLimit) { return Map.of("disk", diskLimit, "memory", memoryLimit); } private static Optional<ClusterStateBundle.FeedBlock> createFeedBlock(NodeResourceExhaustion... exhaustions) { return Optional.of(new ClusterStateBundle.FeedBlock(true, "", setOf(exhaustions))); } }
class ResourceUsageStatsTest { private final double DELTA = 0.00001; @Test public void disk_and_memory_utilization_is_max_among_all_content_nodes() { var stats = ResourceUsageStats.calculateFrom(createNodeInfo( forNode(1, usage("disk", 0.3), usage("memory", 0.6)), forNode(2, usage("disk", 0.4), usage("memory", 0.5))), createFeedBlockLimits(0.8, 0.9), Optional.empty()); assertEquals(0.4 / 0.8, stats.getMaxDiskUtilization(), DELTA); assertEquals(0.6 / 0.9, stats.getMaxMemoryUtilization(), DELTA); } @Test public void disk_and_memory_utilization_is_zero_if_no_samples_are_available() { var stats = ResourceUsageStats.calculateFrom(createNodeInfo( forNode(1), forNode(2)), createFeedBlockLimits(0.8, 0.9), Optional.empty()); assertEquals(0.0, stats.getMaxDiskUtilization(), DELTA); assertEquals(0.0, stats.getMaxMemoryUtilization(), DELTA); } @Test public void nodes_above_limit_is_zero_without_feed_block_status() { var stats = ResourceUsageStats.calculateFrom(Collections.emptyList(), Collections.emptyMap(), Optional.empty()); assertEquals(0, stats.getNodesAboveLimit()); } @Test @Test public void nodes_above_limit_counts_each_node_only_once() { var stats = ResourceUsageStats.calculateFrom(Collections.emptyList(), Collections.emptyMap(), createFeedBlock(exhaustion(1, "disk"), exhaustion(1, "memory"))); assertEquals(1, stats.getNodesAboveLimit()); } private static Collection<NodeInfo> createNodeInfo(FeedBlockUtil.NodeAndUsages... nodeAndUsages) { return createFixtureWithReportedUsages(nodeAndUsages).cluster().getNodeInfo(); } private static Map<String, Double> createFeedBlockLimits(double diskLimit, double memoryLimit) { return Map.of("disk", diskLimit, "memory", memoryLimit); } private static Optional<ClusterStateBundle.FeedBlock> createFeedBlock(NodeResourceExhaustion... exhaustions) { return Optional.of(new ClusterStateBundle.FeedBlock(true, "", setOf(exhaustions))); } }
Should check `isHost()` instead.
public List<NodeAcl> getChildAcls(Node host) { if ( host.type() != NodeType.host) throw new IllegalArgumentException("Only hosts have children"); NodeList allNodes = list(); return list().childrenOf(host).asList().stream() .map(childNode -> childNode.acl(allNodes, loadBalancers)) .collect(Collectors.toUnmodifiableList()); }
if ( host.type() != NodeType.host) throw new IllegalArgumentException("Only hosts have children");
public List<NodeAcl> getChildAcls(Node host) { if ( ! host.type().isHost()) throw new IllegalArgumentException("Only hosts have children"); NodeList allNodes = list(); return list().childrenOf(host).asList().stream() .map(childNode -> childNode.acl(allNodes, loadBalancers)) .collect(Collectors.toUnmodifiableList()); }
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final LoadBalancers loadBalancers; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.loadBalancers = new LoadBalancers(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return 
infrastructureVersions; } /** Returns the status of firmware checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } /** Returns the load balancers available in this node repo */ public LoadBalancers loadBalancers() { return loadBalancers; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Returns ACLs for the children of the given host. * * @param host node for which to generate ACLs * @return the list of node ACLs */ /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. 
* * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } public NodeList list(ApplicationId application, State ... inState) { return NodeList.copyOf(getNodes(application, inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! 
node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. */ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes 
ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty()) illegal("Can not set host " + node + " ready. Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. 
*/ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */ public void remove(ApplicationTransaction transaction) { NodeList applicationNodes = list(transaction.application()); NodeList activeNodes = applicationNodes.state(State.active); deactivate(activeNodes.asList(), transaction); db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system, Optional.of("Application is removed"), transaction.nested()); applications.remove(transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. 
*/ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (node.state() != State.parked && agent != Agent.operator && (node.status().wantToDeprovision() || retiredByOperator(node))) return park(node.hostname(), false, agent, reason, transaction); else return db.writeTo(State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } private static boolean retiredByOperator(Node node) { return node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(agent -> agent == Agent.operator) .orElse(false); } /** * Fails this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, keepAllocation, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason, transaction); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(node, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(Node node, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. 
It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. 
* * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. 
* * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! 
host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (dynamicProvisioning) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. 
*/ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = getNode(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname())); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname)); } private Mutex lock(Node node) { return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final LoadBalancers loadBalancers; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.loadBalancers = new LoadBalancers(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return 
infrastructureVersions; } /** Returns the status of firmware checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } /** Returns the load balancers available in this node repo */ public LoadBalancers loadBalancers() { return loadBalancers; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Returns ACLs for the children of the given host. * * @param host node for which to generate ACLs * @return the list of node ACLs */ /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. 
* * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } public NodeList list(ApplicationId application, State ... inState) { return NodeList.copyOf(getNodes(application, inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! 
node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. */ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes 
ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty()) illegal("Can not set host " + node + " ready. Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. 
*/ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */ public void remove(ApplicationTransaction transaction) { NodeList applicationNodes = list(transaction.application()); NodeList activeNodes = applicationNodes.state(State.active); deactivate(activeNodes.asList(), transaction); db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system, Optional.of("Application is removed"), transaction.nested()); applications.remove(transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. 
*/ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (node.state() != State.parked && agent != Agent.operator && (node.status().wantToDeprovision() || retiredByOperator(node))) return park(node.hostname(), false, agent, reason, transaction); else return db.writeTo(State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } private static boolean retiredByOperator(Node node) { return node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(agent -> agent == Agent.operator) .orElse(false); } /** * Fails this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, keepAllocation, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason, transaction); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(node, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(Node node, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. 
It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. 
* * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. 
* * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! 
host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (dynamicProvisioning) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. 
*/ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = getNode(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname())); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname)); } private Mutex lock(Node node) { return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
Done - thanks. I see we have many other NodeType.host comparisons as well. I guess all should likely use isHost(), or have you looked at that before?
public List<NodeAcl> getChildAcls(Node host) { if ( host.type() != NodeType.host) throw new IllegalArgumentException("Only hosts have children"); NodeList allNodes = list(); return list().childrenOf(host).asList().stream() .map(childNode -> childNode.acl(allNodes, loadBalancers)) .collect(Collectors.toUnmodifiableList()); }
if ( host.type() != NodeType.host) throw new IllegalArgumentException("Only hosts have children");
public List<NodeAcl> getChildAcls(Node host) { if ( ! host.type().isHost()) throw new IllegalArgumentException("Only hosts have children"); NodeList allNodes = list(); return list().childrenOf(host).asList().stream() .map(childNode -> childNode.acl(allNodes, loadBalancers)) .collect(Collectors.toUnmodifiableList()); }
/**
 * Keeps track of the nodes known to this zone and performs their state transitions,
 * backed by a curator (ZooKeeper) database. All mutating operations go through
 * {@link CuratorDatabaseClient}; callers must hold the appropriate application or
 * unallocated lock where documented.
 */
class NodeRepository extends AbstractComponent {

    private static final Logger log = Logger.getLogger(NodeRepository.class.getName());

    private final CuratorDatabaseClient db;
    private final Clock clock;
    private final Zone zone;
    private final NodeFlavors flavors;
    private final HostResourcesCalculator resourcesCalculator;
    private final NameResolver nameResolver;
    private final OsVersions osVersions;
    private final InfrastructureVersions infrastructureVersions;
    private final FirmwareChecks firmwareChecks;
    private final ContainerImages containerImages;
    private final JobControl jobControl;
    private final Applications applications;
    private final LoadBalancers loadBalancers;
    private final int spareCount;

    /**
     * Creates a node repository from a zookeeper provider.
     * This will use the system time to make time-sensitive decisions
     */
    @Inject
    public NodeRepository(NodeRepositoryConfig config,
                          NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Zone zone,
                          FlagSource flagSource) {
        this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(),
             DockerImage.fromString(config.containerImage())
                        .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())),
             flagSource,
             config.useCuratorClientCache(),
             // One spare in production zones without dynamic provisioning, none otherwise
             zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0,
             config.nodeCacheSize());
    }

    /**
     * Creates a node repository from a zookeeper provider and a clock instance
     * which will be used for time-sensitive decisions.
     */
    public NodeRepository(NodeFlavors flavors,
                          ProvisionServiceProvider provisionServiceProvider,
                          Curator curator,
                          Clock clock,
                          Zone zone,
                          NameResolver nameResolver,
                          DockerImage containerImage,
                          FlagSource flagSource,
                          boolean useCuratorClientCache,
                          int spareCount,
                          long nodeCacheSize) {
        this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
        this.zone = zone;
        this.clock = clock;
        this.flavors = flavors;
        this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator();
        this.nameResolver = nameResolver;
        this.osVersions = new OsVersions(this);
        this.infrastructureVersions = new InfrastructureVersions(db);
        this.firmwareChecks = new FirmwareChecks(db, clock);
        this.containerImages = new ContainerImages(db, containerImage);
        this.jobControl = new JobControl(new JobControlFlags(db, flagSource));
        this.applications = new Applications(db);
        this.loadBalancers = new LoadBalancers(db);
        this.spareCount = spareCount;
        rewriteNodes();
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    private void rewriteNodes() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (State state : State.values()) {
            List<Node> nodes = db.readNodes(state);
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /** Returns the curator database client used by this */
    public CuratorDatabaseClient database() { return db; }

    /** @return The name resolver used to resolve hostname and ip addresses */
    public NameResolver nameResolver() { return nameResolver; }

    /** Returns the OS versions to use for nodes in this */
    public OsVersions osVersions() { return osVersions; }

    /** Returns the infrastructure versions to use for nodes in this */
    public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }

    /** Returns the status of firmware checks for hosts managed by this. */
    public FirmwareChecks firmwareChecks() { return firmwareChecks; }

    /** Returns the docker images to use for nodes in this. */
    public ContainerImages containerImages() { return containerImages; }

    /** Returns the status of maintenance jobs managed by this. */
    public JobControl jobControl() { return jobControl; }

    /** Returns this node repo's view of the applications deployed to it */
    public Applications applications() { return applications; }

    /** Returns the load balancers available in this node repo */
    public LoadBalancers loadBalancers() { return loadBalancers; }

    public NodeFlavors flavors() { return flavors; }

    public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; }

    /** The number of nodes we should ensure has free capacity for node failures whenever possible */
    public int spareCount() { return spareCount; }

    /**
     * Returns ACLs for the children of the given host.
     *
     * @param host node for which to generate ACLs
     * @return the list of node ACLs
     */
    // NOTE(review): the getChildAcls method this Javadoc documents is not present in this
    // chunk — presumably excised; confirm against the full file.
    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> getNode(String hostname, State ... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns all nodes in any of the given states.
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the node, or empty if it was not found in any of the given states
     */
    public List<Node> getNodes(State ... inState) {
        return new ArrayList<>(db.readNodes(inState));
    }

    /**
     * Finds and returns the nodes of the given type in any of the given states.
     *
     * @param type the node type to return
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the node, or empty if it was not found in any of the given states
     */
    public List<Node> getNodes(NodeType type, State ... inState) {
        return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
    }

    /** Returns a filterable list of nodes in this repository in any of the given states */
    public NodeList list(State ... inState) {
        return NodeList.copyOf(getNodes(inState));
    }

    public NodeList list(ApplicationId application, State ... inState) {
        return NodeList.copyOf(getNodes(application, inState));
    }

    /** Returns a filterable list of all nodes of an application */
    public NodeList list(ApplicationId application) {
        return NodeList.copyOf(getNodes(application));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(getNodes(), lock);
    }

    public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); }
    public List<Node> getInactive() { return db.readNodes(State.inactive); }
    public List<Node> getFailed() { return db.readNodes(State.failed); }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(State.active);
        if (activeNodes.size() <= 5) return true; // Too few nodes to draw a conclusion
        NodeList downNodes = activeNodes.down();
        // More than 20% of active nodes down -> zone is considered unhealthy
        return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
    }

    /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
    public List<Node> addDockerNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
                illegal("Cannot add " + node + ": This is not a docker node");
            if (node.allocation().isEmpty())
                illegal("Cannot add " + node + ": Docker containers needs to be allocated");
            Optional<Node> existing = getNode(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd =  new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = getNode(node.hostname());
                if (existing.isPresent()) {
                    if (existing.get().state() != State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Carry history, reports, fail count and firmware status over from the
                    // deprovisioned node, then replace it with the new one
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    nodesToRemove.add(existing.get());
                }
                nodesToAdd.add(node);
            }
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent);
            db.removeNodes(nodesToRemove);
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != State.provisioned && node.state() != State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty())
                            illegal("Can not set host " + node + " ready. Its IP address pool is empty.");
                        return node.withWantToRetire(false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());
            return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
        if (nodeToReady.state() == State.ready) return nodeToReady;
        return setReady(List.of(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true)))
                                             .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
        // Stateless nodes go straight to dirty; stateful nodes are kept inactive so data survives
        var stateless = NodeList.copyOf(nodes).stateless();
        var stateful  = NodeList.copyOf(nodes).stateful();
        List<Node> written = new ArrayList<>();
        written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
        written.addAll(db.writeTo(State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
        return written;
    }

    /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */
    public void remove(ApplicationTransaction transaction) {
        NodeList applicationNodes = list(transaction.application());
        NodeList activeNodes = applicationNodes.state(State.active);
        deactivate(activeNodes.asList(), transaction);
        db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system,
                   Optional.of("Application is removed"), transaction.nested());
        applications.remove(transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeListFilter.from(nodes), (node, lock) -> deallocate(node, agent, reason));
    }

    public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = getNode(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
        // For hosts, include all children not already dirty
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                        Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                        Stream.of(nodeToDirty))
                .filter(node -> node.state() != State.dirty)
                .collect(Collectors.toList());
        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                .filter(node -> node.state() != State.provisioned)
                .filter(node -> node.state() != State.failed)
                .filter(node -> node.state() != State.parked)
                .filter(node -> node.state() != State.breakfixed)
                .map(Node::hostname)
                .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
        return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     */
    public Node deallocate(Node node, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node deallocated = deallocate(node, agent, reason, transaction);
        transaction.commit();
        return deallocated;
    }

    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
    }

    public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
        // Nodes marked for deprovisioning (or retired by an operator) go to parked instead of dirty
        if (node.state() != State.parked && agent != Agent.operator
            && (node.status().wantToDeprovision() || retiredByOperator(node)))
            return park(node.hostname(), false, agent, reason, transaction);
        else
            return db.writeTo(State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
    }

    private static boolean retiredByOperator(Node node) {
        return node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire)
                                                             .map(History.Event::agent)
                                                             .map(agent -> agent == Agent.operator)
                                                             .orElse(false);
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.failed, agent, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     *
     * @return List of all the failed nodes in their new state
     */
    public List<Node> failRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.failed, agent, Optional.of(reason));
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node parked = park(hostname, keepAllocation, agent, reason, transaction);
        transaction.commit();
        return parked;
    }

    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
        return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason), transaction);
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, true, State.active, agent, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found"));
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            List<Node> removed = removeChildren(node, false);
            removed.add(move(node, State.breakfixed, agent, Optional.of(reason)));
            return removed;
        }
    }

    private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) {
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child, toState, agent, reason))
                                 .collect(Collectors.toList());
        moved.add(move(hostname, true, toState, agent, reason));
        return moved;
    }

    private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(hostname, keepAllocation, toState, agent, reason, transaction);
        transaction.commit();
        return moved;
    }

    private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason,
                      NestedTransaction transaction) {
        Node node = getNode(hostname).orElseThrow(() ->
                new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
        if (!keepAllocation && node.allocation().isPresent()) {
            node = node.withoutAllocation();
        }
        return move(node, toState, agent, reason, transaction);
    }

    private Node move(Node node, State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(node, toState, agent, reason, transaction);
        transaction.commit();
        return moved;
    }

    private Node move(Node node, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) {
        if (toState == Node.State.active && node.allocation().isEmpty())
            illegal("Could not set " + node + " active. It has no allocation.");

        try (Mutex lock = lock(node)) {
            if (toState == State.active) {
                // Guard against two active nodes with the same cluster and index for the same owner
                for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
        }
    }

    /*
     * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == State.ready) return node;

        // A node cannot be readied if its parent host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);

            if (node.type().isHost()) {
                List<Node> removed = removeChildren(node, force);
                // Statically provisioned hosts are kept as deprovisioned (with IP config wiped)
                // so their history survives; dynamically provisioned ones are removed outright
                if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
                    db.removeNodes(List.of(node));
                else {
                    node = node.with(IP.Config.EMPTY);
                    move(node, State.deprovisioned, Agent.system, Optional.empty());
                }
                removed.add(node);
                return removed;
            }
            else {
                List<Node> removed = List.of(node);
                db.removeNodes(removed);
                return removed;
            }
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        db.removeNodes(List.of(node));
    }

    private List<Node> removeChildren(Node node, boolean force) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node: node is unallocated
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *      If only removing the container node: node in state ready
     *      If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingAsChild, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent())
            illegal(node + " is currently allocated and cannot be removed");

        if (!node.type().isHost() && !removingAsChild) {
            if (node.state() != State.ready)
                illegal(node + " can not be removed as it is not in the state " + State.ready);
        }
        else if (!node.type().isHost()) { // removing a child node
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready);
            if ( ! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
        else { // a host
            Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked);
            if (! legalStates.contains(node.state()))
                illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Throws if given node cannot be breakfixed.
     * Breakfix is allowed if the following is true:
     *  - Node is tenant host
     *  - Node is in zone without dynamic provisioning
     *  - Node is in parked or failed state
     */
    private void requireBreakfixable(Node node) {
        if (zone().getCloud().dynamicProvisioning()) {
            illegal("Can not breakfix in zone: " + zone());
        }

        if (node.type() != NodeType.host) {
            illegal(node + " can not be breakfixed as it is not a tenant host");
        }

        Set<State> legalStates = EnumSet.of(State.failed, State.parked);
        if (! legalStates.contains(node.state())) {
            illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(NodeFilter filter) {
        return performOn(StateFilter.from(State.active, filter),
                         (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                               lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(NodeFilter filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param filter the filter determining the set of nodes where the operation will be performed
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
        // Group matching nodes by owning application (unallocated nodes separately),
        // so each group can be processed under the lock that guards it
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();

        for (Node node : db.readNodes()) {
            if ( ! filter.matches(node)) continue;
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        // perform operation while holding locks
        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                // Re-read each node under the lock; skip nodes removed in the meantime
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname());
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
        if ( ! host.type().canRun(NodeType.tenant)) return false;
        if (host.status().wantToRetire()) return false;
        if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;

        if (dynamicProvisioning)
            // Dynamically provisioned hosts can take allocations before they become active
            return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
        else
            return host.state() == State.active;
    }

    /** Returns the time keeper of this system */
    public Clock clock() { return clock; }

    /** Returns the zone of this system */
    public Zone zone() { return zone; }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(Node node) {
        Node staleNode = node;

        final int maxRetries = 4;
        for (int i = 0; i < maxRetries; ++i) {
            // Take the lock guarding the node's (believed) owner, re-read the node, and retry
            // with the fresher copy if ownership changed while waiting for the lock
            Mutex lockToClose = lock(staleNode);
            try {
                Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state());
                if (freshNode.isEmpty()) {
                    freshNode = getNode(staleNode.hostname());
                    if (freshNode.isEmpty()) {
                        return Optional.empty();
                    }
                }

                if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                                   staleNode.allocation().map(Allocation::owner))) {
                    NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                    lockToClose = null; // lock ownership transferred to the returned NodeMutex
                    return Optional.of(nodeMutex);
                }

                staleNode = freshNode.get();
            } finally {
                if (lockToClose != null) lockToClose.close();
            }
        }

        throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                        "fetching an up to date node under lock: " + node.hostname());
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname()));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname));
    }

    // Allocated nodes are guarded by their owning application's lock, unallocated nodes by the shared lock
    private Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private void illegal(String message) { throw new IllegalArgumentException(message); }

}
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final LoadBalancers loadBalancers; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.loadBalancers = new LoadBalancers(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return 
infrastructureVersions; } /** Returns the status of firmware checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } /** Returns the load balancers available in this node repo */ public LoadBalancers loadBalancers() { return loadBalancers; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Returns ACLs for the children of the given host. * * @param host node for which to generate ACLs * @return the list of node ACLs */ /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. 
* * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } public NodeList list(ApplicationId application, State ... inState) { return NodeList.copyOf(getNodes(application, inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! 
node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. */ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes 
ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty()) illegal("Can not set host " + node + " ready. Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. 
*/ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */ public void remove(ApplicationTransaction transaction) { NodeList applicationNodes = list(transaction.application()); NodeList activeNodes = applicationNodes.state(State.active); deactivate(activeNodes.asList(), transaction); db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system, Optional.of("Application is removed"), transaction.nested()); applications.remove(transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. 
*/ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (node.state() != State.parked && agent != Agent.operator && (node.status().wantToDeprovision() || retiredByOperator(node))) return park(node.hostname(), false, agent, reason, transaction); else return db.writeTo(State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } private static boolean retiredByOperator(Node node) { return node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(agent -> agent == Agent.operator) .orElse(false); } /** * Fails this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, keepAllocation, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason, transaction); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(node, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(Node node, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. 
It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. 
* * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. 
* * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! 
host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (dynamicProvisioning) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. 
*/ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = getNode(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname())); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname)); } private Mutex lock(Node node) { return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
Yes, generally we should use `isHost()`, but I think there is a lot of code that only applies to tenant hosts. I did a quick check on usages in this module, and looks like `NodeType.host` is mostly used in dynamic provisioning, capacity decisions related to node allocation and rebalancing. I.e. cases where you really mean `NodeType.host`.
public List<NodeAcl> getChildAcls(Node host) { if ( host.type() != NodeType.host) throw new IllegalArgumentException("Only hosts have children"); NodeList allNodes = list(); return list().childrenOf(host).asList().stream() .map(childNode -> childNode.acl(allNodes, loadBalancers)) .collect(Collectors.toUnmodifiableList()); }
if ( host.type() != NodeType.host) throw new IllegalArgumentException("Only hosts have children");
public List<NodeAcl> getChildAcls(Node host) { if ( ! host.type().isHost()) throw new IllegalArgumentException("Only hosts have children"); NodeList allNodes = list(); return list().childrenOf(host).asList().stream() .map(childNode -> childNode.acl(allNodes, loadBalancers)) .collect(Collectors.toUnmodifiableList()); }
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final LoadBalancers loadBalancers; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.loadBalancers = new LoadBalancers(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return 
infrastructureVersions; } /** Returns the status of firmware checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } /** Returns the load balancers available in this node repo */ public LoadBalancers loadBalancers() { return loadBalancers; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Returns ACLs for the children of the given host. * * @param host node for which to generate ACLs * @return the list of node ACLs */ /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. 
* * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } public NodeList list(ApplicationId application, State ... inState) { return NodeList.copyOf(getNodes(application, inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! 
node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. */ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes 
ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty()) illegal("Can not set host " + node + " ready. Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. 
*/ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */ public void remove(ApplicationTransaction transaction) { NodeList applicationNodes = list(transaction.application()); NodeList activeNodes = applicationNodes.state(State.active); deactivate(activeNodes.asList(), transaction); db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system, Optional.of("Application is removed"), transaction.nested()); applications.remove(transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. 
*/ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (node.state() != State.parked && agent != Agent.operator && (node.status().wantToDeprovision() || retiredByOperator(node))) return park(node.hostname(), false, agent, reason, transaction); else return db.writeTo(State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } private static boolean retiredByOperator(Node node) { return node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(agent -> agent == Agent.operator) .orElse(false); } /** * Fails this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, keepAllocation, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason, transaction); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(node, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(Node node, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. 
It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. 
* * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. 
* * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! 
host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (dynamicProvisioning) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. 
*/ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = getNode(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname())); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname)); } private Mutex lock(Node node) { return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
class NodeRepository extends AbstractComponent { private static final Logger log = Logger.getLogger(NodeRepository.class.getName()); private final CuratorDatabaseClient db; private final Clock clock; private final Zone zone; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVersions; private final InfrastructureVersions infrastructureVersions; private final FirmwareChecks firmwareChecks; private final ContainerImages containerImages; private final JobControl jobControl; private final Applications applications; private final LoadBalancers loadBalancers; private final int spareCount; /** * Creates a node repository from a zookeeper provider. * This will use the system time to make time-sensitive decisions */ @Inject public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Zone zone, FlagSource flagSource) { this(flavors, provisionServiceProvider, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.containerImage()) .withReplacedBy(DockerImage.fromString(config.containerImageReplacement())), flagSource, config.useCuratorClientCache(), zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0, config.nodeCacheSize()); } /** * Creates a node repository from a zookeeper provider and a clock instance * which will be used for time-sensitive decisions. 
*/ public NodeRepository(NodeFlavors flavors, ProvisionServiceProvider provisionServiceProvider, Curator curator, Clock clock, Zone zone, NameResolver nameResolver, DockerImage containerImage, FlagSource flagSource, boolean useCuratorClientCache, int spareCount, long nodeCacheSize) { this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize); this.zone = zone; this.clock = clock; this.flavors = flavors; this.resourcesCalculator = provisionServiceProvider.getHostResourcesCalculator(); this.nameResolver = nameResolver; this.osVersions = new OsVersions(this); this.infrastructureVersions = new InfrastructureVersions(db); this.firmwareChecks = new FirmwareChecks(db, clock); this.containerImages = new ContainerImages(db, containerImage); this.jobControl = new JobControl(new JobControlFlags(db, flagSource)); this.applications = new Applications(db); this.loadBalancers = new LoadBalancers(db); this.spareCount = spareCount; rewriteNodes(); } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ private void rewriteNodes() { Instant start = clock.instant(); int nodesWritten = 0; for (State state : State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** Returns the curator database client used by this */ public CuratorDatabaseClient database() { return db; } /** @return The name resolver used to resolve hostname and ip addresses */ public NameResolver nameResolver() { return nameResolver; } /** Returns the OS versions to use for nodes in this */ public OsVersions osVersions() { return osVersions; } /** Returns the infrastructure versions to use for nodes in this */ public InfrastructureVersions infrastructureVersions() { return 
infrastructureVersions; } /** Returns the status of firmware checks for hosts managed by this. */ public FirmwareChecks firmwareChecks() { return firmwareChecks; } /** Returns the docker images to use for nodes in this. */ public ContainerImages containerImages() { return containerImages; } /** Returns the status of maintenance jobs managed by this. */ public JobControl jobControl() { return jobControl; } /** Returns this node repo's view of the applications deployed to it */ public Applications applications() { return applications; } /** Returns the load balancers available in this node repo */ public LoadBalancers loadBalancers() { return loadBalancers; } public NodeFlavors flavors() { return flavors; } public HostResourcesCalculator resourcesCalculator() { return resourcesCalculator; } /** The number of nodes we should ensure has free capacity for node failures whenever possible */ public int spareCount() { return spareCount; } /** * Returns ACLs for the children of the given host. * * @param host node for which to generate ACLs * @return the list of node ACLs */ /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> getNode(String hostname, State ... inState) { return db.readNode(hostname, inState); } /** * Returns all nodes in any of the given states. * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(State ... inState) { return new ArrayList<>(db.readNodes(inState)); } /** * Finds and returns the nodes of the given type in any of the given states. 
* * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ public List<Node> getNodes(NodeType type, State ... inState) { return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } /** Returns a filterable list of nodes in this repository in any of the given states */ public NodeList list(State ... inState) { return NodeList.copyOf(getNodes(inState)); } public NodeList list(ApplicationId application, State ... inState) { return NodeList.copyOf(getNodes(application, inState)); } /** Returns a filterable list of all nodes of an application */ public NodeList list(ApplicationId application) { return NodeList.copyOf(getNodes(application)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(getNodes(), lock); } public List<Node> getNodes(ApplicationId id, State ... inState) { return db.readNodes(id, inState); } public List<Node> getInactive() { return db.readNodes(State.inactive); } public List<Node> getFailed() { return db.readNodes(State.failed); } /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */ public List<Node> addDockerNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( ! 
node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) illegal("Cannot add " + node + ": This is not a docker node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Docker containers needs to be allocated"); Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. */ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = getNode(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), State.provisioned, agent); db.removeNodes(nodesToRemove); return resultingNodes; } } /** Sets a list of nodes 
ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != State.provisioned && node.state() != State.dirty) illegal("Can not set " + node + " ready. It is not provisioned or dirty."); if (node.type() == NodeType.host && node.ipConfig().pool().getIpSet().isEmpty()) illegal("Can not set host " + node + " ready. Its IP address pool is empty."); return node.withWantToRetire(false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found")); if (nodeToReady.state() == State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. 
*/ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream().map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */ public void remove(ApplicationTransaction transaction) { NodeList applicationNodes = list(transaction.application()); NodeList activeNodes = applicationNodes.state(State.active); deactivate(activeNodes.asList(), transaction); db.writeTo(State.dirty, applicationNodes.except(activeNodes.asSet()).asList(), Agent.system, Optional.of("Application is removed"), transaction.nested()); applications.remove(transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeListFilter.from(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = getNode(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != State.provisioned) .filter(node -> node.state() != State.failed) .filter(node -> node.state() != State.parked) .filter(node -> node.state() != State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. 
*/ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (node.state() != State.parked && agent != Agent.operator && (node.status().wantToDeprovision() || retiredByOperator(node))) return park(node.hostname(), false, agent, reason, transaction); else return db.writeTo(State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } private static boolean retiredByOperator(Node node) { return node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(agent -> agent == Agent.operator) .orElse(false); } /** * Fails this node and returns it in its new state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return move(hostname, true, State.failed, agent, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * * @return List of all the failed nodes in their new state */ public List<Node> failRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.failed, agent, Optional.of(reason)); } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } public Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, keepAllocation, State.parked, agent, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, true, State.active, agent, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found")); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); List<Node> removed = removeChildren(node, false); removed.add(move(node, State.breakfixed, agent, Optional.of(reason))); return removed; } } private List<Node> moveRecursively(String hostname, State toState, Agent agent, Optional<String> reason) { List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child, toState, agent, reason)) .collect(Collectors.toList()); moved.add(move(hostname, true, toState, agent, reason)); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, keepAllocation, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(String hostname, boolean keepAllocation, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { Node node = getNode(hostname).orElseThrow(() -> new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found")); if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } return move(node, toState, agent, reason, transaction); } private Node move(Node node, State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(node, toState, agent, reason, transaction); transaction.commit(); return moved; } private Node move(Node node, State toState, Agent agent, Optional<String> reason, NestedTransaction transaction) { if (toState == Node.State.active && node.allocation().isEmpty()) illegal("Could not set " + node + " active. 
It has no allocation."); try (Mutex lock = lock(node)) { if (toState == State.active) { for (Node currentActive : getNodes(node.allocation().get().owner(), State.active)) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For tenant docker * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); if (node.type().isHost()) { List<Node> removed = removeChildren(node, force); if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host) db.removeNodes(List.of(node)); else { node = node.with(IP.Config.EMPTY); move(node, State.deprovisioned, Agent.system, Optional.empty()); } removed.add(node); return removed; } else { List<Node> removed = List.of(node); db.removeNodes(removed); return removed; } } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); db.removeNodes(List.of(node)); } private List<Node> removeChildren(Node node, boolean force) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: node is unallocated * - Host node: iff in state provisioned|failed|parked * - Child node: * If only removing the container node: node in state ready * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingAsChild, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) illegal(node + " is currently allocated and cannot be removed"); if (!node.type().isHost() && !removingAsChild) { if (node.state() != State.ready) illegal(node + " can not be removed as it is not in the state " + State.ready); } else if (!node.type().isHost()) { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked, State.dirty, State.ready); if ( ! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } else { Set<State> legalStates = EnumSet.of(State.provisioned, State.failed, State.parked); if (! legalStates.contains(node.state())) illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone().getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone()); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<State> legalStates = EnumSet.of(State.failed, State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching the filter. 
* * @return the nodes in their new state */ public List<Node> restart(NodeFilter filter) { return performOn(StateFilter.from(State.active, filter), (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(NodeFilter filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. 
* * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } /** * Performs an operation requiring locking on all nodes matching some filter. * * @param filter the filter determining the set of nodes where the operation will be performed * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : db.readNodes()) { if ( ! filter.matches(node)) continue; if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public static boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! 
host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (dynamicProvisioning) return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state()); else return host.state() == State.active; } /** Returns the time keeper of this system */ public Clock clock() { return clock; } /** Returns the zone of this system */ public Zone zone() { return zone; } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. 
*/ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = getNode(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return getNode(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new IllegalArgumentException("No such node: " + node.hostname())); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname)); } private Mutex lock(Node node) { return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated(); } private void illegal(String message) { throw new IllegalArgumentException(message); } }
Where was this produced earlier on ?
public void getConfig(QrStartConfig.Builder builder) { int maxHeapSize = featureFlags.clusterControllerMaxHeapSizeInMb(); boolean verboseGc = (maxHeapSize < 512); builder.jvm .verbosegc(verboseGc) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(maxHeapSize) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(getJvmGCOptions().orElse(G1GC)); if (getEnvironmentVars() != null) { builder.qrs.env(getEnvironmentVars()); } }
builder.jvm
public void getConfig(QrStartConfig.Builder builder) { super.getConfig(builder); int maxHeapSize = featureFlags.clusterControllerMaxHeapSizeInMb(); boolean verboseGc = (maxHeapSize < 512); builder.jvm .verbosegc(verboseGc) .heapsize(maxHeapSize); }
class ClusterControllerContainerCluster extends ContainerCluster<ClusterControllerContainer> { private final ModelContext.FeatureFlags featureFlags; private final ReindexingContext reindexingContext; public ClusterControllerContainerCluster( AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId, name, deployState, false); addDefaultHandlersWithVip(); this.featureFlags = deployState.featureFlags(); this.reindexingContext = createReindexingContext(deployState); } @Override protected void doPrepare(DeployState deployState) { } @Override protected boolean messageBusEnabled() { return false; } @Override public ReindexingContext reindexingContext() { return reindexingContext; } private static ReindexingContext createReindexingContext(DeployState deployState) { return new ReindexingContext(deployState.reindexing().orElse(Reindexing.DISABLED_INSTANCE)); } }
class ClusterControllerContainerCluster extends ContainerCluster<ClusterControllerContainer> { private final ModelContext.FeatureFlags featureFlags; private final ReindexingContext reindexingContext; public ClusterControllerContainerCluster( AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId, name, deployState, false); addDefaultHandlersWithVip(); this.featureFlags = deployState.featureFlags(); this.reindexingContext = createReindexingContext(deployState); } @Override protected void doPrepare(DeployState deployState) { } @Override protected boolean messageBusEnabled() { return false; } @Override public ReindexingContext reindexingContext() { return reindexingContext; } private static ReindexingContext createReindexingContext(DeployState deployState) { return new ReindexingContext(deployState.reindexing().orElse(Reindexing.DISABLED_INSTANCE)); } }
Added in an earlier PR (https://github.com/vespa-engine/vespa/pull/16463), that @baldersheim merged before I added any reviewers, so forgot to add you afterwards.
public FeatureFlags(FlagSource source, ApplicationId appId) { this.defaultTermwiseLimit = flagValue(source, appId, Flags.DEFAULT_TERM_WISE_LIMIT); this.useThreePhaseUpdates = flagValue(source, appId, Flags.USE_THREE_PHASE_UPDATES); this.feedSequencer = flagValue(source, appId, Flags.FEED_SEQUENCER_TYPE); this.responseSequencer = flagValue(source, appId, Flags.RESPONSE_SEQUENCER_TYPE); this.numResponseThreads = flagValue(source, appId, Flags.RESPONSE_NUM_THREADS); this.skipCommunicationManagerThread = flagValue(source, appId, Flags.SKIP_COMMUNICATIONMANAGER_THREAD); this.skipMbusRequestThread = flagValue(source, appId, Flags.SKIP_MBUS_REQUEST_THREAD); this.skipMbusReplyThread = flagValue(source, appId, Flags.SKIP_MBUS_REPLY_THREAD); this.useAccessControlTlsHandshakeClientAuth = flagValue(source, appId, Flags.USE_ACCESS_CONTROL_CLIENT_AUTHENTICATION); this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE); this.feedConcurrency = flagValue(source, appId, Flags.FEED_CONCURRENCY); this.reconfigurableZookeeperServer = flagValue(source, appId, Flags.RECONFIGURABLE_ZOOKEEPER_SERVER_FOR_CLUSTER_CONTROLLER); this.useBucketExecutorForLidSpaceCompact = flagValue(source, appId, Flags.USE_BUCKET_EXECUTOR_FOR_LID_SPACE_COMPACT); this.enableFeedBlockInDistributor = flagValue(source, appId, Flags.ENABLE_FEED_BLOCK_IN_DISTRIBUTOR); this.maxDeadBytesRatio = flagValue(source, appId, Flags.MAX_DEAD_BYTES_RATIO); this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB); }
this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB);
public FeatureFlags(FlagSource source, ApplicationId appId) { this.defaultTermwiseLimit = flagValue(source, appId, Flags.DEFAULT_TERM_WISE_LIMIT); this.useThreePhaseUpdates = flagValue(source, appId, Flags.USE_THREE_PHASE_UPDATES); this.feedSequencer = flagValue(source, appId, Flags.FEED_SEQUENCER_TYPE); this.responseSequencer = flagValue(source, appId, Flags.RESPONSE_SEQUENCER_TYPE); this.numResponseThreads = flagValue(source, appId, Flags.RESPONSE_NUM_THREADS); this.skipCommunicationManagerThread = flagValue(source, appId, Flags.SKIP_COMMUNICATIONMANAGER_THREAD); this.skipMbusRequestThread = flagValue(source, appId, Flags.SKIP_MBUS_REQUEST_THREAD); this.skipMbusReplyThread = flagValue(source, appId, Flags.SKIP_MBUS_REPLY_THREAD); this.useAccessControlTlsHandshakeClientAuth = flagValue(source, appId, Flags.USE_ACCESS_CONTROL_CLIENT_AUTHENTICATION); this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE); this.feedConcurrency = flagValue(source, appId, Flags.FEED_CONCURRENCY); this.reconfigurableZookeeperServer = flagValue(source, appId, Flags.RECONFIGURABLE_ZOOKEEPER_SERVER_FOR_CLUSTER_CONTROLLER); this.useBucketExecutorForLidSpaceCompact = flagValue(source, appId, Flags.USE_BUCKET_EXECUTOR_FOR_LID_SPACE_COMPACT); this.enableFeedBlockInDistributor = flagValue(source, appId, Flags.ENABLE_FEED_BLOCK_IN_DISTRIBUTOR); this.maxDeadBytesRatio = flagValue(source, appId, Flags.MAX_DEAD_BYTES_RATIO); this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB); }
class FeatureFlags implements ModelContext.FeatureFlags { private final double defaultTermwiseLimit; private final boolean useThreePhaseUpdates; private final String feedSequencer; private final String responseSequencer; private final int numResponseThreads; private final boolean skipCommunicationManagerThread; private final boolean skipMbusRequestThread; private final boolean skipMbusReplyThread; private final boolean useAccessControlTlsHandshakeClientAuth; private final boolean useAsyncMessageHandlingOnSchedule; private final double feedConcurrency; private final boolean reconfigurableZookeeperServer; private final boolean useBucketExecutorForLidSpaceCompact; private final boolean enableFeedBlockInDistributor; private final double maxDeadBytesRatio; private final int clusterControllerMaxHeapSizeInMb; @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; } @Override public boolean useThreePhaseUpdates() { return useThreePhaseUpdates; } @Override public String feedSequencerType() { return feedSequencer; } @Override public String responseSequencerType() { return responseSequencer; } @Override public int defaultNumResponseThreads() { return numResponseThreads; } @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; } @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; } @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; } @Override public boolean useAccessControlTlsHandshakeClientAuth() { return useAccessControlTlsHandshakeClientAuth; } @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; } @Override public double feedConcurrency() { return feedConcurrency; } @Override public boolean reconfigurableZookeeperServer() { return reconfigurableZookeeperServer; } @Override public boolean useBucketExecutorForLidSpaceCompact() { return useBucketExecutorForLidSpaceCompact; } @Override public boolean 
enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; } @Override public double maxDeadBytesRatio() { return maxDeadBytesRatio; } @Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; } private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .boxedValue(); } }
class FeatureFlags implements ModelContext.FeatureFlags { private final double defaultTermwiseLimit; private final boolean useThreePhaseUpdates; private final String feedSequencer; private final String responseSequencer; private final int numResponseThreads; private final boolean skipCommunicationManagerThread; private final boolean skipMbusRequestThread; private final boolean skipMbusReplyThread; private final boolean useAccessControlTlsHandshakeClientAuth; private final boolean useAsyncMessageHandlingOnSchedule; private final double feedConcurrency; private final boolean reconfigurableZookeeperServer; private final boolean useBucketExecutorForLidSpaceCompact; private final boolean enableFeedBlockInDistributor; private final double maxDeadBytesRatio; private final int clusterControllerMaxHeapSizeInMb; @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; } @Override public boolean useThreePhaseUpdates() { return useThreePhaseUpdates; } @Override public String feedSequencerType() { return feedSequencer; } @Override public String responseSequencerType() { return responseSequencer; } @Override public int defaultNumResponseThreads() { return numResponseThreads; } @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; } @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; } @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; } @Override public boolean useAccessControlTlsHandshakeClientAuth() { return useAccessControlTlsHandshakeClientAuth; } @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; } @Override public double feedConcurrency() { return feedConcurrency; } @Override public boolean reconfigurableZookeeperServer() { return reconfigurableZookeeperServer; } @Override public boolean useBucketExecutorForLidSpaceCompact() { return useBucketExecutorForLidSpaceCompact; } @Override public boolean 
enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; } @Override public double maxDeadBytesRatio() { return maxDeadBytesRatio; } @Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; } private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .boxedValue(); } }
ContainerCluster
public void getConfig(QrStartConfig.Builder builder) { int maxHeapSize = featureFlags.clusterControllerMaxHeapSizeInMb(); boolean verboseGc = (maxHeapSize < 512); builder.jvm .verbosegc(verboseGc) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(maxHeapSize) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(getJvmGCOptions().orElse(G1GC)); if (getEnvironmentVars() != null) { builder.qrs.env(getEnvironmentVars()); } }
builder.jvm
public void getConfig(QrStartConfig.Builder builder) { super.getConfig(builder); int maxHeapSize = featureFlags.clusterControllerMaxHeapSizeInMb(); boolean verboseGc = (maxHeapSize < 512); builder.jvm .verbosegc(verboseGc) .heapsize(maxHeapSize); }
class ClusterControllerContainerCluster extends ContainerCluster<ClusterControllerContainer> { private final ModelContext.FeatureFlags featureFlags; private final ReindexingContext reindexingContext; public ClusterControllerContainerCluster( AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId, name, deployState, false); addDefaultHandlersWithVip(); this.featureFlags = deployState.featureFlags(); this.reindexingContext = createReindexingContext(deployState); } @Override protected void doPrepare(DeployState deployState) { } @Override protected boolean messageBusEnabled() { return false; } @Override public ReindexingContext reindexingContext() { return reindexingContext; } private static ReindexingContext createReindexingContext(DeployState deployState) { return new ReindexingContext(deployState.reindexing().orElse(Reindexing.DISABLED_INSTANCE)); } }
class ClusterControllerContainerCluster extends ContainerCluster<ClusterControllerContainer> { private final ModelContext.FeatureFlags featureFlags; private final ReindexingContext reindexingContext; public ClusterControllerContainerCluster( AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId, name, deployState, false); addDefaultHandlersWithVip(); this.featureFlags = deployState.featureFlags(); this.reindexingContext = createReindexingContext(deployState); } @Override protected void doPrepare(DeployState deployState) { } @Override protected boolean messageBusEnabled() { return false; } @Override public ReindexingContext reindexingContext() { return reindexingContext; } private static ReindexingContext createReindexingContext(DeployState deployState) { return new ReindexingContext(deployState.reindexing().orElse(Reindexing.DISABLED_INSTANCE)); } }
Would prefer a call to `super.getConfig(builder)`, followed by modifications, to avoid duplication.
public void getConfig(QrStartConfig.Builder builder) { int maxHeapSize = featureFlags.clusterControllerMaxHeapSizeInMb(); boolean verboseGc = (maxHeapSize < 512); builder.jvm .verbosegc(verboseGc) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(maxHeapSize) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(getJvmGCOptions().orElse(G1GC)); if (getEnvironmentVars() != null) { builder.qrs.env(getEnvironmentVars()); } }
builder.jvm
public void getConfig(QrStartConfig.Builder builder) { super.getConfig(builder); int maxHeapSize = featureFlags.clusterControllerMaxHeapSizeInMb(); boolean verboseGc = (maxHeapSize < 512); builder.jvm .verbosegc(verboseGc) .heapsize(maxHeapSize); }
class ClusterControllerContainerCluster extends ContainerCluster<ClusterControllerContainer> { private final ModelContext.FeatureFlags featureFlags; private final ReindexingContext reindexingContext; public ClusterControllerContainerCluster( AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId, name, deployState, false); addDefaultHandlersWithVip(); this.featureFlags = deployState.featureFlags(); this.reindexingContext = createReindexingContext(deployState); } @Override protected void doPrepare(DeployState deployState) { } @Override protected boolean messageBusEnabled() { return false; } @Override public ReindexingContext reindexingContext() { return reindexingContext; } private static ReindexingContext createReindexingContext(DeployState deployState) { return new ReindexingContext(deployState.reindexing().orElse(Reindexing.DISABLED_INSTANCE)); } }
class ClusterControllerContainerCluster extends ContainerCluster<ClusterControllerContainer> { private final ModelContext.FeatureFlags featureFlags; private final ReindexingContext reindexingContext; public ClusterControllerContainerCluster( AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId, name, deployState, false); addDefaultHandlersWithVip(); this.featureFlags = deployState.featureFlags(); this.reindexingContext = createReindexingContext(deployState); } @Override protected void doPrepare(DeployState deployState) { } @Override protected boolean messageBusEnabled() { return false; } @Override public ReindexingContext reindexingContext() { return reindexingContext; } private static ReindexingContext createReindexingContext(DeployState deployState) { return new ReindexingContext(deployState.reindexing().orElse(Reindexing.DISABLED_INSTANCE)); } }
Yup, fixed
public void getConfig(QrStartConfig.Builder builder) { int maxHeapSize = featureFlags.clusterControllerMaxHeapSizeInMb(); boolean verboseGc = (maxHeapSize < 512); builder.jvm .verbosegc(verboseGc) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(maxHeapSize) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(getJvmGCOptions().orElse(G1GC)); if (getEnvironmentVars() != null) { builder.qrs.env(getEnvironmentVars()); } }
builder.jvm
public void getConfig(QrStartConfig.Builder builder) { super.getConfig(builder); int maxHeapSize = featureFlags.clusterControllerMaxHeapSizeInMb(); boolean verboseGc = (maxHeapSize < 512); builder.jvm .verbosegc(verboseGc) .heapsize(maxHeapSize); }
class ClusterControllerContainerCluster extends ContainerCluster<ClusterControllerContainer> { private final ModelContext.FeatureFlags featureFlags; private final ReindexingContext reindexingContext; public ClusterControllerContainerCluster( AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId, name, deployState, false); addDefaultHandlersWithVip(); this.featureFlags = deployState.featureFlags(); this.reindexingContext = createReindexingContext(deployState); } @Override protected void doPrepare(DeployState deployState) { } @Override protected boolean messageBusEnabled() { return false; } @Override public ReindexingContext reindexingContext() { return reindexingContext; } private static ReindexingContext createReindexingContext(DeployState deployState) { return new ReindexingContext(deployState.reindexing().orElse(Reindexing.DISABLED_INSTANCE)); } }
class ClusterControllerContainerCluster extends ContainerCluster<ClusterControllerContainer> { private final ModelContext.FeatureFlags featureFlags; private final ReindexingContext reindexingContext; public ClusterControllerContainerCluster( AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId, name, deployState, false); addDefaultHandlersWithVip(); this.featureFlags = deployState.featureFlags(); this.reindexingContext = createReindexingContext(deployState); } @Override protected void doPrepare(DeployState deployState) { } @Override protected boolean messageBusEnabled() { return false; } @Override public ReindexingContext reindexingContext() { return reindexingContext; } private static ReindexingContext createReindexingContext(DeployState deployState) { return new ReindexingContext(deployState.reindexing().orElse(Reindexing.DISABLED_INSTANCE)); } }
Yeah, sorry, missed this.
public FeatureFlags(FlagSource source, ApplicationId appId) { this.defaultTermwiseLimit = flagValue(source, appId, Flags.DEFAULT_TERM_WISE_LIMIT); this.useThreePhaseUpdates = flagValue(source, appId, Flags.USE_THREE_PHASE_UPDATES); this.feedSequencer = flagValue(source, appId, Flags.FEED_SEQUENCER_TYPE); this.responseSequencer = flagValue(source, appId, Flags.RESPONSE_SEQUENCER_TYPE); this.numResponseThreads = flagValue(source, appId, Flags.RESPONSE_NUM_THREADS); this.skipCommunicationManagerThread = flagValue(source, appId, Flags.SKIP_COMMUNICATIONMANAGER_THREAD); this.skipMbusRequestThread = flagValue(source, appId, Flags.SKIP_MBUS_REQUEST_THREAD); this.skipMbusReplyThread = flagValue(source, appId, Flags.SKIP_MBUS_REPLY_THREAD); this.useAccessControlTlsHandshakeClientAuth = flagValue(source, appId, Flags.USE_ACCESS_CONTROL_CLIENT_AUTHENTICATION); this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE); this.feedConcurrency = flagValue(source, appId, Flags.FEED_CONCURRENCY); this.reconfigurableZookeeperServer = flagValue(source, appId, Flags.RECONFIGURABLE_ZOOKEEPER_SERVER_FOR_CLUSTER_CONTROLLER); this.useBucketExecutorForLidSpaceCompact = flagValue(source, appId, Flags.USE_BUCKET_EXECUTOR_FOR_LID_SPACE_COMPACT); this.enableFeedBlockInDistributor = flagValue(source, appId, Flags.ENABLE_FEED_BLOCK_IN_DISTRIBUTOR); this.maxDeadBytesRatio = flagValue(source, appId, Flags.MAX_DEAD_BYTES_RATIO); this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB); }
this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB);
public FeatureFlags(FlagSource source, ApplicationId appId) { this.defaultTermwiseLimit = flagValue(source, appId, Flags.DEFAULT_TERM_WISE_LIMIT); this.useThreePhaseUpdates = flagValue(source, appId, Flags.USE_THREE_PHASE_UPDATES); this.feedSequencer = flagValue(source, appId, Flags.FEED_SEQUENCER_TYPE); this.responseSequencer = flagValue(source, appId, Flags.RESPONSE_SEQUENCER_TYPE); this.numResponseThreads = flagValue(source, appId, Flags.RESPONSE_NUM_THREADS); this.skipCommunicationManagerThread = flagValue(source, appId, Flags.SKIP_COMMUNICATIONMANAGER_THREAD); this.skipMbusRequestThread = flagValue(source, appId, Flags.SKIP_MBUS_REQUEST_THREAD); this.skipMbusReplyThread = flagValue(source, appId, Flags.SKIP_MBUS_REPLY_THREAD); this.useAccessControlTlsHandshakeClientAuth = flagValue(source, appId, Flags.USE_ACCESS_CONTROL_CLIENT_AUTHENTICATION); this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE); this.feedConcurrency = flagValue(source, appId, Flags.FEED_CONCURRENCY); this.reconfigurableZookeeperServer = flagValue(source, appId, Flags.RECONFIGURABLE_ZOOKEEPER_SERVER_FOR_CLUSTER_CONTROLLER); this.useBucketExecutorForLidSpaceCompact = flagValue(source, appId, Flags.USE_BUCKET_EXECUTOR_FOR_LID_SPACE_COMPACT); this.enableFeedBlockInDistributor = flagValue(source, appId, Flags.ENABLE_FEED_BLOCK_IN_DISTRIBUTOR); this.maxDeadBytesRatio = flagValue(source, appId, Flags.MAX_DEAD_BYTES_RATIO); this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB); }
class FeatureFlags implements ModelContext.FeatureFlags { private final double defaultTermwiseLimit; private final boolean useThreePhaseUpdates; private final String feedSequencer; private final String responseSequencer; private final int numResponseThreads; private final boolean skipCommunicationManagerThread; private final boolean skipMbusRequestThread; private final boolean skipMbusReplyThread; private final boolean useAccessControlTlsHandshakeClientAuth; private final boolean useAsyncMessageHandlingOnSchedule; private final double feedConcurrency; private final boolean reconfigurableZookeeperServer; private final boolean useBucketExecutorForLidSpaceCompact; private final boolean enableFeedBlockInDistributor; private final double maxDeadBytesRatio; private final int clusterControllerMaxHeapSizeInMb; @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; } @Override public boolean useThreePhaseUpdates() { return useThreePhaseUpdates; } @Override public String feedSequencerType() { return feedSequencer; } @Override public String responseSequencerType() { return responseSequencer; } @Override public int defaultNumResponseThreads() { return numResponseThreads; } @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; } @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; } @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; } @Override public boolean useAccessControlTlsHandshakeClientAuth() { return useAccessControlTlsHandshakeClientAuth; } @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; } @Override public double feedConcurrency() { return feedConcurrency; } @Override public boolean reconfigurableZookeeperServer() { return reconfigurableZookeeperServer; } @Override public boolean useBucketExecutorForLidSpaceCompact() { return useBucketExecutorForLidSpaceCompact; } @Override public boolean 
enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; } @Override public double maxDeadBytesRatio() { return maxDeadBytesRatio; } @Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; } private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .boxedValue(); } }
class FeatureFlags implements ModelContext.FeatureFlags { private final double defaultTermwiseLimit; private final boolean useThreePhaseUpdates; private final String feedSequencer; private final String responseSequencer; private final int numResponseThreads; private final boolean skipCommunicationManagerThread; private final boolean skipMbusRequestThread; private final boolean skipMbusReplyThread; private final boolean useAccessControlTlsHandshakeClientAuth; private final boolean useAsyncMessageHandlingOnSchedule; private final double feedConcurrency; private final boolean reconfigurableZookeeperServer; private final boolean useBucketExecutorForLidSpaceCompact; private final boolean enableFeedBlockInDistributor; private final double maxDeadBytesRatio; private final int clusterControllerMaxHeapSizeInMb; @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; } @Override public boolean useThreePhaseUpdates() { return useThreePhaseUpdates; } @Override public String feedSequencerType() { return feedSequencer; } @Override public String responseSequencerType() { return responseSequencer; } @Override public int defaultNumResponseThreads() { return numResponseThreads; } @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; } @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; } @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; } @Override public boolean useAccessControlTlsHandshakeClientAuth() { return useAccessControlTlsHandshakeClientAuth; } @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; } @Override public double feedConcurrency() { return feedConcurrency; } @Override public boolean reconfigurableZookeeperServer() { return reconfigurableZookeeperServer; } @Override public boolean useBucketExecutorForLidSpaceCompact() { return useBucketExecutorForLidSpaceCompact; } @Override public boolean 
enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; } @Override public double maxDeadBytesRatio() { return maxDeadBytesRatio; } @Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; } private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .boxedValue(); } }
But I don't see that enum constant!
public FeatureFlags(FlagSource source, ApplicationId appId) { this.defaultTermwiseLimit = flagValue(source, appId, Flags.DEFAULT_TERM_WISE_LIMIT); this.useThreePhaseUpdates = flagValue(source, appId, Flags.USE_THREE_PHASE_UPDATES); this.feedSequencer = flagValue(source, appId, Flags.FEED_SEQUENCER_TYPE); this.responseSequencer = flagValue(source, appId, Flags.RESPONSE_SEQUENCER_TYPE); this.numResponseThreads = flagValue(source, appId, Flags.RESPONSE_NUM_THREADS); this.skipCommunicationManagerThread = flagValue(source, appId, Flags.SKIP_COMMUNICATIONMANAGER_THREAD); this.skipMbusRequestThread = flagValue(source, appId, Flags.SKIP_MBUS_REQUEST_THREAD); this.skipMbusReplyThread = flagValue(source, appId, Flags.SKIP_MBUS_REPLY_THREAD); this.useAccessControlTlsHandshakeClientAuth = flagValue(source, appId, Flags.USE_ACCESS_CONTROL_CLIENT_AUTHENTICATION); this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE); this.feedConcurrency = flagValue(source, appId, Flags.FEED_CONCURRENCY); this.reconfigurableZookeeperServer = flagValue(source, appId, Flags.RECONFIGURABLE_ZOOKEEPER_SERVER_FOR_CLUSTER_CONTROLLER); this.useBucketExecutorForLidSpaceCompact = flagValue(source, appId, Flags.USE_BUCKET_EXECUTOR_FOR_LID_SPACE_COMPACT); this.enableFeedBlockInDistributor = flagValue(source, appId, Flags.ENABLE_FEED_BLOCK_IN_DISTRIBUTOR); this.maxDeadBytesRatio = flagValue(source, appId, Flags.MAX_DEAD_BYTES_RATIO); this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB); }
this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB);
public FeatureFlags(FlagSource source, ApplicationId appId) { this.defaultTermwiseLimit = flagValue(source, appId, Flags.DEFAULT_TERM_WISE_LIMIT); this.useThreePhaseUpdates = flagValue(source, appId, Flags.USE_THREE_PHASE_UPDATES); this.feedSequencer = flagValue(source, appId, Flags.FEED_SEQUENCER_TYPE); this.responseSequencer = flagValue(source, appId, Flags.RESPONSE_SEQUENCER_TYPE); this.numResponseThreads = flagValue(source, appId, Flags.RESPONSE_NUM_THREADS); this.skipCommunicationManagerThread = flagValue(source, appId, Flags.SKIP_COMMUNICATIONMANAGER_THREAD); this.skipMbusRequestThread = flagValue(source, appId, Flags.SKIP_MBUS_REQUEST_THREAD); this.skipMbusReplyThread = flagValue(source, appId, Flags.SKIP_MBUS_REPLY_THREAD); this.useAccessControlTlsHandshakeClientAuth = flagValue(source, appId, Flags.USE_ACCESS_CONTROL_CLIENT_AUTHENTICATION); this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE); this.feedConcurrency = flagValue(source, appId, Flags.FEED_CONCURRENCY); this.reconfigurableZookeeperServer = flagValue(source, appId, Flags.RECONFIGURABLE_ZOOKEEPER_SERVER_FOR_CLUSTER_CONTROLLER); this.useBucketExecutorForLidSpaceCompact = flagValue(source, appId, Flags.USE_BUCKET_EXECUTOR_FOR_LID_SPACE_COMPACT); this.enableFeedBlockInDistributor = flagValue(source, appId, Flags.ENABLE_FEED_BLOCK_IN_DISTRIBUTOR); this.maxDeadBytesRatio = flagValue(source, appId, Flags.MAX_DEAD_BYTES_RATIO); this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB); }
class FeatureFlags implements ModelContext.FeatureFlags { private final double defaultTermwiseLimit; private final boolean useThreePhaseUpdates; private final String feedSequencer; private final String responseSequencer; private final int numResponseThreads; private final boolean skipCommunicationManagerThread; private final boolean skipMbusRequestThread; private final boolean skipMbusReplyThread; private final boolean useAccessControlTlsHandshakeClientAuth; private final boolean useAsyncMessageHandlingOnSchedule; private final double feedConcurrency; private final boolean reconfigurableZookeeperServer; private final boolean useBucketExecutorForLidSpaceCompact; private final boolean enableFeedBlockInDistributor; private final double maxDeadBytesRatio; private final int clusterControllerMaxHeapSizeInMb; @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; } @Override public boolean useThreePhaseUpdates() { return useThreePhaseUpdates; } @Override public String feedSequencerType() { return feedSequencer; } @Override public String responseSequencerType() { return responseSequencer; } @Override public int defaultNumResponseThreads() { return numResponseThreads; } @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; } @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; } @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; } @Override public boolean useAccessControlTlsHandshakeClientAuth() { return useAccessControlTlsHandshakeClientAuth; } @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; } @Override public double feedConcurrency() { return feedConcurrency; } @Override public boolean reconfigurableZookeeperServer() { return reconfigurableZookeeperServer; } @Override public boolean useBucketExecutorForLidSpaceCompact() { return useBucketExecutorForLidSpaceCompact; } @Override public boolean 
enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; } @Override public double maxDeadBytesRatio() { return maxDeadBytesRatio; } @Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; } private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) { return flag.bindTo(source) .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()) .boxedValue(); } }
/**
 * Immutable snapshot of the feature flags consumed while building a model.
 * Implements {@link ModelContext.FeatureFlags} by returning the values held in
 * the fields below, one field per accessor.
 *
 * NOTE(review): no constructor is visible in this chunk, so the final fields
 * appear uninitialized here — presumably a constructor that resolves each flag
 * via {@code flagValue} exists elsewhere in the file; confirm against the full
 * source.
 */
class FeatureFlags implements ModelContext.FeatureFlags {

    // Captured flag values; one field per ModelContext.FeatureFlags accessor.
    private final double defaultTermwiseLimit;
    private final boolean useThreePhaseUpdates;
    private final String feedSequencer;
    private final String responseSequencer;
    private final int numResponseThreads;
    private final boolean skipCommunicationManagerThread;
    private final boolean skipMbusRequestThread;
    private final boolean skipMbusReplyThread;
    private final boolean useAccessControlTlsHandshakeClientAuth;
    private final boolean useAsyncMessageHandlingOnSchedule;
    private final double feedConcurrency;
    private final boolean reconfigurableZookeeperServer;
    private final boolean useBucketExecutorForLidSpaceCompact;
    private final boolean enableFeedBlockInDistributor;
    private final double maxDeadBytesRatio;
    private final int clusterControllerMaxHeapSizeInMb;

    // Trivial accessors: each simply exposes the captured value.
    @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; }
    @Override public boolean useThreePhaseUpdates() { return useThreePhaseUpdates; }
    @Override public String feedSequencerType() { return feedSequencer; }
    @Override public String responseSequencerType() { return responseSequencer; }
    @Override public int defaultNumResponseThreads() { return numResponseThreads; }
    @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; }
    @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; }
    @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; }
    @Override public boolean useAccessControlTlsHandshakeClientAuth() { return useAccessControlTlsHandshakeClientAuth; }
    @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; }
    @Override public double feedConcurrency() { return feedConcurrency; }
    @Override public boolean reconfigurableZookeeperServer() { return reconfigurableZookeeperServer; }
    @Override public boolean useBucketExecutorForLidSpaceCompact() { return useBucketExecutorForLidSpaceCompact; }
    @Override public boolean enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; }
    @Override public double maxDeadBytesRatio() { return maxDeadBytesRatio; }
    @Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; }

    /**
     * Resolves the given flag against {@code source}, pinned to the
     * application-id dimension, and returns its boxed value.
     */
    private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) {
        return flag.bindTo(source)
                   .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())
                   .boxedValue();
    }
}
Yes, I think that would be nice. It would make the error messages less confusing.
/**
 * Node 1 was feed blocked while above the configured memory limit (0.51 > 0.5);
 * its usage then drops just below that limit (0.49) but stays above the
 * hysteresis-adjusted limit (0.5 - 0.1 = 0.4), so the feed block must be
 * retained and reported against the adjusted limit.
 */
public void retain_node_feed_block_status_when_within_hysteresis_window_limit_crossed_edge_case() {
    var existingFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.51)));
    var calculator = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), existingFeedBlock, 0.1);
    var fixture = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.49)),
                                                  forNode(2, usage("disk", 0.3), usage("memory", 0.49)));
    var inferredBlock = calculator.inferContentClusterFeedBlockOrNull(fixture.cluster().getNodeInfo());
    assertNotNull(inferredBlock);
    assertEquals("memory on node 1 [storage.1.local] (0.490 > 0.400)", inferredBlock.getDescription());
}
/**
 * Node 1 was feed blocked while above the configured memory limit (0.51 > 0.5);
 * its usage then drops just below that limit (0.49) but stays above the
 * hysteresis-adjusted limit (0.5 - 0.1 = 0.4), so the feed block must be
 * retained and reported against the adjusted limit.
 */
public void retain_node_feed_block_status_when_within_hysteresis_window_limit_crossed_edge_case() {
    var existingFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.51)));
    var calculator = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), existingFeedBlock, 0.1);
    var fixture = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.49)),
                                                  forNode(2, usage("disk", 0.3), usage("memory", 0.49)));
    var inferredBlock = calculator.inferContentClusterFeedBlockOrNull(fixture.cluster().getNodeInfo());
    assertNotNull(inferredBlock);
    assertEquals("memory on node 1 [storage.1.local] (0.490 > 0.400)", inferredBlock.getDescription());
}
class ResourceExhaustionCalculatorTest { @Test public void no_feed_block_returned_when_no_resources_lower_than_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.49), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void feed_block_returned_when_single_resource_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void feed_block_description_can_contain_optional_name_component() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", "a-fancy-disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk:a-fancy-disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void missing_or_malformed_rpc_addresses_are_emitted_as_unknown_hostnames() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), 
usage("memory", 0.85))); cf.cluster().getNodeInfo(storageNode(1)).setRpcAddress(null); cf.cluster().getNodeInfo(storageNode(2)).setRpcAddress("max mekker"); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [unknown hostname] (0.510 > 0.500), " + "memory on node 2 [unknown hostname] (0.850 > 0.800)", feedBlock.getDescription()); } @Test public void feed_block_returned_when_multiple_resources_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400)", feedBlock.getDescription()); } @Test public void feed_block_description_is_bounded_in_number_of_described_resources() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6)), forNode(3, usage("disk", 0.6), usage("memory", 0.9))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400) (... 
and 2 more)", feedBlock.getDescription()); } @Test public void no_feed_block_returned_when_feed_block_disabled() { var calc = new ResourceExhaustionCalculator(false, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test @Test public void retain_node_feed_block_status_when_within_hysteresis_window_under_limit_edge_case() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.49))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.48)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertEquals("memory on node 1 [storage.1.local] (0.480 > 0.400)", feedBlock.getDescription()); } @Test public void retained_node_feed_block_cleared_once_hysteresis_threshold_is_passed() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.48))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.39)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } }
class ResourceExhaustionCalculatorTest { @Test public void no_feed_block_returned_when_no_resources_lower_than_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.49), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void feed_block_returned_when_single_resource_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void feed_block_description_can_contain_optional_name_component() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", "a-fancy-disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk:a-fancy-disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void missing_or_malformed_rpc_addresses_are_emitted_as_unknown_hostnames() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), 
usage("memory", 0.85))); cf.cluster().getNodeInfo(storageNode(1)).setRpcAddress(null); cf.cluster().getNodeInfo(storageNode(2)).setRpcAddress("max mekker"); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [unknown hostname] (0.510 > 0.500), " + "memory on node 2 [unknown hostname] (0.850 > 0.800)", feedBlock.getDescription()); } @Test public void feed_block_returned_when_multiple_resources_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400)", feedBlock.getDescription()); } @Test public void feed_block_description_is_bounded_in_number_of_described_resources() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6)), forNode(3, usage("disk", 0.6), usage("memory", 0.9))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400) (... 
and 2 more)", feedBlock.getDescription()); } @Test public void no_feed_block_returned_when_feed_block_disabled() { var calc = new ResourceExhaustionCalculator(false, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test @Test public void retain_node_feed_block_status_when_within_hysteresis_window_under_limit_edge_case() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.49))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.48)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertEquals("memory on node 1 [storage.1.local] (0.480 > 0.400)", feedBlock.getDescription()); } @Test public void retained_node_feed_block_cleared_once_hysteresis_threshold_is_passed() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.48))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.39)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } }
Should the comment refer to the adjusted threshold (0.4)?
/**
 * Node 1 was already feed blocked while below the configured memory limit
 * (0.49 < 0.5, i.e. within the hysteresis window); its usage dips slightly
 * (to 0.48) but remains above the hysteresis-adjusted limit (0.5 - 0.1 = 0.4),
 * so the feed block must be retained and reported against the adjusted limit.
 */
public void retain_node_feed_block_status_when_within_hysteresis_window_under_limit_edge_case() {
    var existingFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.49)));
    var calculator = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), existingFeedBlock, 0.1);
    var fixture = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.48)),
                                                  forNode(2, usage("disk", 0.3), usage("memory", 0.49)));
    var inferredBlock = calculator.inferContentClusterFeedBlockOrNull(fixture.cluster().getNodeInfo());
    assertNotNull(inferredBlock);
    assertEquals("memory on node 1 [storage.1.local] (0.480 > 0.400)", inferredBlock.getDescription());
}
/**
 * Node 1 was already feed blocked while below the configured memory limit
 * (0.49 < 0.5, i.e. within the hysteresis window); its usage dips slightly
 * (to 0.48) but remains above the hysteresis-adjusted limit (0.5 - 0.1 = 0.4),
 * so the feed block must be retained and reported against the adjusted limit.
 */
public void retain_node_feed_block_status_when_within_hysteresis_window_under_limit_edge_case() {
    var existingFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.49)));
    var calculator = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), existingFeedBlock, 0.1);
    var fixture = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.48)),
                                                  forNode(2, usage("disk", 0.3), usage("memory", 0.49)));
    var inferredBlock = calculator.inferContentClusterFeedBlockOrNull(fixture.cluster().getNodeInfo());
    assertNotNull(inferredBlock);
    assertEquals("memory on node 1 [storage.1.local] (0.480 > 0.400)", inferredBlock.getDescription());
}
class ResourceExhaustionCalculatorTest { @Test public void no_feed_block_returned_when_no_resources_lower_than_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.49), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void feed_block_returned_when_single_resource_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void feed_block_description_can_contain_optional_name_component() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", "a-fancy-disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk:a-fancy-disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void missing_or_malformed_rpc_addresses_are_emitted_as_unknown_hostnames() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), 
usage("memory", 0.85))); cf.cluster().getNodeInfo(storageNode(1)).setRpcAddress(null); cf.cluster().getNodeInfo(storageNode(2)).setRpcAddress("max mekker"); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [unknown hostname] (0.510 > 0.500), " + "memory on node 2 [unknown hostname] (0.850 > 0.800)", feedBlock.getDescription()); } @Test public void feed_block_returned_when_multiple_resources_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400)", feedBlock.getDescription()); } @Test public void feed_block_description_is_bounded_in_number_of_described_resources() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6)), forNode(3, usage("disk", 0.6), usage("memory", 0.9))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400) (... 
and 2 more)", feedBlock.getDescription()); } @Test public void no_feed_block_returned_when_feed_block_disabled() { var calc = new ResourceExhaustionCalculator(false, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void retain_node_feed_block_status_when_within_hysteresis_window_limit_crossed_edge_case() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.51))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.49)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertEquals("memory on node 1 [storage.1.local] (0.490 > 0.400)", feedBlock.getDescription()); } @Test @Test public void retained_node_feed_block_cleared_once_hysteresis_threshold_is_passed() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.48))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.39)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } }
class ResourceExhaustionCalculatorTest { @Test public void no_feed_block_returned_when_no_resources_lower_than_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.49), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void feed_block_returned_when_single_resource_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void feed_block_description_can_contain_optional_name_component() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", "a-fancy-disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk:a-fancy-disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void missing_or_malformed_rpc_addresses_are_emitted_as_unknown_hostnames() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), 
usage("memory", 0.85))); cf.cluster().getNodeInfo(storageNode(1)).setRpcAddress(null); cf.cluster().getNodeInfo(storageNode(2)).setRpcAddress("max mekker"); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [unknown hostname] (0.510 > 0.500), " + "memory on node 2 [unknown hostname] (0.850 > 0.800)", feedBlock.getDescription()); } @Test public void feed_block_returned_when_multiple_resources_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400)", feedBlock.getDescription()); } @Test public void feed_block_description_is_bounded_in_number_of_described_resources() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6)), forNode(3, usage("disk", 0.6), usage("memory", 0.9))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400) (... 
and 2 more)", feedBlock.getDescription()); } @Test public void no_feed_block_returned_when_feed_block_disabled() { var calc = new ResourceExhaustionCalculator(false, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void retain_node_feed_block_status_when_within_hysteresis_window_limit_crossed_edge_case() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.51))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.49)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertEquals("memory on node 1 [storage.1.local] (0.490 > 0.400)", feedBlock.getDescription()); } @Test @Test public void retained_node_feed_block_cleared_once_hysteresis_threshold_is_passed() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.48))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.39)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } }
In this case, the comment is referring to the original feed limit, not the adjusted hysteresis limit. It tests going from feed blocked above the hysteresis limit (but below the original limit) to feed blocked _slightly less_ above the hysteresis limit.
/**
 * Node 1 was already feed blocked while below the configured memory limit
 * (0.49 < 0.5, i.e. within the hysteresis window); its usage dips slightly
 * (to 0.48) but remains above the hysteresis-adjusted limit (0.5 - 0.1 = 0.4),
 * so the feed block must be retained and reported against the adjusted limit.
 */
public void retain_node_feed_block_status_when_within_hysteresis_window_under_limit_edge_case() {
    var existingFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.49)));
    var calculator = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), existingFeedBlock, 0.1);
    var fixture = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.48)),
                                                  forNode(2, usage("disk", 0.3), usage("memory", 0.49)));
    var inferredBlock = calculator.inferContentClusterFeedBlockOrNull(fixture.cluster().getNodeInfo());
    assertNotNull(inferredBlock);
    assertEquals("memory on node 1 [storage.1.local] (0.480 > 0.400)", inferredBlock.getDescription());
}
/**
 * Node 1 was already feed blocked while below the configured memory limit
 * (0.49 < 0.5, i.e. within the hysteresis window); its usage dips slightly
 * (to 0.48) but remains above the hysteresis-adjusted limit (0.5 - 0.1 = 0.4),
 * so the feed block must be retained and reported against the adjusted limit.
 */
public void retain_node_feed_block_status_when_within_hysteresis_window_under_limit_edge_case() {
    var existingFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.49)));
    var calculator = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), existingFeedBlock, 0.1);
    var fixture = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.48)),
                                                  forNode(2, usage("disk", 0.3), usage("memory", 0.49)));
    var inferredBlock = calculator.inferContentClusterFeedBlockOrNull(fixture.cluster().getNodeInfo());
    assertNotNull(inferredBlock);
    assertEquals("memory on node 1 [storage.1.local] (0.480 > 0.400)", inferredBlock.getDescription());
}
class ResourceExhaustionCalculatorTest { @Test public void no_feed_block_returned_when_no_resources_lower_than_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.49), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void feed_block_returned_when_single_resource_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void feed_block_description_can_contain_optional_name_component() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", "a-fancy-disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk:a-fancy-disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void missing_or_malformed_rpc_addresses_are_emitted_as_unknown_hostnames() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), 
usage("memory", 0.85))); cf.cluster().getNodeInfo(storageNode(1)).setRpcAddress(null); cf.cluster().getNodeInfo(storageNode(2)).setRpcAddress("max mekker"); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [unknown hostname] (0.510 > 0.500), " + "memory on node 2 [unknown hostname] (0.850 > 0.800)", feedBlock.getDescription()); } @Test public void feed_block_returned_when_multiple_resources_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400)", feedBlock.getDescription()); } @Test public void feed_block_description_is_bounded_in_number_of_described_resources() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6)), forNode(3, usage("disk", 0.6), usage("memory", 0.9))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400) (... 
and 2 more)", feedBlock.getDescription()); } @Test public void no_feed_block_returned_when_feed_block_disabled() { var calc = new ResourceExhaustionCalculator(false, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void retain_node_feed_block_status_when_within_hysteresis_window_limit_crossed_edge_case() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.51))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.49)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertEquals("memory on node 1 [storage.1.local] (0.490 > 0.400)", feedBlock.getDescription()); } @Test @Test public void retained_node_feed_block_cleared_once_hysteresis_threshold_is_passed() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.48))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.39)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } }
class ResourceExhaustionCalculatorTest { @Test public void no_feed_block_returned_when_no_resources_lower_than_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.49), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void feed_block_returned_when_single_resource_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void feed_block_description_can_contain_optional_name_component() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", "a-fancy-disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk:a-fancy-disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void missing_or_malformed_rpc_addresses_are_emitted_as_unknown_hostnames() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), 
usage("memory", 0.85))); cf.cluster().getNodeInfo(storageNode(1)).setRpcAddress(null); cf.cluster().getNodeInfo(storageNode(2)).setRpcAddress("max mekker"); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [unknown hostname] (0.510 > 0.500), " + "memory on node 2 [unknown hostname] (0.850 > 0.800)", feedBlock.getDescription()); } @Test public void feed_block_returned_when_multiple_resources_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400)", feedBlock.getDescription()); } @Test public void feed_block_description_is_bounded_in_number_of_described_resources() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6)), forNode(3, usage("disk", 0.6), usage("memory", 0.9))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400) (... 
and 2 more)", feedBlock.getDescription()); } @Test public void no_feed_block_returned_when_feed_block_disabled() { var calc = new ResourceExhaustionCalculator(false, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void retain_node_feed_block_status_when_within_hysteresis_window_limit_crossed_edge_case() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.51))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.49)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertEquals("memory on node 1 [storage.1.local] (0.490 > 0.400)", feedBlock.getDescription()); } @Test @Test public void retained_node_feed_block_cleared_once_hysteresis_threshold_is_passed() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.48))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.39)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } }
Will do as a separate quality-of-life pass later
public void retain_node_feed_block_status_when_within_hysteresis_window_limit_crossed_edge_case() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.51))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.49)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertEquals("memory on node 1 [storage.1.local] (0.490 > 0.400)", feedBlock.getDescription()); }
public void retain_node_feed_block_status_when_within_hysteresis_window_limit_crossed_edge_case() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.51))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.49)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertEquals("memory on node 1 [storage.1.local] (0.490 > 0.400)", feedBlock.getDescription()); }
class ResourceExhaustionCalculatorTest { @Test public void no_feed_block_returned_when_no_resources_lower_than_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.49), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void feed_block_returned_when_single_resource_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void feed_block_description_can_contain_optional_name_component() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", "a-fancy-disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk:a-fancy-disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void missing_or_malformed_rpc_addresses_are_emitted_as_unknown_hostnames() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), 
usage("memory", 0.85))); cf.cluster().getNodeInfo(storageNode(1)).setRpcAddress(null); cf.cluster().getNodeInfo(storageNode(2)).setRpcAddress("max mekker"); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [unknown hostname] (0.510 > 0.500), " + "memory on node 2 [unknown hostname] (0.850 > 0.800)", feedBlock.getDescription()); } @Test public void feed_block_returned_when_multiple_resources_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400)", feedBlock.getDescription()); } @Test public void feed_block_description_is_bounded_in_number_of_described_resources() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6)), forNode(3, usage("disk", 0.6), usage("memory", 0.9))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400) (... 
and 2 more)", feedBlock.getDescription()); } @Test public void no_feed_block_returned_when_feed_block_disabled() { var calc = new ResourceExhaustionCalculator(false, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test @Test public void retain_node_feed_block_status_when_within_hysteresis_window_under_limit_edge_case() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.49))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.48)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertEquals("memory on node 1 [storage.1.local] (0.480 > 0.400)", feedBlock.getDescription()); } @Test public void retained_node_feed_block_cleared_once_hysteresis_threshold_is_passed() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.48))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.39)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } }
class ResourceExhaustionCalculatorTest { @Test public void no_feed_block_returned_when_no_resources_lower_than_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.49), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test public void feed_block_returned_when_single_resource_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void feed_block_description_can_contain_optional_name_component() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", "a-fancy-disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk:a-fancy-disk on node 1 [storage.1.local] (0.510 > 0.500)", feedBlock.getDescription()); } @Test public void missing_or_malformed_rpc_addresses_are_emitted_as_unknown_hostnames() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), 
usage("memory", 0.85))); cf.cluster().getNodeInfo(storageNode(1)).setRpcAddress(null); cf.cluster().getNodeInfo(storageNode(2)).setRpcAddress("max mekker"); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [unknown hostname] (0.510 > 0.500), " + "memory on node 2 [unknown hostname] (0.850 > 0.800)", feedBlock.getDescription()); } @Test public void feed_block_returned_when_multiple_resources_beyond_limit() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400)", feedBlock.getDescription()); } @Test public void feed_block_description_is_bounded_in_number_of_described_resources() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.4), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.85)), forNode(2, usage("disk", 0.45), usage("memory", 0.6)), forNode(3, usage("disk", 0.6), usage("memory", 0.9))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals("disk on node 1 [storage.1.local] (0.510 > 0.400), " + "memory on node 1 [storage.1.local] (0.850 > 0.800), " + "disk on node 2 [storage.2.local] (0.450 > 0.400) (... 
and 2 more)", feedBlock.getDescription()); } @Test public void no_feed_block_returned_when_feed_block_disabled() { var calc = new ResourceExhaustionCalculator(false, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.6))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } @Test @Test public void retain_node_feed_block_status_when_within_hysteresis_window_under_limit_edge_case() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.49))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.48)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNotNull(feedBlock); assertEquals("memory on node 1 [storage.1.local] (0.480 > 0.400)", feedBlock.getDescription()); } @Test public void retained_node_feed_block_cleared_once_hysteresis_threshold_is_passed() { var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.48))); var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.39)), forNode(2, usage("disk", 0.3), usage("memory", 0.49))); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster().getNodeInfo()); assertNull(feedBlock); } }
I think you want a copy—from what I can see, modifications to the underlying collection will still cause `ConcurrentModificationException` when you iterate through the unmodifiable view, as that just forwards to the underlying collection.
public void deleteExpiredLocalSessions() { Map<Tenant, Collection<LocalSession>> sessionsPerTenant = new HashMap<>(); tenantRepository.getAllTenants() .forEach(tenant -> sessionsPerTenant.put(tenant, Collections.unmodifiableCollection(tenant.getSessionRepository().getLocalSessions()))); Set<ApplicationId> applicationIds = new HashSet<>(); sessionsPerTenant.values() .forEach(sessionList -> sessionList.stream() .map(Session::getOptionalApplicationId) .filter(Optional::isPresent) .forEach(appId -> applicationIds.add(appId.get()))); Map<ApplicationId, Long> activeSessions = new HashMap<>(); applicationIds.forEach(applicationId -> { Session activeSession = getActiveSession(applicationId); if (activeSession != null) activeSessions.put(applicationId, activeSession.getSessionId()); }); sessionsPerTenant.keySet().forEach(tenant -> tenant.getSessionRepository().deleteExpiredSessions(activeSessions)); }
Collections.unmodifiableCollection(tenant.getSessionRepository().getLocalSessions())));
public void deleteExpiredLocalSessions() { Map<Tenant, Collection<LocalSession>> sessionsPerTenant = new HashMap<>(); tenantRepository.getAllTenants() .forEach(tenant -> sessionsPerTenant.put(tenant, List.copyOf(tenant.getSessionRepository().getLocalSessions()))); Set<ApplicationId> applicationIds = new HashSet<>(); sessionsPerTenant.values() .forEach(sessionList -> sessionList.stream() .map(Session::getOptionalApplicationId) .filter(Optional::isPresent) .forEach(appId -> applicationIds.add(appId.get()))); Map<ApplicationId, Long> activeSessions = new HashMap<>(); applicationIds.forEach(applicationId -> { Session activeSession = getActiveSession(applicationId); if (activeSession != null) activeSessions.put(applicationId, activeSession.getSessionId()); }); sessionsPerTenant.keySet().forEach(tenant -> tenant.getSessionRepository().deleteExpiredSessions(activeSessions)); }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, ClusterReindexingStatusClient.DUMMY_INSTANCE); } }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, ClusterReindexingStatusClient.DUMMY_INSTANCE); } }
Fixed
public void deleteExpiredLocalSessions() { Map<Tenant, Collection<LocalSession>> sessionsPerTenant = new HashMap<>(); tenantRepository.getAllTenants() .forEach(tenant -> sessionsPerTenant.put(tenant, Collections.unmodifiableCollection(tenant.getSessionRepository().getLocalSessions()))); Set<ApplicationId> applicationIds = new HashSet<>(); sessionsPerTenant.values() .forEach(sessionList -> sessionList.stream() .map(Session::getOptionalApplicationId) .filter(Optional::isPresent) .forEach(appId -> applicationIds.add(appId.get()))); Map<ApplicationId, Long> activeSessions = new HashMap<>(); applicationIds.forEach(applicationId -> { Session activeSession = getActiveSession(applicationId); if (activeSession != null) activeSessions.put(applicationId, activeSession.getSessionId()); }); sessionsPerTenant.keySet().forEach(tenant -> tenant.getSessionRepository().deleteExpiredSessions(activeSessions)); }
Collections.unmodifiableCollection(tenant.getSessionRepository().getLocalSessions())));
public void deleteExpiredLocalSessions() { Map<Tenant, Collection<LocalSession>> sessionsPerTenant = new HashMap<>(); tenantRepository.getAllTenants() .forEach(tenant -> sessionsPerTenant.put(tenant, List.copyOf(tenant.getSessionRepository().getLocalSessions()))); Set<ApplicationId> applicationIds = new HashSet<>(); sessionsPerTenant.values() .forEach(sessionList -> sessionList.stream() .map(Session::getOptionalApplicationId) .filter(Optional::isPresent) .forEach(appId -> applicationIds.add(appId.get()))); Map<ApplicationId, Long> activeSessions = new HashMap<>(); applicationIds.forEach(applicationId -> { Session activeSession = getActiveSession(applicationId); if (activeSession != null) activeSessions.put(applicationId, activeSession.getSessionId()); }); sessionsPerTenant.keySet().forEach(tenant -> tenant.getSessionRepository().deleteExpiredSessions(activeSessions)); }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, ClusterReindexingStatusClient.DUMMY_INSTANCE); } }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, ClusterReindexingStatusClient.DUMMY_INSTANCE); } }
I think this is on the high side. This means that you will have a 48M buffer on even the smallest container. Is this really necessary ? What is the gain. 4M sound like plenty.
private static void runCompressionZstd(NativeIO nativeIO, Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); int bufferSize = 48*1024*1024; try (FileOutputStream fileOut = AtomicFileOutputStream.create(compressedFile); ZstdOuputStream out = new ZstdOuputStream(fileOut, bufferSize); FileInputStream in = new FileInputStream(oldFile.toFile())) { pageFriendlyTransfer(nativeIO, out, fileOut.getFD(), in, bufferSize); out.flush(); } Files.delete(oldFile); nativeIO.dropFileFromCache(compressedFile.toFile()); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } finally { nativeIO.dropFileFromCache(oldFile.toFile()); } }
int bufferSize = 48*1024*1024;
private static void runCompressionZstd(NativeIO nativeIO, Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); int bufferSize = 48*1024*1024; try (FileOutputStream fileOut = AtomicFileOutputStream.create(compressedFile); ZstdOuputStream out = new ZstdOuputStream(fileOut, bufferSize); FileInputStream in = new FileInputStream(oldFile.toFile())) { pageFriendlyTransfer(nativeIO, out, fileOut.getFD(), in, bufferSize); out.flush(); } Files.delete(oldFile); nativeIO.dropFileFromCache(compressedFile.toFile()); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } finally { nativeIO.dropFileFromCache(oldFile.toFile()); } }
/**
 * Background thread that owns the log file: it serializes all file operations
 * (publish, flush, rotate, close) received through {@code operationProvider}, and
 * schedules compression of rotated files on a dedicated low-priority executor.
 */
class LogThread<LOGTYPE> extends Thread {
    private final Pollable<LOGTYPE> operationProvider;
    // System.nanoTime() of the last flush; package-visible, only written by this thread.
    long lastFlush = 0;
    // Current output stream; null until the first rotation opens a file.
    private PageCacheFriendlyFileOutputStream fileOutput = null;
    // Next rotation deadline in epoch millis; <= 0 means "not yet computed".
    private long nextRotationTime = 0;
    private final String filePattern;
    // volatile: presumably read by other threads to learn the current file name -- confirm.
    private volatile String fileName;
    private final LogWriter<LOGTYPE> logWriter;
    private final Compression compression;
    // Rotation times as millis-of-day; getNextRotationTime assumes ascending order -- TODO confirm with callers.
    private final long[] rotationTimes;
    // Optional constant-name symlink to the newest file; null disables symlinking.
    private final String symlinkName;
    private final ExecutorService executor = createCompressionTaskExecutor();
    private final NativeIO nativeIO = new NativeIO();

    LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName, String threadName, Pollable<LOGTYPE> operationProvider) {
        super(threadName);
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // Blank symlink names are normalized to null (= feature disabled).
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
        this.operationProvider = operationProvider;
    }

    // Single low-priority daemon worker so compression never competes with logging.
    private static ExecutorService createCompressionTaskExecutor() {
        return Executors.newSingleThreadExecutor(runnable -> {
            Thread thread = new Thread(runnable, "logfilehandler.compression");
            thread.setDaemon(true);
            thread.setPriority(Thread.MIN_PRIORITY);
            return thread;
        });
    }

    @Override
    public void run() {
        try {
            handleLogOperations();
        } catch (InterruptedException e) {
            // Interruption is the normal shutdown signal; fall through to the final flush.
            // NOTE(review): interrupt status is not restored -- presumably fine since the
            // thread exits right after; confirm.
        } catch (Exception e) {
            Process.logAndDie("Failed storing log records", e);
        }
        internalFlush();
    }

    // Main loop: apply queued operations in order; flush periodically when idle.
    private void handleLogOperations() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = operationProvider.poll();
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    // Bound how long a record can sit unflushed.
                    flushIfOld(3, TimeUnit.SECONDS);
                }
                // Wake whoever is waiting for this operation to complete.
                r.countDownLatch.countDown();
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);
            }
        }
    }

    // Flushes only if the last flush is older than the given age.
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    private void internalFlush() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
            }
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to flush file output: " + Exceptions.toMessageString(e), e);
        }
    }

    // Flushes and closes the current file; errors are logged, never propagated.
    private void internalClose() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
                fileOutput.close();
                fileOutput = null;
            }
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file: " + e.getMessage(), e);
        }
    }

    // Writes one record (newline-terminated), rotating first if the rotation
    // deadline has passed or no file is open yet.
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now);
        }
        if (now > nextRotationTime || fileOutput == null) {
            internalRotateNow();
        }
        try {
            logWriter.write(r, fileOutput);
            fileOutput.write('\n');
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        // Past the last rotation time of today: wrap to the first one tomorrow.
        if (next == 0) {
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    // Creates the parent directory of pathname if missing ('/'-separated paths only).
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    // Closes the current file, opens a new date-stamped one, refreshes the symlink,
    // and hands the previous file to the compression executor.
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalClose();
        try {
            checkAndCreateDir(fileName);
            fileOutput = new PageCacheFriendlyFileOutputStream(nativeIO, Paths.get(fileName), 4 * 1024 * 1024);
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        // Force recomputation of the deadline on the next publish.
        nextRotationTime = 0;
        if ((oldFileName != null)) {
            Path oldFile = Paths.get(oldFileName);
            if (Files.exists(oldFile)) {
                executor.execute(() -> runCompression(nativeIO, oldFile, compression));
            }
        }
    }

    // Dispatches to the configured compression strategy.
    private static void runCompression(NativeIO nativeIO, Path oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(nativeIO, oldFile);
                break;
            case GZIP:
                runCompressionGzip(nativeIO, oldFile);
                break;
            case NONE:
                runCompressionNone(nativeIO, oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    // No compression: just evict the rotated file from the page cache.
    private static void runCompressionNone(NativeIO nativeIO, Path oldFile) {
        nativeIO.dropFileFromCache(oldFile.toFile());
    }

    // Gzips oldFile to a sibling .gz file (written atomically), deletes the original,
    // and evicts both files from the page cache. Failures are logged, not thrown.
    private static void runCompressionGzip(NativeIO nativeIO, Path oldFile) {
        try {
            Path gzippedFile = Paths.get(oldFile.toString() + ".gz");
            try (FileOutputStream fileOut = AtomicFileOutputStream.create(gzippedFile);
                 GZIPOutputStream compressor = new GZIPOutputStream(fileOut, 0x100000);
                 FileInputStream inputStream = new FileInputStream(oldFile.toFile())) {
                pageFriendlyTransfer(nativeIO, compressor, fileOut.getFD(), inputStream, 0x400000);
                compressor.finish();
                compressor.flush();
            }
            Files.delete(oldFile);
            nativeIO.dropFileFromCache(gzippedFile.toFile());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with gzip: " + oldFile, e);
        } finally {
            nativeIO.dropFileFromCache(oldFile.toFile());
        }
    }

    // Copies in -> out while dropping already-transferred pages from the OS page
    // cache, so background compression does not displace hotter data.
    private static void pageFriendlyTransfer(NativeIO nativeIO, OutputStream out, FileDescriptor outDescriptor, FileInputStream in, int bufferSize) throws IOException {
        int read;
        long totalBytesRead = 0;
        byte[] buffer = new byte[bufferSize];
        while ((read = in.read(buffer)) >= 0) {
            out.write(buffer, 0, read);
            if (read > 0) {
                nativeIO.dropPartialFileFromCache(in.getFD(), totalBytesRead, read, false);
                nativeIO.dropPartialFileFromCache(outDescriptor, totalBytesRead, read, false);
            }
            totalBytesRead += read;
        }
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private void createSymlinkToCurrentFile() {
        if (symlinkName == null) return;
        Path target = Paths.get(fileName);
        Path link = target.resolveSibling(symlinkName);
        try {
            Files.deleteIfExists(link);
            Files.createSymbolicLink(link, target.getFileName());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to create symbolic link to current log file: " + e.getMessage(), e);
        }
    }

    // Milliseconds per day; used to convert epoch millis to time-of-day.
    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
/**
 * Background thread that owns the log file: it serializes all file operations
 * (publish, flush, rotate, close) received through {@code operationProvider}, and
 * schedules compression of rotated files on a dedicated low-priority executor.
 */
class LogThread<LOGTYPE> extends Thread {
    private final Pollable<LOGTYPE> operationProvider;
    // System.nanoTime() of the last flush; package-visible, only written by this thread.
    long lastFlush = 0;
    // Current output stream; null until the first rotation opens a file.
    private PageCacheFriendlyFileOutputStream fileOutput = null;
    // Next rotation deadline in epoch millis; <= 0 means "not yet computed".
    private long nextRotationTime = 0;
    private final String filePattern;
    // volatile: presumably read by other threads to learn the current file name -- confirm.
    private volatile String fileName;
    private final LogWriter<LOGTYPE> logWriter;
    private final Compression compression;
    // Rotation times as millis-of-day; getNextRotationTime assumes ascending order -- TODO confirm with callers.
    private final long[] rotationTimes;
    // Optional constant-name symlink to the newest file; null disables symlinking.
    private final String symlinkName;
    private final ExecutorService executor = createCompressionTaskExecutor();
    private final NativeIO nativeIO = new NativeIO();

    LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName, String threadName, Pollable<LOGTYPE> operationProvider) {
        super(threadName);
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // Blank symlink names are normalized to null (= feature disabled).
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
        this.operationProvider = operationProvider;
    }

    // Single low-priority daemon worker so compression never competes with logging.
    private static ExecutorService createCompressionTaskExecutor() {
        return Executors.newSingleThreadExecutor(runnable -> {
            Thread thread = new Thread(runnable, "logfilehandler.compression");
            thread.setDaemon(true);
            thread.setPriority(Thread.MIN_PRIORITY);
            return thread;
        });
    }

    @Override
    public void run() {
        try {
            handleLogOperations();
        } catch (InterruptedException e) {
            // Interruption is the normal shutdown signal; fall through to the final flush.
            // NOTE(review): interrupt status is not restored -- presumably fine since the
            // thread exits right after; confirm.
        } catch (Exception e) {
            Process.logAndDie("Failed storing log records", e);
        }
        internalFlush();
    }

    // Main loop: apply queued operations in order; flush periodically when idle.
    private void handleLogOperations() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = operationProvider.poll();
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    // Bound how long a record can sit unflushed.
                    flushIfOld(3, TimeUnit.SECONDS);
                }
                // Wake whoever is waiting for this operation to complete.
                r.countDownLatch.countDown();
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);
            }
        }
    }

    // Flushes only if the last flush is older than the given age.
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    private void internalFlush() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
            }
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to flush file output: " + Exceptions.toMessageString(e), e);
        }
    }

    // Flushes and closes the current file; errors are logged, never propagated.
    private void internalClose() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
                fileOutput.close();
                fileOutput = null;
            }
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file: " + e.getMessage(), e);
        }
    }

    // Writes one record (newline-terminated), rotating first if the rotation
    // deadline has passed or no file is open yet.
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now);
        }
        if (now > nextRotationTime || fileOutput == null) {
            internalRotateNow();
        }
        try {
            logWriter.write(r, fileOutput);
            fileOutput.write('\n');
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        // Past the last rotation time of today: wrap to the first one tomorrow.
        if (next == 0) {
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    // Creates the parent directory of pathname if missing ('/'-separated paths only).
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    // Closes the current file, opens a new date-stamped one, refreshes the symlink,
    // and hands the previous file to the compression executor.
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalClose();
        try {
            checkAndCreateDir(fileName);
            fileOutput = new PageCacheFriendlyFileOutputStream(nativeIO, Paths.get(fileName), 4 * 1024 * 1024);
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        // Force recomputation of the deadline on the next publish.
        nextRotationTime = 0;
        if ((oldFileName != null)) {
            Path oldFile = Paths.get(oldFileName);
            if (Files.exists(oldFile)) {
                executor.execute(() -> runCompression(nativeIO, oldFile, compression));
            }
        }
    }

    // Dispatches to the configured compression strategy.
    private static void runCompression(NativeIO nativeIO, Path oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(nativeIO, oldFile);
                break;
            case GZIP:
                runCompressionGzip(nativeIO, oldFile);
                break;
            case NONE:
                runCompressionNone(nativeIO, oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    // No compression: just evict the rotated file from the page cache.
    private static void runCompressionNone(NativeIO nativeIO, Path oldFile) {
        nativeIO.dropFileFromCache(oldFile.toFile());
    }

    // Gzips oldFile to a sibling .gz file (written atomically), deletes the original,
    // and evicts both files from the page cache. Failures are logged, not thrown.
    private static void runCompressionGzip(NativeIO nativeIO, Path oldFile) {
        try {
            Path gzippedFile = Paths.get(oldFile.toString() + ".gz");
            try (FileOutputStream fileOut = AtomicFileOutputStream.create(gzippedFile);
                 GZIPOutputStream compressor = new GZIPOutputStream(fileOut, 0x100000);
                 FileInputStream inputStream = new FileInputStream(oldFile.toFile())) {
                pageFriendlyTransfer(nativeIO, compressor, fileOut.getFD(), inputStream, 0x400000);
                compressor.finish();
                compressor.flush();
            }
            Files.delete(oldFile);
            nativeIO.dropFileFromCache(gzippedFile.toFile());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with gzip: " + oldFile, e);
        } finally {
            nativeIO.dropFileFromCache(oldFile.toFile());
        }
    }

    // Copies in -> out while dropping already-transferred pages from the OS page
    // cache, so background compression does not displace hotter data.
    private static void pageFriendlyTransfer(NativeIO nativeIO, OutputStream out, FileDescriptor outDescriptor, FileInputStream in, int bufferSize) throws IOException {
        int read;
        long totalBytesRead = 0;
        byte[] buffer = new byte[bufferSize];
        while ((read = in.read(buffer)) >= 0) {
            out.write(buffer, 0, read);
            if (read > 0) {
                nativeIO.dropPartialFileFromCache(in.getFD(), totalBytesRead, read, false);
                nativeIO.dropPartialFileFromCache(outDescriptor, totalBytesRead, read, false);
            }
            totalBytesRead += read;
        }
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private void createSymlinkToCurrentFile() {
        if (symlinkName == null) return;
        Path target = Paths.get(fileName);
        Path link = target.resolveSibling(symlinkName);
        try {
            Files.deleteIfExists(link);
            Files.createSymbolicLink(link, target.getFileName());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to create symbolic link to current log file: " + e.getMessage(), e);
        }
    }

    // Milliseconds per day; used to convert epoch millis to time-of-day.
    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
The motivation was to improve the compression ratio compared to gzip. The zstandard frames are independent, so a bigger frame should reduce the overall file size (for large access logs).
private static void runCompressionZstd(NativeIO nativeIO, Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); int bufferSize = 48*1024*1024; try (FileOutputStream fileOut = AtomicFileOutputStream.create(compressedFile); ZstdOuputStream out = new ZstdOuputStream(fileOut, bufferSize); FileInputStream in = new FileInputStream(oldFile.toFile())) { pageFriendlyTransfer(nativeIO, out, fileOut.getFD(), in, bufferSize); out.flush(); } Files.delete(oldFile); nativeIO.dropFileFromCache(compressedFile.toFile()); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } finally { nativeIO.dropFileFromCache(oldFile.toFile()); } }
int bufferSize = 48*1024*1024;
private static void runCompressionZstd(NativeIO nativeIO, Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); int bufferSize = 48*1024*1024; try (FileOutputStream fileOut = AtomicFileOutputStream.create(compressedFile); ZstdOuputStream out = new ZstdOuputStream(fileOut, bufferSize); FileInputStream in = new FileInputStream(oldFile.toFile())) { pageFriendlyTransfer(nativeIO, out, fileOut.getFD(), in, bufferSize); out.flush(); } Files.delete(oldFile); nativeIO.dropFileFromCache(compressedFile.toFile()); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } finally { nativeIO.dropFileFromCache(oldFile.toFile()); } }
/**
 * Background thread that owns the log file: it serializes all file operations
 * (publish, flush, rotate, close) received through {@code operationProvider}, and
 * schedules compression of rotated files on a dedicated low-priority executor.
 */
class LogThread<LOGTYPE> extends Thread {
    private final Pollable<LOGTYPE> operationProvider;
    // System.nanoTime() of the last flush; package-visible, only written by this thread.
    long lastFlush = 0;
    // Current output stream; null until the first rotation opens a file.
    private PageCacheFriendlyFileOutputStream fileOutput = null;
    // Next rotation deadline in epoch millis; <= 0 means "not yet computed".
    private long nextRotationTime = 0;
    private final String filePattern;
    // volatile: presumably read by other threads to learn the current file name -- confirm.
    private volatile String fileName;
    private final LogWriter<LOGTYPE> logWriter;
    private final Compression compression;
    // Rotation times as millis-of-day; getNextRotationTime assumes ascending order -- TODO confirm with callers.
    private final long[] rotationTimes;
    // Optional constant-name symlink to the newest file; null disables symlinking.
    private final String symlinkName;
    private final ExecutorService executor = createCompressionTaskExecutor();
    private final NativeIO nativeIO = new NativeIO();

    LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName, String threadName, Pollable<LOGTYPE> operationProvider) {
        super(threadName);
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // Blank symlink names are normalized to null (= feature disabled).
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
        this.operationProvider = operationProvider;
    }

    // Single low-priority daemon worker so compression never competes with logging.
    private static ExecutorService createCompressionTaskExecutor() {
        return Executors.newSingleThreadExecutor(runnable -> {
            Thread thread = new Thread(runnable, "logfilehandler.compression");
            thread.setDaemon(true);
            thread.setPriority(Thread.MIN_PRIORITY);
            return thread;
        });
    }

    @Override
    public void run() {
        try {
            handleLogOperations();
        } catch (InterruptedException e) {
            // Interruption is the normal shutdown signal; fall through to the final flush.
            // NOTE(review): interrupt status is not restored -- presumably fine since the
            // thread exits right after; confirm.
        } catch (Exception e) {
            Process.logAndDie("Failed storing log records", e);
        }
        internalFlush();
    }

    // Main loop: apply queued operations in order; flush periodically when idle.
    private void handleLogOperations() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = operationProvider.poll();
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    // Bound how long a record can sit unflushed.
                    flushIfOld(3, TimeUnit.SECONDS);
                }
                // Wake whoever is waiting for this operation to complete.
                r.countDownLatch.countDown();
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);
            }
        }
    }

    // Flushes only if the last flush is older than the given age.
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    private void internalFlush() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
            }
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to flush file output: " + Exceptions.toMessageString(e), e);
        }
    }

    // Flushes and closes the current file; errors are logged, never propagated.
    private void internalClose() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
                fileOutput.close();
                fileOutput = null;
            }
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file: " + e.getMessage(), e);
        }
    }

    // Writes one record (newline-terminated), rotating first if the rotation
    // deadline has passed or no file is open yet.
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now);
        }
        if (now > nextRotationTime || fileOutput == null) {
            internalRotateNow();
        }
        try {
            logWriter.write(r, fileOutput);
            fileOutput.write('\n');
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        // Past the last rotation time of today: wrap to the first one tomorrow.
        if (next == 0) {
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    // Creates the parent directory of pathname if missing ('/'-separated paths only).
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    // Closes the current file, opens a new date-stamped one, refreshes the symlink,
    // and hands the previous file to the compression executor.
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalClose();
        try {
            checkAndCreateDir(fileName);
            fileOutput = new PageCacheFriendlyFileOutputStream(nativeIO, Paths.get(fileName), 4 * 1024 * 1024);
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        // Force recomputation of the deadline on the next publish.
        nextRotationTime = 0;
        if ((oldFileName != null)) {
            Path oldFile = Paths.get(oldFileName);
            if (Files.exists(oldFile)) {
                executor.execute(() -> runCompression(nativeIO, oldFile, compression));
            }
        }
    }

    // Dispatches to the configured compression strategy.
    private static void runCompression(NativeIO nativeIO, Path oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(nativeIO, oldFile);
                break;
            case GZIP:
                runCompressionGzip(nativeIO, oldFile);
                break;
            case NONE:
                runCompressionNone(nativeIO, oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    // No compression: just evict the rotated file from the page cache.
    private static void runCompressionNone(NativeIO nativeIO, Path oldFile) {
        nativeIO.dropFileFromCache(oldFile.toFile());
    }

    // Gzips oldFile to a sibling .gz file (written atomically), deletes the original,
    // and evicts both files from the page cache. Failures are logged, not thrown.
    private static void runCompressionGzip(NativeIO nativeIO, Path oldFile) {
        try {
            Path gzippedFile = Paths.get(oldFile.toString() + ".gz");
            try (FileOutputStream fileOut = AtomicFileOutputStream.create(gzippedFile);
                 GZIPOutputStream compressor = new GZIPOutputStream(fileOut, 0x100000);
                 FileInputStream inputStream = new FileInputStream(oldFile.toFile())) {
                pageFriendlyTransfer(nativeIO, compressor, fileOut.getFD(), inputStream, 0x400000);
                compressor.finish();
                compressor.flush();
            }
            Files.delete(oldFile);
            nativeIO.dropFileFromCache(gzippedFile.toFile());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with gzip: " + oldFile, e);
        } finally {
            nativeIO.dropFileFromCache(oldFile.toFile());
        }
    }

    // Copies in -> out while dropping already-transferred pages from the OS page
    // cache, so background compression does not displace hotter data.
    private static void pageFriendlyTransfer(NativeIO nativeIO, OutputStream out, FileDescriptor outDescriptor, FileInputStream in, int bufferSize) throws IOException {
        int read;
        long totalBytesRead = 0;
        byte[] buffer = new byte[bufferSize];
        while ((read = in.read(buffer)) >= 0) {
            out.write(buffer, 0, read);
            if (read > 0) {
                nativeIO.dropPartialFileFromCache(in.getFD(), totalBytesRead, read, false);
                nativeIO.dropPartialFileFromCache(outDescriptor, totalBytesRead, read, false);
            }
            totalBytesRead += read;
        }
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private void createSymlinkToCurrentFile() {
        if (symlinkName == null) return;
        Path target = Paths.get(fileName);
        Path link = target.resolveSibling(symlinkName);
        try {
            Files.deleteIfExists(link);
            Files.createSymbolicLink(link, target.getFileName());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to create symbolic link to current log file: " + e.getMessage(), e);
        }
    }

    // Milliseconds per day; used to convert epoch millis to time-of-day.
    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
/**
 * Background thread that owns the log file: it serializes all file operations
 * (publish, flush, rotate, close) received through {@code operationProvider}, and
 * schedules compression of rotated files on a dedicated low-priority executor.
 */
class LogThread<LOGTYPE> extends Thread {
    private final Pollable<LOGTYPE> operationProvider;
    // System.nanoTime() of the last flush; package-visible, only written by this thread.
    long lastFlush = 0;
    // Current output stream; null until the first rotation opens a file.
    private PageCacheFriendlyFileOutputStream fileOutput = null;
    // Next rotation deadline in epoch millis; <= 0 means "not yet computed".
    private long nextRotationTime = 0;
    private final String filePattern;
    // volatile: presumably read by other threads to learn the current file name -- confirm.
    private volatile String fileName;
    private final LogWriter<LOGTYPE> logWriter;
    private final Compression compression;
    // Rotation times as millis-of-day; getNextRotationTime assumes ascending order -- TODO confirm with callers.
    private final long[] rotationTimes;
    // Optional constant-name symlink to the newest file; null disables symlinking.
    private final String symlinkName;
    private final ExecutorService executor = createCompressionTaskExecutor();
    private final NativeIO nativeIO = new NativeIO();

    LogThread(LogWriter<LOGTYPE> logWriter, String filePattern, Compression compression, long[] rotationTimes, String symlinkName, String threadName, Pollable<LOGTYPE> operationProvider) {
        super(threadName);
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // Blank symlink names are normalized to null (= feature disabled).
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
        this.operationProvider = operationProvider;
    }

    // Single low-priority daemon worker so compression never competes with logging.
    private static ExecutorService createCompressionTaskExecutor() {
        return Executors.newSingleThreadExecutor(runnable -> {
            Thread thread = new Thread(runnable, "logfilehandler.compression");
            thread.setDaemon(true);
            thread.setPriority(Thread.MIN_PRIORITY);
            return thread;
        });
    }

    @Override
    public void run() {
        try {
            handleLogOperations();
        } catch (InterruptedException e) {
            // Interruption is the normal shutdown signal; fall through to the final flush.
            // NOTE(review): interrupt status is not restored -- presumably fine since the
            // thread exits right after; confirm.
        } catch (Exception e) {
            Process.logAndDie("Failed storing log records", e);
        }
        internalFlush();
    }

    // Main loop: apply queued operations in order; flush periodically when idle.
    private void handleLogOperations() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = operationProvider.poll();
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    // Bound how long a record can sit unflushed.
                    flushIfOld(3, TimeUnit.SECONDS);
                }
                // Wake whoever is waiting for this operation to complete.
                r.countDownLatch.countDown();
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);
            }
        }
    }

    // Flushes only if the last flush is older than the given age.
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    private void internalFlush() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
            }
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to flush file output: " + Exceptions.toMessageString(e), e);
        }
    }

    // Flushes and closes the current file; errors are logged, never propagated.
    private void internalClose() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
                fileOutput.close();
                fileOutput = null;
            }
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file: " + e.getMessage(), e);
        }
    }

    // Writes one record (newline-terminated), rotating first if the rotation
    // deadline has passed or no file is open yet.
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now);
        }
        if (now > nextRotationTime || fileOutput == null) {
            internalRotateNow();
        }
        try {
            logWriter.write(r, fileOutput);
            fileOutput.write('\n');
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        // Past the last rotation time of today: wrap to the first one tomorrow.
        if (next == 0) {
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    // Creates the parent directory of pathname if missing ('/'-separated paths only).
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    // Closes the current file, opens a new date-stamped one, refreshes the symlink,
    // and hands the previous file to the compression executor.
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalClose();
        try {
            checkAndCreateDir(fileName);
            fileOutput = new PageCacheFriendlyFileOutputStream(nativeIO, Paths.get(fileName), 4 * 1024 * 1024);
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        // Force recomputation of the deadline on the next publish.
        nextRotationTime = 0;
        if ((oldFileName != null)) {
            Path oldFile = Paths.get(oldFileName);
            if (Files.exists(oldFile)) {
                executor.execute(() -> runCompression(nativeIO, oldFile, compression));
            }
        }
    }

    // Dispatches to the configured compression strategy.
    private static void runCompression(NativeIO nativeIO, Path oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(nativeIO, oldFile);
                break;
            case GZIP:
                runCompressionGzip(nativeIO, oldFile);
                break;
            case NONE:
                runCompressionNone(nativeIO, oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    // No compression: just evict the rotated file from the page cache.
    private static void runCompressionNone(NativeIO nativeIO, Path oldFile) {
        nativeIO.dropFileFromCache(oldFile.toFile());
    }

    // Gzips oldFile to a sibling .gz file (written atomically), deletes the original,
    // and evicts both files from the page cache. Failures are logged, not thrown.
    private static void runCompressionGzip(NativeIO nativeIO, Path oldFile) {
        try {
            Path gzippedFile = Paths.get(oldFile.toString() + ".gz");
            try (FileOutputStream fileOut = AtomicFileOutputStream.create(gzippedFile);
                 GZIPOutputStream compressor = new GZIPOutputStream(fileOut, 0x100000);
                 FileInputStream inputStream = new FileInputStream(oldFile.toFile())) {
                pageFriendlyTransfer(nativeIO, compressor, fileOut.getFD(), inputStream, 0x400000);
                compressor.finish();
                compressor.flush();
            }
            Files.delete(oldFile);
            nativeIO.dropFileFromCache(gzippedFile.toFile());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with gzip: " + oldFile, e);
        } finally {
            nativeIO.dropFileFromCache(oldFile.toFile());
        }
    }

    // Copies in -> out while dropping already-transferred pages from the OS page
    // cache, so background compression does not displace hotter data.
    private static void pageFriendlyTransfer(NativeIO nativeIO, OutputStream out, FileDescriptor outDescriptor, FileInputStream in, int bufferSize) throws IOException {
        int read;
        long totalBytesRead = 0;
        byte[] buffer = new byte[bufferSize];
        while ((read = in.read(buffer)) >= 0) {
            out.write(buffer, 0, read);
            if (read > 0) {
                nativeIO.dropPartialFileFromCache(in.getFD(), totalBytesRead, read, false);
                nativeIO.dropPartialFileFromCache(outDescriptor, totalBytesRead, read, false);
            }
            totalBytesRead += read;
        }
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private void createSymlinkToCurrentFile() {
        if (symlinkName == null) return;
        Path target = Paths.get(fileName);
        Path link = target.resolveSibling(symlinkName);
        try {
            Files.deleteIfExists(link);
            Files.createSymbolicLink(link, target.getFileName());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to create symbolic link to current log file: " + e.getMessage(), e);
        }
    }

    // Milliseconds per day; used to convert epoch millis to time-of-day.
    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
You could change it to config and use a smaller value for non-application container clusters.
private static void runCompressionZstd(NativeIO nativeIO, Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); int bufferSize = 48*1024*1024; try (FileOutputStream fileOut = AtomicFileOutputStream.create(compressedFile); ZstdOuputStream out = new ZstdOuputStream(fileOut, bufferSize); FileInputStream in = new FileInputStream(oldFile.toFile())) { pageFriendlyTransfer(nativeIO, out, fileOut.getFD(), in, bufferSize); out.flush(); } Files.delete(oldFile); nativeIO.dropFileFromCache(compressedFile.toFile()); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } finally { nativeIO.dropFileFromCache(oldFile.toFile()); } }
int bufferSize = 48*1024*1024;
private static void runCompressionZstd(NativeIO nativeIO, Path oldFile) { try { Path compressedFile = Paths.get(oldFile.toString() + ".zst"); int bufferSize = 48*1024*1024; try (FileOutputStream fileOut = AtomicFileOutputStream.create(compressedFile); ZstdOuputStream out = new ZstdOuputStream(fileOut, bufferSize); FileInputStream in = new FileInputStream(oldFile.toFile())) { pageFriendlyTransfer(nativeIO, out, fileOut.getFD(), in, bufferSize); out.flush(); } Files.delete(oldFile); nativeIO.dropFileFromCache(compressedFile.toFile()); } catch (IOException e) { logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e); } finally { nativeIO.dropFileFromCache(oldFile.toFile()); } }
/**
 * Daemon thread that owns a log file: it serializes all file operations
 * (log, flush, close, rotate) received from {@code operationProvider}, rotates
 * files at configured times of day, and hands finished files to a single
 * background compression task.
 */
class LogThread<LOGTYPE> extends Thread {
    private final Pollable<LOGTYPE> operationProvider;
    long lastFlush = 0;                                           // nanoTime of the last flush
    private PageCacheFriendlyFileOutputStream fileOutput = null;  // null until first rotation
    private long nextRotationTime = 0;                            // epoch millis; <= 0 means "not computed yet"
    private final String filePattern;                             // date pattern expanded into the file name
    private volatile String fileName;                             // current log file; volatile for cross-thread reads
    private final LogWriter<LOGTYPE> logWriter;
    private final Compression compression;
    private final long[] rotationTimes;                           // times of day (millis since midnight) to rotate at
    private final String symlinkName;                             // constant-name symlink to newest file, or null
    private final ExecutorService executor = createCompressionTaskExecutor();
    private final NativeIO nativeIO = new NativeIO();

    LogThread(LogWriter<LOGTYPE> logWriter,
              String filePattern,
              Compression compression,
              long[] rotationTimes,
              String symlinkName,
              String threadName,
              Pollable<LOGTYPE> operationProvider) {
        super(threadName);
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // A blank symlink name means "no symlink".
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
        this.operationProvider = operationProvider;
    }

    /** Single low-priority daemon worker so compression never competes with logging. */
    private static ExecutorService createCompressionTaskExecutor() {
        return Executors.newSingleThreadExecutor(runnable -> {
            Thread thread = new Thread(runnable, "logfilehandler.compression");
            thread.setDaemon(true);
            thread.setPriority(Thread.MIN_PRIORITY);
            return thread;
        });
    }

    @Override
    public void run() {
        try {
            handleLogOperations();
        } catch (InterruptedException e) {
            // Interrupted: fall through and flush before exiting.
        } catch (Exception e) {
            Process.logAndDie("Failed storing log records", e);
        }
        internalFlush();
    }

    /** Main loop: dispatches queued operations until the thread is interrupted. */
    private void handleLogOperations() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = operationProvider.poll();
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    flushIfOld(3, TimeUnit.SECONDS);  // bound how long a record can sit unflushed
                }
                r.countDownLatch.countDown();  // signal the submitter that the operation completed
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);  // idle: flush more eagerly
            }
        }
    }

    /** Flushes if more than {@code age} (in {@code unit}) has passed since the last flush. */
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    private void internalFlush() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
            }
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to flush file output: " + Exceptions.toMessageString(e), e);
        }
    }

    private void internalClose() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
                fileOutput.close();
                fileOutput = null;
            }
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file: " + e.getMessage(), e);
        }
    }

    /** Writes one record, rotating first if the rotation time has passed or no file is open. */
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now);  // lazy initialization
        }
        if (now > nextRotationTime || fileOutput == null) {
            internalRotateNow();
        }
        try {
            logWriter.write(r, fileOutput);
            fileOutput.write('\n');
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        if (next == 0) {
            // Past the last rotation time today: use the first one tomorrow.
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    /** Creates the parent directory of {@code pathname} if it does not exist. */
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    /** Closes the current file, opens a new date-stamped one, and compresses the old one asynchronously. */
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalClose();
        try {
            checkAndCreateDir(fileName);
            fileOutput = new PageCacheFriendlyFileOutputStream(nativeIO, Paths.get(fileName), 4 * 1024 * 1024);
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        nextRotationTime = 0;  // recomputed lazily on the next publish
        if ((oldFileName != null)) {
            Path oldFile = Paths.get(oldFileName);
            if (Files.exists(oldFile)) {
                executor.execute(() -> runCompression(nativeIO, oldFile, compression));
            }
        }
    }

    private static void runCompression(NativeIO nativeIO, Path oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(nativeIO, oldFile);
                break;
            case GZIP:
                runCompressionGzip(nativeIO, oldFile);
                break;
            case NONE:
                runCompressionNone(nativeIO, oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    /** No compression: just evict the finished file from the page cache. */
    private static void runCompressionNone(NativeIO nativeIO, Path oldFile) {
        nativeIO.dropFileFromCache(oldFile.toFile());
    }

    /** Gzips the rotated file into "&lt;oldFile&gt;.gz", deletes the original, and evicts both from the page cache. */
    private static void runCompressionGzip(NativeIO nativeIO, Path oldFile) {
        try {
            Path gzippedFile = Paths.get(oldFile.toString() + ".gz");
            try (FileOutputStream fileOut = AtomicFileOutputStream.create(gzippedFile);
                 GZIPOutputStream compressor = new GZIPOutputStream(fileOut, 0x100000);
                 FileInputStream inputStream = new FileInputStream(oldFile.toFile())) {
                pageFriendlyTransfer(nativeIO, compressor, fileOut.getFD(), inputStream, 0x400000);
                compressor.finish();
                compressor.flush();
            }
            Files.delete(oldFile);
            nativeIO.dropFileFromCache(gzippedFile.toFile());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with gzip: " + oldFile, e);
        } finally {
            nativeIO.dropFileFromCache(oldFile.toFile());
        }
    }

    /** Copies {@code in} to {@code out}, evicting already-transferred ranges of both files from the page cache. */
    private static void pageFriendlyTransfer(NativeIO nativeIO, OutputStream out, FileDescriptor outDescriptor, FileInputStream in, int bufferSize) throws IOException {
        int read;
        long totalBytesRead = 0;
        byte[] buffer = new byte[bufferSize];
        while ((read = in.read(buffer)) >= 0) {
            out.write(buffer, 0, read);
            if (read > 0) {
                nativeIO.dropPartialFileFromCache(in.getFD(), totalBytesRead, read, false);
                nativeIO.dropPartialFileFromCache(outDescriptor, totalBytesRead, read, false);
            }
            totalBytesRead += read;
        }
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private void createSymlinkToCurrentFile() {
        if (symlinkName == null) return;
        Path target = Paths.get(fileName);
        Path link = target.resolveSibling(symlinkName);
        try {
            Files.deleteIfExists(link);
            Files.createSymbolicLink(link, target.getFileName());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to create symbolic link to current log file: " + e.getMessage(), e);
        }
    }

    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    /** Millis since midnight for the given epoch-millis timestamp. */
    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
/**
 * Daemon thread that owns a log file: it serializes all file operations
 * (log, flush, close, rotate) received from {@code operationProvider}, rotates
 * files at configured times of day, and hands finished files to a single
 * background compression task.
 */
class LogThread<LOGTYPE> extends Thread {
    private final Pollable<LOGTYPE> operationProvider;
    long lastFlush = 0;                                           // nanoTime of the last flush
    private PageCacheFriendlyFileOutputStream fileOutput = null;  // null until first rotation
    private long nextRotationTime = 0;                            // epoch millis; <= 0 means "not computed yet"
    private final String filePattern;                             // date pattern expanded into the file name
    private volatile String fileName;                             // current log file; volatile for cross-thread reads
    private final LogWriter<LOGTYPE> logWriter;
    private final Compression compression;
    private final long[] rotationTimes;                           // times of day (millis since midnight) to rotate at
    private final String symlinkName;                             // constant-name symlink to newest file, or null
    private final ExecutorService executor = createCompressionTaskExecutor();
    private final NativeIO nativeIO = new NativeIO();

    LogThread(LogWriter<LOGTYPE> logWriter,
              String filePattern,
              Compression compression,
              long[] rotationTimes,
              String symlinkName,
              String threadName,
              Pollable<LOGTYPE> operationProvider) {
        super(threadName);
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // A blank symlink name means "no symlink".
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
        this.operationProvider = operationProvider;
    }

    /** Single low-priority daemon worker so compression never competes with logging. */
    private static ExecutorService createCompressionTaskExecutor() {
        return Executors.newSingleThreadExecutor(runnable -> {
            Thread thread = new Thread(runnable, "logfilehandler.compression");
            thread.setDaemon(true);
            thread.setPriority(Thread.MIN_PRIORITY);
            return thread;
        });
    }

    @Override
    public void run() {
        try {
            handleLogOperations();
        } catch (InterruptedException e) {
            // Interrupted: fall through and flush before exiting.
        } catch (Exception e) {
            Process.logAndDie("Failed storing log records", e);
        }
        internalFlush();
    }

    /** Main loop: dispatches queued operations until the thread is interrupted. */
    private void handleLogOperations() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = operationProvider.poll();
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    flushIfOld(3, TimeUnit.SECONDS);  // bound how long a record can sit unflushed
                }
                r.countDownLatch.countDown();  // signal the submitter that the operation completed
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);  // idle: flush more eagerly
            }
        }
    }

    /** Flushes if more than {@code age} (in {@code unit}) has passed since the last flush. */
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    private void internalFlush() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
            }
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to flush file output: " + Exceptions.toMessageString(e), e);
        }
    }

    private void internalClose() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
                fileOutput.close();
                fileOutput = null;
            }
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file: " + e.getMessage(), e);
        }
    }

    /** Writes one record, rotating first if the rotation time has passed or no file is open. */
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now);  // lazy initialization
        }
        if (now > nextRotationTime || fileOutput == null) {
            internalRotateNow();
        }
        try {
            logWriter.write(r, fileOutput);
            fileOutput.write('\n');
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        if (next == 0) {
            // Past the last rotation time today: use the first one tomorrow.
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    /** Creates the parent directory of {@code pathname} if it does not exist. */
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    /** Closes the current file, opens a new date-stamped one, and compresses the old one asynchronously. */
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalClose();
        try {
            checkAndCreateDir(fileName);
            fileOutput = new PageCacheFriendlyFileOutputStream(nativeIO, Paths.get(fileName), 4 * 1024 * 1024);
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        nextRotationTime = 0;  // recomputed lazily on the next publish
        if ((oldFileName != null)) {
            Path oldFile = Paths.get(oldFileName);
            if (Files.exists(oldFile)) {
                executor.execute(() -> runCompression(nativeIO, oldFile, compression));
            }
        }
    }

    private static void runCompression(NativeIO nativeIO, Path oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(nativeIO, oldFile);
                break;
            case GZIP:
                runCompressionGzip(nativeIO, oldFile);
                break;
            case NONE:
                runCompressionNone(nativeIO, oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    /** No compression: just evict the finished file from the page cache. */
    private static void runCompressionNone(NativeIO nativeIO, Path oldFile) {
        nativeIO.dropFileFromCache(oldFile.toFile());
    }

    /** Gzips the rotated file into "&lt;oldFile&gt;.gz", deletes the original, and evicts both from the page cache. */
    private static void runCompressionGzip(NativeIO nativeIO, Path oldFile) {
        try {
            Path gzippedFile = Paths.get(oldFile.toString() + ".gz");
            try (FileOutputStream fileOut = AtomicFileOutputStream.create(gzippedFile);
                 GZIPOutputStream compressor = new GZIPOutputStream(fileOut, 0x100000);
                 FileInputStream inputStream = new FileInputStream(oldFile.toFile())) {
                pageFriendlyTransfer(nativeIO, compressor, fileOut.getFD(), inputStream, 0x400000);
                compressor.finish();
                compressor.flush();
            }
            Files.delete(oldFile);
            nativeIO.dropFileFromCache(gzippedFile.toFile());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with gzip: " + oldFile, e);
        } finally {
            nativeIO.dropFileFromCache(oldFile.toFile());
        }
    }

    /** Copies {@code in} to {@code out}, evicting already-transferred ranges of both files from the page cache. */
    private static void pageFriendlyTransfer(NativeIO nativeIO, OutputStream out, FileDescriptor outDescriptor, FileInputStream in, int bufferSize) throws IOException {
        int read;
        long totalBytesRead = 0;
        byte[] buffer = new byte[bufferSize];
        while ((read = in.read(buffer)) >= 0) {
            out.write(buffer, 0, read);
            if (read > 0) {
                nativeIO.dropPartialFileFromCache(in.getFD(), totalBytesRead, read, false);
                nativeIO.dropPartialFileFromCache(outDescriptor, totalBytesRead, read, false);
            }
            totalBytesRead += read;
        }
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private void createSymlinkToCurrentFile() {
        if (symlinkName == null) return;
        Path target = Paths.get(fileName);
        Path link = target.resolveSibling(symlinkName);
        try {
            Files.deleteIfExists(link);
            Files.createSymbolicLink(link, target.getFileName());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to create symbolic link to current log file: " + e.getMessage(), e);
        }
    }

    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    /** Millis since midnight for the given epoch-millis timestamp. */
    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
Have you measured the gain? When I tested this earlier, there was not much gain beyond a 1 MB buffer — at that point it is better to increase the compression level instead.
/**
 * Compresses the rotated log file with zstd into "&lt;oldFile&gt;.zst", deletes the
 * original on success, and evicts both files from the OS page cache so the
 * one-shot compression pass does not displace hotter data.
 */
private static void runCompressionZstd(NativeIO nativeIO, Path oldFile) {
    try {
        Path compressedFile = Paths.get(oldFile.toString() + ".zst");
        // NOTE(review): 48 MiB buffer — consider making this configurable; earlier
        // measurements reportedly showed little gain beyond ~1 MiB, so a higher
        // compression level may be a better use of resources. TODO confirm.
        int bufferSize = 48*1024*1024;
        try (FileOutputStream fileOut = AtomicFileOutputStream.create(compressedFile);
             ZstdOuputStream out = new ZstdOuputStream(fileOut, bufferSize);
             FileInputStream in = new FileInputStream(oldFile.toFile())) {
            pageFriendlyTransfer(nativeIO, out, fileOut.getFD(), in, bufferSize);
            out.flush();
        }
        Files.delete(oldFile);
        nativeIO.dropFileFromCache(compressedFile.toFile());
    } catch (IOException e) {
        logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e);
    } finally {
        // Evict the source file from the page cache whether or not compression succeeded.
        nativeIO.dropFileFromCache(oldFile.toFile());
    }
}
int bufferSize = 48*1024*1024;
/**
 * Compresses the rotated log file with zstd into "&lt;oldFile&gt;.zst", deletes the
 * original on success, and evicts both files from the OS page cache so the
 * one-shot compression pass does not displace hotter data.
 */
private static void runCompressionZstd(NativeIO nativeIO, Path oldFile) {
    try {
        Path compressedFile = Paths.get(oldFile.toString() + ".zst");
        // NOTE(review): 48 MiB buffer — consider making this configurable; earlier
        // measurements reportedly showed little gain beyond ~1 MiB, so a higher
        // compression level may be a better use of resources. TODO confirm.
        int bufferSize = 48*1024*1024;
        try (FileOutputStream fileOut = AtomicFileOutputStream.create(compressedFile);
             ZstdOuputStream out = new ZstdOuputStream(fileOut, bufferSize);
             FileInputStream in = new FileInputStream(oldFile.toFile())) {
            pageFriendlyTransfer(nativeIO, out, fileOut.getFD(), in, bufferSize);
            out.flush();
        }
        Files.delete(oldFile);
        nativeIO.dropFileFromCache(compressedFile.toFile());
    } catch (IOException e) {
        logger.log(Level.WARNING, "Failed to compress log file with zstd: " + oldFile, e);
    } finally {
        // Evict the source file from the page cache whether or not compression succeeded.
        nativeIO.dropFileFromCache(oldFile.toFile());
    }
}
/**
 * Daemon thread that owns a log file: it serializes all file operations
 * (log, flush, close, rotate) received from {@code operationProvider}, rotates
 * files at configured times of day, and hands finished files to a single
 * background compression task.
 */
class LogThread<LOGTYPE> extends Thread {
    private final Pollable<LOGTYPE> operationProvider;
    long lastFlush = 0;                                           // nanoTime of the last flush
    private PageCacheFriendlyFileOutputStream fileOutput = null;  // null until first rotation
    private long nextRotationTime = 0;                            // epoch millis; <= 0 means "not computed yet"
    private final String filePattern;                             // date pattern expanded into the file name
    private volatile String fileName;                             // current log file; volatile for cross-thread reads
    private final LogWriter<LOGTYPE> logWriter;
    private final Compression compression;
    private final long[] rotationTimes;                           // times of day (millis since midnight) to rotate at
    private final String symlinkName;                             // constant-name symlink to newest file, or null
    private final ExecutorService executor = createCompressionTaskExecutor();
    private final NativeIO nativeIO = new NativeIO();

    LogThread(LogWriter<LOGTYPE> logWriter,
              String filePattern,
              Compression compression,
              long[] rotationTimes,
              String symlinkName,
              String threadName,
              Pollable<LOGTYPE> operationProvider) {
        super(threadName);
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // A blank symlink name means "no symlink".
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
        this.operationProvider = operationProvider;
    }

    /** Single low-priority daemon worker so compression never competes with logging. */
    private static ExecutorService createCompressionTaskExecutor() {
        return Executors.newSingleThreadExecutor(runnable -> {
            Thread thread = new Thread(runnable, "logfilehandler.compression");
            thread.setDaemon(true);
            thread.setPriority(Thread.MIN_PRIORITY);
            return thread;
        });
    }

    @Override
    public void run() {
        try {
            handleLogOperations();
        } catch (InterruptedException e) {
            // Interrupted: fall through and flush before exiting.
        } catch (Exception e) {
            Process.logAndDie("Failed storing log records", e);
        }
        internalFlush();
    }

    /** Main loop: dispatches queued operations until the thread is interrupted. */
    private void handleLogOperations() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = operationProvider.poll();
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    flushIfOld(3, TimeUnit.SECONDS);  // bound how long a record can sit unflushed
                }
                r.countDownLatch.countDown();  // signal the submitter that the operation completed
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);  // idle: flush more eagerly
            }
        }
    }

    /** Flushes if more than {@code age} (in {@code unit}) has passed since the last flush. */
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    private void internalFlush() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
            }
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to flush file output: " + Exceptions.toMessageString(e), e);
        }
    }

    private void internalClose() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
                fileOutput.close();
                fileOutput = null;
            }
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file: " + e.getMessage(), e);
        }
    }

    /** Writes one record, rotating first if the rotation time has passed or no file is open. */
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now);  // lazy initialization
        }
        if (now > nextRotationTime || fileOutput == null) {
            internalRotateNow();
        }
        try {
            logWriter.write(r, fileOutput);
            fileOutput.write('\n');
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        if (next == 0) {
            // Past the last rotation time today: use the first one tomorrow.
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    /** Creates the parent directory of {@code pathname} if it does not exist. */
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    /** Closes the current file, opens a new date-stamped one, and compresses the old one asynchronously. */
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalClose();
        try {
            checkAndCreateDir(fileName);
            fileOutput = new PageCacheFriendlyFileOutputStream(nativeIO, Paths.get(fileName), 4 * 1024 * 1024);
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        nextRotationTime = 0;  // recomputed lazily on the next publish
        if ((oldFileName != null)) {
            Path oldFile = Paths.get(oldFileName);
            if (Files.exists(oldFile)) {
                executor.execute(() -> runCompression(nativeIO, oldFile, compression));
            }
        }
    }

    private static void runCompression(NativeIO nativeIO, Path oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(nativeIO, oldFile);
                break;
            case GZIP:
                runCompressionGzip(nativeIO, oldFile);
                break;
            case NONE:
                runCompressionNone(nativeIO, oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    /** No compression: just evict the finished file from the page cache. */
    private static void runCompressionNone(NativeIO nativeIO, Path oldFile) {
        nativeIO.dropFileFromCache(oldFile.toFile());
    }

    /** Gzips the rotated file into "&lt;oldFile&gt;.gz", deletes the original, and evicts both from the page cache. */
    private static void runCompressionGzip(NativeIO nativeIO, Path oldFile) {
        try {
            Path gzippedFile = Paths.get(oldFile.toString() + ".gz");
            try (FileOutputStream fileOut = AtomicFileOutputStream.create(gzippedFile);
                 GZIPOutputStream compressor = new GZIPOutputStream(fileOut, 0x100000);
                 FileInputStream inputStream = new FileInputStream(oldFile.toFile())) {
                pageFriendlyTransfer(nativeIO, compressor, fileOut.getFD(), inputStream, 0x400000);
                compressor.finish();
                compressor.flush();
            }
            Files.delete(oldFile);
            nativeIO.dropFileFromCache(gzippedFile.toFile());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with gzip: " + oldFile, e);
        } finally {
            nativeIO.dropFileFromCache(oldFile.toFile());
        }
    }

    /** Copies {@code in} to {@code out}, evicting already-transferred ranges of both files from the page cache. */
    private static void pageFriendlyTransfer(NativeIO nativeIO, OutputStream out, FileDescriptor outDescriptor, FileInputStream in, int bufferSize) throws IOException {
        int read;
        long totalBytesRead = 0;
        byte[] buffer = new byte[bufferSize];
        while ((read = in.read(buffer)) >= 0) {
            out.write(buffer, 0, read);
            if (read > 0) {
                nativeIO.dropPartialFileFromCache(in.getFD(), totalBytesRead, read, false);
                nativeIO.dropPartialFileFromCache(outDescriptor, totalBytesRead, read, false);
            }
            totalBytesRead += read;
        }
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private void createSymlinkToCurrentFile() {
        if (symlinkName == null) return;
        Path target = Paths.get(fileName);
        Path link = target.resolveSibling(symlinkName);
        try {
            Files.deleteIfExists(link);
            Files.createSymbolicLink(link, target.getFileName());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to create symbolic link to current log file: " + e.getMessage(), e);
        }
    }

    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    /** Millis since midnight for the given epoch-millis timestamp. */
    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
/**
 * Daemon thread that owns a log file: it serializes all file operations
 * (log, flush, close, rotate) received from {@code operationProvider}, rotates
 * files at configured times of day, and hands finished files to a single
 * background compression task.
 */
class LogThread<LOGTYPE> extends Thread {
    private final Pollable<LOGTYPE> operationProvider;
    long lastFlush = 0;                                           // nanoTime of the last flush
    private PageCacheFriendlyFileOutputStream fileOutput = null;  // null until first rotation
    private long nextRotationTime = 0;                            // epoch millis; <= 0 means "not computed yet"
    private final String filePattern;                             // date pattern expanded into the file name
    private volatile String fileName;                             // current log file; volatile for cross-thread reads
    private final LogWriter<LOGTYPE> logWriter;
    private final Compression compression;
    private final long[] rotationTimes;                           // times of day (millis since midnight) to rotate at
    private final String symlinkName;                             // constant-name symlink to newest file, or null
    private final ExecutorService executor = createCompressionTaskExecutor();
    private final NativeIO nativeIO = new NativeIO();

    LogThread(LogWriter<LOGTYPE> logWriter,
              String filePattern,
              Compression compression,
              long[] rotationTimes,
              String symlinkName,
              String threadName,
              Pollable<LOGTYPE> operationProvider) {
        super(threadName);
        setDaemon(true);
        this.logWriter = logWriter;
        this.filePattern = filePattern;
        this.compression = compression;
        this.rotationTimes = rotationTimes;
        // A blank symlink name means "no symlink".
        this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
        this.operationProvider = operationProvider;
    }

    /** Single low-priority daemon worker so compression never competes with logging. */
    private static ExecutorService createCompressionTaskExecutor() {
        return Executors.newSingleThreadExecutor(runnable -> {
            Thread thread = new Thread(runnable, "logfilehandler.compression");
            thread.setDaemon(true);
            thread.setPriority(Thread.MIN_PRIORITY);
            return thread;
        });
    }

    @Override
    public void run() {
        try {
            handleLogOperations();
        } catch (InterruptedException e) {
            // Interrupted: fall through and flush before exiting.
        } catch (Exception e) {
            Process.logAndDie("Failed storing log records", e);
        }
        internalFlush();
    }

    /** Main loop: dispatches queued operations until the thread is interrupted. */
    private void handleLogOperations() throws InterruptedException {
        while (!isInterrupted()) {
            Operation<LOGTYPE> r = operationProvider.poll();
            if (r != null) {
                if (r.type == Operation.Type.flush) {
                    internalFlush();
                } else if (r.type == Operation.Type.close) {
                    internalClose();
                } else if (r.type == Operation.Type.rotate) {
                    internalRotateNow();
                    lastFlush = System.nanoTime();
                } else if (r.type == Operation.Type.log) {
                    internalPublish(r.log.get());
                    flushIfOld(3, TimeUnit.SECONDS);  // bound how long a record can sit unflushed
                }
                r.countDownLatch.countDown();  // signal the submitter that the operation completed
            } else {
                flushIfOld(100, TimeUnit.MILLISECONDS);  // idle: flush more eagerly
            }
        }
    }

    /** Flushes if more than {@code age} (in {@code unit}) has passed since the last flush. */
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            internalFlush();
            lastFlush = now;
        }
    }

    private void internalFlush() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
            }
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to flush file output: " + Exceptions.toMessageString(e), e);
        }
    }

    private void internalClose() {
        try {
            if (fileOutput != null) {
                fileOutput.flush();
                fileOutput.close();
                fileOutput = null;
            }
        } catch (Exception e) {
            logger.log(Level.WARNING, "Got error while closing log file: " + e.getMessage(), e);
        }
    }

    /** Writes one record, rotating first if the rotation time has passed or no file is open. */
    private void internalPublish(LOGTYPE r) {
        long now = System.currentTimeMillis();
        if (nextRotationTime <= 0) {
            nextRotationTime = getNextRotationTime(now);  // lazy initialization
        }
        if (now > nextRotationTime || fileOutput == null) {
            internalRotateNow();
        }
        try {
            logWriter.write(r, fileOutput);
            fileOutput.write('\n');
        } catch (IOException e) {
            logger.warning("Failed writing log record: " + Exceptions.toMessageString(e));
        }
    }

    /**
     * Find next rotation after specified time.
     *
     * @param now the specified time; if zero, current time is used.
     * @return the next rotation time
     */
    long getNextRotationTime(long now) {
        if (now <= 0) {
            now = System.currentTimeMillis();
        }
        long nowTod = timeOfDayMillis(now);
        long next = 0;
        for (long rotationTime : rotationTimes) {
            if (nowTod < rotationTime) {
                next = rotationTime - nowTod + now;
                break;
            }
        }
        if (next == 0) {
            // Past the last rotation time today: use the first one tomorrow.
            next = rotationTimes[0] + lengthOfDayMillis - nowTod + now;
        }
        return next;
    }

    /** Creates the parent directory of {@code pathname} if it does not exist. */
    private void checkAndCreateDir(String pathname) {
        int lastSlash = pathname.lastIndexOf("/");
        if (lastSlash > -1) {
            String pathExcludingFilename = pathname.substring(0, lastSlash);
            File filepath = new File(pathExcludingFilename);
            if (!filepath.exists()) {
                filepath.mkdirs();
            }
        }
    }

    /** Closes the current file, opens a new date-stamped one, and compresses the old one asynchronously. */
    private void internalRotateNow() {
        String oldFileName = fileName;
        long now = System.currentTimeMillis();
        fileName = LogFormatter.insertDate(filePattern, now);
        internalClose();
        try {
            checkAndCreateDir(fileName);
            fileOutput = new PageCacheFriendlyFileOutputStream(nativeIO, Paths.get(fileName), 4 * 1024 * 1024);
            LogFileDb.nowLoggingTo(fileName);
        } catch (IOException e) {
            throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
        }
        createSymlinkToCurrentFile();
        nextRotationTime = 0;  // recomputed lazily on the next publish
        if ((oldFileName != null)) {
            Path oldFile = Paths.get(oldFileName);
            if (Files.exists(oldFile)) {
                executor.execute(() -> runCompression(nativeIO, oldFile, compression));
            }
        }
    }

    private static void runCompression(NativeIO nativeIO, Path oldFile, Compression compression) {
        switch (compression) {
            case ZSTD:
                runCompressionZstd(nativeIO, oldFile);
                break;
            case GZIP:
                runCompressionGzip(nativeIO, oldFile);
                break;
            case NONE:
                runCompressionNone(nativeIO, oldFile);
                break;
            default:
                throw new IllegalArgumentException("Unknown compression " + compression);
        }
    }

    /** No compression: just evict the finished file from the page cache. */
    private static void runCompressionNone(NativeIO nativeIO, Path oldFile) {
        nativeIO.dropFileFromCache(oldFile.toFile());
    }

    /** Gzips the rotated file into "&lt;oldFile&gt;.gz", deletes the original, and evicts both from the page cache. */
    private static void runCompressionGzip(NativeIO nativeIO, Path oldFile) {
        try {
            Path gzippedFile = Paths.get(oldFile.toString() + ".gz");
            try (FileOutputStream fileOut = AtomicFileOutputStream.create(gzippedFile);
                 GZIPOutputStream compressor = new GZIPOutputStream(fileOut, 0x100000);
                 FileInputStream inputStream = new FileInputStream(oldFile.toFile())) {
                pageFriendlyTransfer(nativeIO, compressor, fileOut.getFD(), inputStream, 0x400000);
                compressor.finish();
                compressor.flush();
            }
            Files.delete(oldFile);
            nativeIO.dropFileFromCache(gzippedFile.toFile());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to compress log file with gzip: " + oldFile, e);
        } finally {
            nativeIO.dropFileFromCache(oldFile.toFile());
        }
    }

    /** Copies {@code in} to {@code out}, evicting already-transferred ranges of both files from the page cache. */
    private static void pageFriendlyTransfer(NativeIO nativeIO, OutputStream out, FileDescriptor outDescriptor, FileInputStream in, int bufferSize) throws IOException {
        int read;
        long totalBytesRead = 0;
        byte[] buffer = new byte[bufferSize];
        while ((read = in.read(buffer)) >= 0) {
            out.write(buffer, 0, read);
            if (read > 0) {
                nativeIO.dropPartialFileFromCache(in.getFD(), totalBytesRead, read, false);
                nativeIO.dropPartialFileFromCache(outDescriptor, totalBytesRead, read, false);
            }
            totalBytesRead += read;
        }
    }

    /**
     * Name files by date - create a symlink with a constant name to the newest file
     */
    private void createSymlinkToCurrentFile() {
        if (symlinkName == null) return;
        Path target = Paths.get(fileName);
        Path link = target.resolveSibling(symlinkName);
        try {
            Files.deleteIfExists(link);
            Files.createSymbolicLink(link, target.getFileName());
        } catch (IOException e) {
            logger.log(Level.WARNING, "Failed to create symbolic link to current log file: " + e.getMessage(), e);
        }
    }

    private static final long lengthOfDayMillis = 24 * 60 * 60 * 1000;

    /** Millis since midnight for the given epoch-millis timestamp. */
    private static long timeOfDayMillis(long time) {
        return time % lengthOfDayMillis;
    }
}
The shortest example in your unit test is 60 bytes, so you might as well increase this a bit more. :)
/**
 * Builds the remote destination path for a synced file:
 * {@code /<tenant>.<application>.<instance>/<short-hostname>/<dir>/<filename>[<extension>]},
 * using {@code "infrastructure"} as the first component when {@code app} is null.
 *
 * @param app       owning application, or null for infrastructure hosts
 * @param hostName  source host; only the part before the first '.' is kept
 * @param dir       logical directory under the host, e.g. "logs/vespa"
 * @param filename  source path; only its file-name component is used
 * @param extension optional suffix to append (e.g. ".zst"), or null for none
 */
private static Path destination(ApplicationId app, HostName hostName, String dir, Path filename, String extension) {
    // Capacity doubled from 50 to 100: tenant.application.instance plus hostname,
    // dir and filename routinely exceed 50 chars, forcing a StringBuilder regrow.
    StringBuilder sb = new StringBuilder(100).append('/');
    if (app == null) sb.append("infrastructure");
    else sb.append(app.tenant().value()).append('.').append(app.application().value()).append('.').append(app.instance().value());
    sb.append('/');
    // Keep only the short host name (everything before the first dot).
    for (char c : hostName.value().toCharArray()) {
        if (c == '.') break;
        sb.append(c);
    }
    sb.append('/').append(dir).append('/').append(filename.getFileName().toString());
    if (extension != null) sb.append(extension);
    return Paths.get(sb.toString());
}
StringBuilder sb = new StringBuilder(50).append('/');
/**
 * Builds the remote destination path for a synced file:
 * {@code /<tenant>.<application>.<instance>/<short-hostname>/<dir>/<filename>[<extension>]},
 * using {@code "infrastructure"} as the first component when {@code app} is null.
 *
 * @param app       owning application, or null for infrastructure hosts
 * @param hostName  source host; only the part before the first '.' is kept
 * @param dir       logical directory under the host, e.g. "logs/vespa"
 * @param filename  source path; only its file-name component is used
 * @param extension optional suffix to append (e.g. ".zst"), or null for none
 */
private static Path destination(ApplicationId app, HostName hostName, String dir, Path filename, String extension) {
    // Capacity 100 (doubled from 50 after review) to avoid a StringBuilder regrow
    // for typical tenant.application.instance + hostname + dir + filename lengths.
    StringBuilder sb = new StringBuilder(100).append('/');
    if (app == null) sb.append("infrastructure");
    else sb.append(app.tenant().value()).append('.').append(app.application().value()).append('.').append(app.instance().value());
    sb.append('/');
    // Keep only the short host name (everything before the first dot).
    for (char c: hostName.value().toCharArray()) {
        if (c == '.') break;
        sb.append(c);
    }
    sb.append('/').append(dir).append('/').append(filename.getFileName().toString());
    if (extension != null) sb.append(extension);
    return Paths.get(sb.toString());
}
/**
 * Immutable description of one file to sync to remote storage: the target bucket,
 * the local source path, the remote destination path, and whether the stream
 * should be zstd-compressed on the way out.
 */
class SyncFileInfo {

    private final String bucketName;
    private final Path srcPath;
    private final Path destPath;
    private final boolean compressWithZstd;

    private SyncFileInfo(String bucketName, Path srcPath, Path destPath, boolean compressWithZstd) {
        this.bucketName = bucketName;
        this.srcPath = srcPath;
        this.destPath = destPath;
        this.compressWithZstd = compressWithZstd;
    }

    public String bucketName() {
        return bucketName;
    }

    public Path srcPath() {
        return srcPath;
    }

    public Path destPath() {
        return destPath;
    }

    /** Opens the source file, wrapping it in a zstd compressor when compression is enabled. */
    public InputStream inputStream() throws IOException {
        InputStream fileStream = Files.newInputStream(srcPath);
        if (!compressWithZstd) return fileStream;
        return new ZstdCompressingInputStream(fileStream, 4 << 20);
    }

    public static SyncFileInfo tenantVespaLog(String bucketName, ApplicationId applicationId, HostName hostName, Path vespaLogFile) {
        return vespaLog(bucketName, applicationId, hostName, vespaLogFile);
    }

    public static SyncFileInfo tenantAccessLog(String bucketName, ApplicationId applicationId, HostName hostName, Path accessLogFile) {
        return accessLog(bucketName, applicationId, hostName, accessLogFile);
    }

    public static SyncFileInfo infrastructureVespaLog(String bucketName, HostName hostName, Path vespaLogFile) {
        return vespaLog(bucketName, null, hostName, vespaLogFile);
    }

    public static SyncFileInfo infrastructureAccessLog(String bucketName, HostName hostName, Path accessLogFile) {
        return accessLog(bucketName, null, hostName, accessLogFile);
    }

    /** Vespa logs are stored compressed, so the destination gets a ".zst" suffix. */
    private static SyncFileInfo vespaLog(String bucketName, ApplicationId applicationId, HostName hostName, Path vespaLogFile) {
        return new SyncFileInfo(bucketName, vespaLogFile, destination(applicationId, hostName, "logs/vespa", vespaLogFile, ".zst"), true);
    }

    /** Access logs are uploaded as-is, with no extra suffix and no compression. */
    private static SyncFileInfo accessLog(String bucketName, ApplicationId applicationId, HostName hostName, Path accessLogFile) {
        return new SyncFileInfo(bucketName, accessLogFile, destination(applicationId, hostName, "logs/access", accessLogFile, null), false);
    }
}
class SyncFileInfo { private final String bucketName; private final Path srcPath; private final Path destPath; private final boolean compressWithZstd; private SyncFileInfo(String bucketName, Path srcPath, Path destPath, boolean compressWithZstd) { this.bucketName = bucketName; this.srcPath = srcPath; this.destPath = destPath; this.compressWithZstd = compressWithZstd; } public String bucketName() { return bucketName; } public Path srcPath() { return srcPath; } public Path destPath() { return destPath; } public InputStream inputStream() throws IOException { InputStream is = Files.newInputStream(srcPath); if (compressWithZstd) return new ZstdCompressingInputStream(is, 4 << 20); return is; } public static SyncFileInfo tenantVespaLog(String bucketName, ApplicationId applicationId, HostName hostName, Path vespaLogFile) { return new SyncFileInfo(bucketName, vespaLogFile, destination(applicationId, hostName, "logs/vespa", vespaLogFile, ".zst"), true); } public static SyncFileInfo tenantAccessLog(String bucketName, ApplicationId applicationId, HostName hostName, Path accessLogFile) { return new SyncFileInfo(bucketName, accessLogFile, destination(applicationId, hostName, "logs/access", accessLogFile, null), false); } public static SyncFileInfo infrastructureVespaLog(String bucketName, HostName hostName, Path vespaLogFile) { return new SyncFileInfo(bucketName, vespaLogFile, destination(null, hostName, "logs/vespa", vespaLogFile, ".zst"), true); } public static SyncFileInfo infrastructureAccessLog(String bucketName, HostName hostName, Path accessLogFile) { return new SyncFileInfo(bucketName, accessLogFile, destination(null, hostName, "logs/access", accessLogFile, null), false); } }
:+1: doubled it
private static Path destination(ApplicationId app, HostName hostName, String dir, Path filename, String extension) { StringBuilder sb = new StringBuilder(50).append('/'); if (app == null) sb.append("infrastructure"); else sb.append(app.tenant().value()).append('.').append(app.application().value()).append('.').append(app.instance().value()); sb.append('/'); for (char c: hostName.value().toCharArray()) { if (c == '.') break; sb.append(c); } sb.append('/').append(dir).append('/').append(filename.getFileName().toString()); if (extension != null) sb.append(extension); return Paths.get(sb.toString()); }
StringBuilder sb = new StringBuilder(50).append('/');
private static Path destination(ApplicationId app, HostName hostName, String dir, Path filename, String extension) { StringBuilder sb = new StringBuilder(100).append('/'); if (app == null) sb.append("infrastructure"); else sb.append(app.tenant().value()).append('.').append(app.application().value()).append('.').append(app.instance().value()); sb.append('/'); for (char c: hostName.value().toCharArray()) { if (c == '.') break; sb.append(c); } sb.append('/').append(dir).append('/').append(filename.getFileName().toString()); if (extension != null) sb.append(extension); return Paths.get(sb.toString()); }
class SyncFileInfo { private final String bucketName; private final Path srcPath; private final Path destPath; private final boolean compressWithZstd; private SyncFileInfo(String bucketName, Path srcPath, Path destPath, boolean compressWithZstd) { this.bucketName = bucketName; this.srcPath = srcPath; this.destPath = destPath; this.compressWithZstd = compressWithZstd; } public String bucketName() { return bucketName; } public Path srcPath() { return srcPath; } public Path destPath() { return destPath; } public InputStream inputStream() throws IOException { InputStream is = Files.newInputStream(srcPath); if (compressWithZstd) return new ZstdCompressingInputStream(is, 4 << 20); return is; } public static SyncFileInfo tenantVespaLog(String bucketName, ApplicationId applicationId, HostName hostName, Path vespaLogFile) { return new SyncFileInfo(bucketName, vespaLogFile, destination(applicationId, hostName, "logs/vespa", vespaLogFile, ".zst"), true); } public static SyncFileInfo tenantAccessLog(String bucketName, ApplicationId applicationId, HostName hostName, Path accessLogFile) { return new SyncFileInfo(bucketName, accessLogFile, destination(applicationId, hostName, "logs/access", accessLogFile, null), false); } public static SyncFileInfo infrastructureVespaLog(String bucketName, HostName hostName, Path vespaLogFile) { return new SyncFileInfo(bucketName, vespaLogFile, destination(null, hostName, "logs/vespa", vespaLogFile, ".zst"), true); } public static SyncFileInfo infrastructureAccessLog(String bucketName, HostName hostName, Path accessLogFile) { return new SyncFileInfo(bucketName, accessLogFile, destination(null, hostName, "logs/access", accessLogFile, null), false); } }
class SyncFileInfo { private final String bucketName; private final Path srcPath; private final Path destPath; private final boolean compressWithZstd; private SyncFileInfo(String bucketName, Path srcPath, Path destPath, boolean compressWithZstd) { this.bucketName = bucketName; this.srcPath = srcPath; this.destPath = destPath; this.compressWithZstd = compressWithZstd; } public String bucketName() { return bucketName; } public Path srcPath() { return srcPath; } public Path destPath() { return destPath; } public InputStream inputStream() throws IOException { InputStream is = Files.newInputStream(srcPath); if (compressWithZstd) return new ZstdCompressingInputStream(is, 4 << 20); return is; } public static SyncFileInfo tenantVespaLog(String bucketName, ApplicationId applicationId, HostName hostName, Path vespaLogFile) { return new SyncFileInfo(bucketName, vespaLogFile, destination(applicationId, hostName, "logs/vespa", vespaLogFile, ".zst"), true); } public static SyncFileInfo tenantAccessLog(String bucketName, ApplicationId applicationId, HostName hostName, Path accessLogFile) { return new SyncFileInfo(bucketName, accessLogFile, destination(applicationId, hostName, "logs/access", accessLogFile, null), false); } public static SyncFileInfo infrastructureVespaLog(String bucketName, HostName hostName, Path vespaLogFile) { return new SyncFileInfo(bucketName, vespaLogFile, destination(null, hostName, "logs/vespa", vespaLogFile, ".zst"), true); } public static SyncFileInfo infrastructureAccessLog(String bucketName, HostName hostName, Path accessLogFile) { return new SyncFileInfo(bucketName, accessLogFile, destination(null, hostName, "logs/access", accessLogFile, null), false); } }
Won't you get 2 conflicting providers for application clusters then ?
public ContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState, boolean zooKeeperLocalhostAffinity) { super(parent, configSubId); this.name = clusterId; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone(); this.zooKeeperLocalhostAffinity = zooKeeperLocalhostAffinity; componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class.getName(), null, "jdisc_http_service"); addComponent(new DefaultThreadpoolProvider(this)); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addSimpleComponent("com.yahoo.container.jdisc.DisabledConnectionLogProvider"); addJaxProviders(); }
addSimpleComponent("com.yahoo.container.jdisc.DisabledConnectionLogProvider");
public ContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState, boolean zooKeeperLocalhostAffinity) { super(parent, configSubId); this.name = clusterId; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone(); this.zooKeeperLocalhostAffinity = zooKeeperLocalhostAffinity; componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class.getName(), null, "jdisc_http_service"); addComponent(new DefaultThreadpoolProvider(this)); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addSimpleComponent("com.yahoo.container.jdisc.DisabledConnectionLogProvider"); addJaxProviders(); }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, PlatformBundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, ConfigserverConfig.Producer, CuratorConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "/reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final BindingPattern STATE_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT); public static final BindingPattern STATE_HANDLER_BINDING_2 = 
SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT + "/*"); public static final String ROOT_HANDLER_PATH = "/"; public static final BindingPattern ROOT_HANDLER_BINDING = SystemBindingPattern.fromHttpPath(ROOT_HANDLER_PATH); public static final BindingPattern VIP_HANDLER_BINDING = SystemBindingPattern.fromHttpPath("/status.html"); private final String name; protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private final boolean zooKeeperLocalhostAffinity; private final Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private boolean deferChangesUntilRestart = false; public ClusterSpec.Id id() { return ClusterSpec.Id.from(getName()); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new 
ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2); addComponent(stateHandler); } public void addDefaultRootHandler() { Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/ApplicationStatus")); addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings(VIP_HANDLER_BINDING); addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void 
addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ @SuppressWarnings("unused") public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setOwner(this); container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, BindingPattern... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (BindingPattern binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } public Http getHttp() { return http; } public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } public Optional<SecretStore> getSecretStore() { 
return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.setApplyOnRestart(getDeferChangesUntilRestart()); builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationId().application().value()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getChecksum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * Note that the set of platform bundles cannot change during the jdisc container's lifetime. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(PlatformBundlesConfig.Builder builder) { platformBundles.stream() .map(Path::toString) .forEach(builder::bundlePaths); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(512) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) 
containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { var compressionType = isHostedVespa ? AccessLogComponent.CompressionType.ZSTD : AccessLogComponent.CompressionType.GZIP; addComponent(new AccessLogComponent(this, AccessLogComponent.AccessLogType.jsonAccessLog, compressionType, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are running by having the Zone * object (which is constructed from this config) injected. 
*/ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } @Override public void getConfig(CuratorConfig.Builder builder) { if (getParent() instanceof ConfigserverCluster) return; for (var container : containers) { builder.server(new CuratorConfig.Server.Builder().hostname(container.getHostResource().getHostname())); } builder.zookeeperLocalhostAffinity(zooKeeperLocalhostAffinity); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. 
*/ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public String getEnvironmentVars() { return environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } protected abstract boolean messageBusEnabled(); /** * Mark whether the config emitted by this cluster currently should be applied by clients already running with * a previous generation of it only by restarting the consuming processes. */ public void setDeferChangesUntilRestart(boolean deferChangesUntilRestart) { this.deferChangesUntilRestart = deferChangesUntilRestart; } public boolean getDeferChangesUntilRestart() { return deferChangesUntilRestart; } /** Effective vcpu for the containers in cluster. Use this value as scale factor for performance/resource tuning. **/ public OptionalDouble vcpu() { return getContainers().stream() .filter(c -> c.getHostResource() != null && c.getHostResource().realResources() != null) .mapToDouble(c -> c.getHostResource().realResources().vcpu()) .max(); } }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, PlatformBundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, ConfigserverConfig.Producer, CuratorConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "/reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final BindingPattern STATE_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT); public static final BindingPattern STATE_HANDLER_BINDING_2 = 
SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT + "/*"); public static final String ROOT_HANDLER_PATH = "/"; public static final BindingPattern ROOT_HANDLER_BINDING = SystemBindingPattern.fromHttpPath(ROOT_HANDLER_PATH); public static final BindingPattern VIP_HANDLER_BINDING = SystemBindingPattern.fromHttpPath("/status.html"); private final String name; protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private final boolean zooKeeperLocalhostAffinity; private final Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private boolean deferChangesUntilRestart = false; public ClusterSpec.Id id() { return ClusterSpec.Id.from(getName()); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new 
ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2); addComponent(stateHandler); } public void addDefaultRootHandler() { Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/ApplicationStatus")); addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings(VIP_HANDLER_BINDING); addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void 
addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ @SuppressWarnings("unused") public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setOwner(this); container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, BindingPattern... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (BindingPattern binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } public Http getHttp() { return http; } public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } public Optional<SecretStore> getSecretStore() { 
return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.setApplyOnRestart(getDeferChangesUntilRestart()); builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationId().application().value()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getChecksum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * Note that the set of platform bundles cannot change during the jdisc container's lifetime. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(PlatformBundlesConfig.Builder builder) { platformBundles.stream() .map(Path::toString) .forEach(builder::bundlePaths); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(512) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) 
containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { var compressionType = isHostedVespa ? AccessLogComponent.CompressionType.ZSTD : AccessLogComponent.CompressionType.GZIP; addComponent(new AccessLogComponent(this, AccessLogComponent.AccessLogType.jsonAccessLog, compressionType, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are running by having the Zone * object (which is constructed from this config) injected. 
*/ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } @Override public void getConfig(CuratorConfig.Builder builder) { if (getParent() instanceof ConfigserverCluster) return; for (var container : containers) { builder.server(new CuratorConfig.Server.Builder().hostname(container.getHostResource().getHostname())); } builder.zookeeperLocalhostAffinity(zooKeeperLocalhostAffinity); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. 
*/ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public String getEnvironmentVars() { return environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } protected abstract boolean messageBusEnabled(); /** * Mark whether the config emitted by this cluster currently should be applied by clients already running with * a previous generation of it only by restarting the consuming processes. */ public void setDeferChangesUntilRestart(boolean deferChangesUntilRestart) { this.deferChangesUntilRestart = deferChangesUntilRestart; } public boolean getDeferChangesUntilRestart() { return deferChangesUntilRestart; } /** Effective vcpu for the containers in cluster. Use this value as scale factor for performance/resource tuning. **/ public OptionalDouble vcpu() { return getContainers().stream() .filter(c -> c.getHostResource() != null && c.getHostResource().realResources() != null) .mapToDouble(c -> c.getHostResource().realResources().vcpu()) .max(); } }
No, a provider acts as a fallback if not component implementing `ConnectionLog` is configured.
public ContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState, boolean zooKeeperLocalhostAffinity) { super(parent, configSubId); this.name = clusterId; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone(); this.zooKeeperLocalhostAffinity = zooKeeperLocalhostAffinity; componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class.getName(), null, "jdisc_http_service"); addComponent(new DefaultThreadpoolProvider(this)); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addSimpleComponent("com.yahoo.container.jdisc.DisabledConnectionLogProvider"); addJaxProviders(); }
addSimpleComponent("com.yahoo.container.jdisc.DisabledConnectionLogProvider");
public ContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState, boolean zooKeeperLocalhostAffinity) { super(parent, configSubId); this.name = clusterId; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone(); this.zooKeeperLocalhostAffinity = zooKeeperLocalhostAffinity; componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class.getName(), null, "jdisc_http_service"); addComponent(new DefaultThreadpoolProvider(this)); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addSimpleComponent("com.yahoo.container.jdisc.DisabledConnectionLogProvider"); addJaxProviders(); }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, PlatformBundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, ConfigserverConfig.Producer, CuratorConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "/reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final BindingPattern STATE_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT); public static final BindingPattern STATE_HANDLER_BINDING_2 = 
SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT + "/*"); public static final String ROOT_HANDLER_PATH = "/"; public static final BindingPattern ROOT_HANDLER_BINDING = SystemBindingPattern.fromHttpPath(ROOT_HANDLER_PATH); public static final BindingPattern VIP_HANDLER_BINDING = SystemBindingPattern.fromHttpPath("/status.html"); private final String name; protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private final boolean zooKeeperLocalhostAffinity; private final Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private boolean deferChangesUntilRestart = false; public ClusterSpec.Id id() { return ClusterSpec.Id.from(getName()); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new 
ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2); addComponent(stateHandler); } public void addDefaultRootHandler() { Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/ApplicationStatus")); addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings(VIP_HANDLER_BINDING); addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void 
addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ @SuppressWarnings("unused") public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setOwner(this); container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, BindingPattern... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (BindingPattern binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } public Http getHttp() { return http; } public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } public Optional<SecretStore> getSecretStore() { 
return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.setApplyOnRestart(getDeferChangesUntilRestart()); builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationId().application().value()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getChecksum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * Note that the set of platform bundles cannot change during the jdisc container's lifetime. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(PlatformBundlesConfig.Builder builder) { platformBundles.stream() .map(Path::toString) .forEach(builder::bundlePaths); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(512) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) 
containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { var compressionType = isHostedVespa ? AccessLogComponent.CompressionType.ZSTD : AccessLogComponent.CompressionType.GZIP; addComponent(new AccessLogComponent(this, AccessLogComponent.AccessLogType.jsonAccessLog, compressionType, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are running by having the Zone * object (which is constructed from this config) injected. 
*/ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } @Override public void getConfig(CuratorConfig.Builder builder) { if (getParent() instanceof ConfigserverCluster) return; for (var container : containers) { builder.server(new CuratorConfig.Server.Builder().hostname(container.getHostResource().getHostname())); } builder.zookeeperLocalhostAffinity(zooKeeperLocalhostAffinity); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. 
*/ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public String getEnvironmentVars() { return environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } protected abstract boolean messageBusEnabled(); /** * Mark whether the config emitted by this cluster currently should be applied by clients already running with * a previous generation of it only by restarting the consuming processes. */ public void setDeferChangesUntilRestart(boolean deferChangesUntilRestart) { this.deferChangesUntilRestart = deferChangesUntilRestart; } public boolean getDeferChangesUntilRestart() { return deferChangesUntilRestart; } /** Effective vcpu for the containers in cluster. Use this value as scale factor for performance/resource tuning. **/ public OptionalDouble vcpu() { return getContainers().stream() .filter(c -> c.getHostResource() != null && c.getHostResource().realResources() != null) .mapToDouble(c -> c.getHostResource().realResources().vcpu()) .max(); } }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, PlatformBundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, ConfigserverConfig.Producer, CuratorConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "/reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final BindingPattern STATE_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT); public static final BindingPattern STATE_HANDLER_BINDING_2 = 
SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT + "/*"); public static final String ROOT_HANDLER_PATH = "/"; public static final BindingPattern ROOT_HANDLER_BINDING = SystemBindingPattern.fromHttpPath(ROOT_HANDLER_PATH); public static final BindingPattern VIP_HANDLER_BINDING = SystemBindingPattern.fromHttpPath("/status.html"); private final String name; protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private final boolean zooKeeperLocalhostAffinity; private final Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private boolean deferChangesUntilRestart = false; public ClusterSpec.Id id() { return ClusterSpec.Id.from(getName()); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new 
ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2); addComponent(stateHandler); } public void addDefaultRootHandler() { Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/ApplicationStatus")); addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings(VIP_HANDLER_BINDING); addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void 
addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ @SuppressWarnings("unused") public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setOwner(this); container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, BindingPattern... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (BindingPattern binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } public Http getHttp() { return http; } public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } public Optional<SecretStore> getSecretStore() { 
return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.setApplyOnRestart(getDeferChangesUntilRestart()); builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationId().application().value()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getChecksum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * Note that the set of platform bundles cannot change during the jdisc container's lifetime. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(PlatformBundlesConfig.Builder builder) { platformBundles.stream() .map(Path::toString) .forEach(builder::bundlePaths); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(512) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) 
containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { var compressionType = isHostedVespa ? AccessLogComponent.CompressionType.ZSTD : AccessLogComponent.CompressionType.GZIP; addComponent(new AccessLogComponent(this, AccessLogComponent.AccessLogType.jsonAccessLog, compressionType, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are running by having the Zone * object (which is constructed from this config) injected. 
*/ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } @Override public void getConfig(CuratorConfig.Builder builder) { if (getParent() instanceof ConfigserverCluster) return; for (var container : containers) { builder.server(new CuratorConfig.Server.Builder().hostname(container.getHostResource().getHostname())); } builder.zookeeperLocalhostAffinity(zooKeeperLocalhostAffinity); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. 
*/ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public String getEnvironmentVars() { return environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } protected abstract boolean messageBusEnabled(); /** * Mark whether the config emitted by this cluster currently should be applied by clients already running with * a previous generation of it only by restarting the consuming processes. */ public void setDeferChangesUntilRestart(boolean deferChangesUntilRestart) { this.deferChangesUntilRestart = deferChangesUntilRestart; } public boolean getDeferChangesUntilRestart() { return deferChangesUntilRestart; } /** Effective vcpu for the containers in cluster. Use this value as scale factor for performance/resource tuning. **/ public OptionalDouble vcpu() { return getContainers().stream() .filter(c -> c.getHostResource() != null && c.getHostResource().realResources() != null) .mapToDouble(c -> c.getHostResource().realResources().vcpu()) .max(); } }
Instead of spread '1' around I would make it a constant that could be used in the tests. Or a static method that could be used in the tests compute real memory.
private double usableMemoryGb() { final double reservedMemoryGb = 1; double usableMemoryGb = resources.memoryGb() - reservedMemoryGb; if (!combined) { return usableMemoryGb; } double fractionTakenByContainer = (double)ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster / 100; return usableMemoryGb * (1 - fractionTakenByContainer); }
final double reservedMemoryGb = 1;
private double usableMemoryGb() { double usableMemoryGb = resources.memoryGb() - reservedMemoryGb; if (!combined) { return usableMemoryGb; } double fractionTakenByContainer = (double)ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster / 100; return usableMemoryGb * (1 - fractionTakenByContainer); }
class NodeResourcesTuning implements ProtonConfig.Producer { final static long MB = 1024 * 1024; final static long GB = MB * 1024; private final NodeResources resources; private final int redundancy; private final int searchableCopies; private final int threadsPerSearch; private final boolean combined; public NodeResourcesTuning(NodeResources resources, int redundancy, int searchableCopies, int threadsPerSearch, boolean combined) { this.resources = resources; this.redundancy = redundancy; this.searchableCopies = searchableCopies; this.threadsPerSearch = threadsPerSearch; this.combined = combined; } @Override public void getConfig(ProtonConfig.Builder builder) { setHwInfo(builder); tuneDiskWriteSpeed(builder); tuneRequestThreads(builder); tuneDocumentStoreMaxFileSize(builder.summary.log); tuneFlushStrategyMemoryLimits(builder.flush.memory); tuneFlushStrategyTlsSize(builder.flush.memory); tuneSummaryReadIo(builder.summary.read); tuneSummaryCache(builder.summary.cache); tuneSearchReadIo(builder.search.mmap); for (ProtonConfig.Documentdb.Builder dbb : builder.documentdb) { getConfig(dbb); } } private void getConfig(ProtonConfig.Documentdb.Builder builder) { ProtonConfig.Documentdb dbCfg = builder.build(); if (dbCfg.mode() != ProtonConfig.Documentdb.Mode.Enum.INDEX) { long numDocs = (long)usableMemoryGb() * GB / 64L; builder.allocation.initialnumdocs(numDocs/Math.max(searchableCopies, redundancy)); } } private void tuneSummaryCache(ProtonConfig.Summary.Cache.Builder builder) { long memoryLimitBytes = (long) ((usableMemoryGb() * 0.05) * GB); builder.maxbytes(memoryLimitBytes); } private void setHwInfo(ProtonConfig.Builder builder) { builder.hwinfo.disk.shared(true); builder.hwinfo.cpu.cores((int)resources.vcpu()); builder.hwinfo.memory.size((long)(usableMemoryGb() * GB)); builder.hwinfo.disk.size((long)(resources.diskGb() * GB)); } private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) { if (resources.diskSpeed() != NodeResources.DiskSpeed.fast) { 
builder.hwinfo.disk.writespeed(40); } } private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) { double memoryGb = usableMemoryGb(); long fileSizeBytes = 4 * GB; if (memoryGb <= 12.0) { fileSizeBytes = 256 * MB; } else if (memoryGb < 24.0) { fileSizeBytes = 512 * MB; } else if (memoryGb <= 64.0) { fileSizeBytes = 1 * GB; } builder.maxfilesize(fileSizeBytes); } private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) { long memoryLimitBytes = (long) ((usableMemoryGb() / 8) * GB); builder.maxmemory(memoryLimitBytes); builder.each.maxmemory(memoryLimitBytes); } private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) { long tlsSizeBytes = (long) ((resources.diskGb() * 0.07) * GB); tlsSizeBytes = min(tlsSizeBytes, 100 * GB); builder.maxtlssize(tlsSizeBytes); } private void tuneSummaryReadIo(ProtonConfig.Summary.Read.Builder builder) { if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) { builder.io(ProtonConfig.Summary.Read.Io.DIRECTIO); } } private void tuneSearchReadIo(ProtonConfig.Search.Mmap.Builder builder) { if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) { builder.advise(ProtonConfig.Search.Mmap.Advise.RANDOM); } } private void tuneRequestThreads(ProtonConfig.Builder builder) { int numCores = (int)Math.ceil(resources.vcpu()); builder.numsearcherthreads(numCores*threadsPerSearch); builder.numsummarythreads(numCores); builder.numthreadspersearch(threadsPerSearch); } /** Returns the memory we can expect will be available for the content node processes */ }
class NodeResourcesTuning implements ProtonConfig.Producer { final static long MB = 1024 * 1024; public final static long GB = MB * 1024; private final NodeResources resources; private final int redundancy; private final int searchableCopies; private final int threadsPerSearch; private final boolean combined; public static final double reservedMemoryGb = 1; public NodeResourcesTuning(NodeResources resources, int redundancy, int searchableCopies, int threadsPerSearch, boolean combined) { this.resources = resources; this.redundancy = redundancy; this.searchableCopies = searchableCopies; this.threadsPerSearch = threadsPerSearch; this.combined = combined; } @Override public void getConfig(ProtonConfig.Builder builder) { setHwInfo(builder); tuneDiskWriteSpeed(builder); tuneRequestThreads(builder); tuneDocumentStoreMaxFileSize(builder.summary.log); tuneFlushStrategyMemoryLimits(builder.flush.memory); tuneFlushStrategyTlsSize(builder.flush.memory); tuneSummaryReadIo(builder.summary.read); tuneSummaryCache(builder.summary.cache); tuneSearchReadIo(builder.search.mmap); for (ProtonConfig.Documentdb.Builder dbb : builder.documentdb) { getConfig(dbb); } } private void getConfig(ProtonConfig.Documentdb.Builder builder) { ProtonConfig.Documentdb dbCfg = builder.build(); if (dbCfg.mode() != ProtonConfig.Documentdb.Mode.Enum.INDEX) { long numDocs = (long)usableMemoryGb() * GB / 64L; builder.allocation.initialnumdocs(numDocs/Math.max(searchableCopies, redundancy)); } } private void tuneSummaryCache(ProtonConfig.Summary.Cache.Builder builder) { long memoryLimitBytes = (long) ((usableMemoryGb() * 0.05) * GB); builder.maxbytes(memoryLimitBytes); } private void setHwInfo(ProtonConfig.Builder builder) { builder.hwinfo.disk.shared(true); builder.hwinfo.cpu.cores((int)resources.vcpu()); builder.hwinfo.memory.size((long)(usableMemoryGb() * GB)); builder.hwinfo.disk.size((long)(resources.diskGb() * GB)); } private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) { if 
(resources.diskSpeed() != NodeResources.DiskSpeed.fast) { builder.hwinfo.disk.writespeed(40); } } private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) { double memoryGb = usableMemoryGb(); long fileSizeBytes = 4 * GB; if (memoryGb <= 12.0) { fileSizeBytes = 256 * MB; } else if (memoryGb < 24.0) { fileSizeBytes = 512 * MB; } else if (memoryGb <= 64.0) { fileSizeBytes = 1 * GB; } builder.maxfilesize(fileSizeBytes); } private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) { long memoryLimitBytes = (long) ((usableMemoryGb() / 8) * GB); builder.maxmemory(memoryLimitBytes); builder.each.maxmemory(memoryLimitBytes); } private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) { long tlsSizeBytes = (long) ((resources.diskGb() * 0.07) * GB); tlsSizeBytes = min(tlsSizeBytes, 100 * GB); builder.maxtlssize(tlsSizeBytes); } private void tuneSummaryReadIo(ProtonConfig.Summary.Read.Builder builder) { if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) { builder.io(ProtonConfig.Summary.Read.Io.DIRECTIO); } } private void tuneSearchReadIo(ProtonConfig.Search.Mmap.Builder builder) { if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) { builder.advise(ProtonConfig.Search.Mmap.Advise.RANDOM); } } private void tuneRequestThreads(ProtonConfig.Builder builder) { int numCores = (int)Math.ceil(resources.vcpu()); builder.numsearcherthreads(numCores*threadsPerSearch); builder.numsummarythreads(numCores); builder.numthreadspersearch(threadsPerSearch); } /** Returns the memory we can expect will be available for the content node processes */ }
Yes, I agree. Fixed. @baldersheim PTAL.
private double usableMemoryGb() { final double reservedMemoryGb = 1; double usableMemoryGb = resources.memoryGb() - reservedMemoryGb; if (!combined) { return usableMemoryGb; } double fractionTakenByContainer = (double)ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster / 100; return usableMemoryGb * (1 - fractionTakenByContainer); }
final double reservedMemoryGb = 1;
private double usableMemoryGb() { double usableMemoryGb = resources.memoryGb() - reservedMemoryGb; if (!combined) { return usableMemoryGb; } double fractionTakenByContainer = (double)ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster / 100; return usableMemoryGb * (1 - fractionTakenByContainer); }
class NodeResourcesTuning implements ProtonConfig.Producer { final static long MB = 1024 * 1024; final static long GB = MB * 1024; private final NodeResources resources; private final int redundancy; private final int searchableCopies; private final int threadsPerSearch; private final boolean combined; public NodeResourcesTuning(NodeResources resources, int redundancy, int searchableCopies, int threadsPerSearch, boolean combined) { this.resources = resources; this.redundancy = redundancy; this.searchableCopies = searchableCopies; this.threadsPerSearch = threadsPerSearch; this.combined = combined; } @Override public void getConfig(ProtonConfig.Builder builder) { setHwInfo(builder); tuneDiskWriteSpeed(builder); tuneRequestThreads(builder); tuneDocumentStoreMaxFileSize(builder.summary.log); tuneFlushStrategyMemoryLimits(builder.flush.memory); tuneFlushStrategyTlsSize(builder.flush.memory); tuneSummaryReadIo(builder.summary.read); tuneSummaryCache(builder.summary.cache); tuneSearchReadIo(builder.search.mmap); for (ProtonConfig.Documentdb.Builder dbb : builder.documentdb) { getConfig(dbb); } } private void getConfig(ProtonConfig.Documentdb.Builder builder) { ProtonConfig.Documentdb dbCfg = builder.build(); if (dbCfg.mode() != ProtonConfig.Documentdb.Mode.Enum.INDEX) { long numDocs = (long)usableMemoryGb() * GB / 64L; builder.allocation.initialnumdocs(numDocs/Math.max(searchableCopies, redundancy)); } } private void tuneSummaryCache(ProtonConfig.Summary.Cache.Builder builder) { long memoryLimitBytes = (long) ((usableMemoryGb() * 0.05) * GB); builder.maxbytes(memoryLimitBytes); } private void setHwInfo(ProtonConfig.Builder builder) { builder.hwinfo.disk.shared(true); builder.hwinfo.cpu.cores((int)resources.vcpu()); builder.hwinfo.memory.size((long)(usableMemoryGb() * GB)); builder.hwinfo.disk.size((long)(resources.diskGb() * GB)); } private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) { if (resources.diskSpeed() != NodeResources.DiskSpeed.fast) { 
builder.hwinfo.disk.writespeed(40); } } private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) { double memoryGb = usableMemoryGb(); long fileSizeBytes = 4 * GB; if (memoryGb <= 12.0) { fileSizeBytes = 256 * MB; } else if (memoryGb < 24.0) { fileSizeBytes = 512 * MB; } else if (memoryGb <= 64.0) { fileSizeBytes = 1 * GB; } builder.maxfilesize(fileSizeBytes); } private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) { long memoryLimitBytes = (long) ((usableMemoryGb() / 8) * GB); builder.maxmemory(memoryLimitBytes); builder.each.maxmemory(memoryLimitBytes); } private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) { long tlsSizeBytes = (long) ((resources.diskGb() * 0.07) * GB); tlsSizeBytes = min(tlsSizeBytes, 100 * GB); builder.maxtlssize(tlsSizeBytes); } private void tuneSummaryReadIo(ProtonConfig.Summary.Read.Builder builder) { if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) { builder.io(ProtonConfig.Summary.Read.Io.DIRECTIO); } } private void tuneSearchReadIo(ProtonConfig.Search.Mmap.Builder builder) { if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) { builder.advise(ProtonConfig.Search.Mmap.Advise.RANDOM); } } private void tuneRequestThreads(ProtonConfig.Builder builder) { int numCores = (int)Math.ceil(resources.vcpu()); builder.numsearcherthreads(numCores*threadsPerSearch); builder.numsummarythreads(numCores); builder.numthreadspersearch(threadsPerSearch); } /** Returns the memory we can expect will be available for the content node processes */ }
class NodeResourcesTuning implements ProtonConfig.Producer { final static long MB = 1024 * 1024; public final static long GB = MB * 1024; private final NodeResources resources; private final int redundancy; private final int searchableCopies; private final int threadsPerSearch; private final boolean combined; public static final double reservedMemoryGb = 1; public NodeResourcesTuning(NodeResources resources, int redundancy, int searchableCopies, int threadsPerSearch, boolean combined) { this.resources = resources; this.redundancy = redundancy; this.searchableCopies = searchableCopies; this.threadsPerSearch = threadsPerSearch; this.combined = combined; } @Override public void getConfig(ProtonConfig.Builder builder) { setHwInfo(builder); tuneDiskWriteSpeed(builder); tuneRequestThreads(builder); tuneDocumentStoreMaxFileSize(builder.summary.log); tuneFlushStrategyMemoryLimits(builder.flush.memory); tuneFlushStrategyTlsSize(builder.flush.memory); tuneSummaryReadIo(builder.summary.read); tuneSummaryCache(builder.summary.cache); tuneSearchReadIo(builder.search.mmap); for (ProtonConfig.Documentdb.Builder dbb : builder.documentdb) { getConfig(dbb); } } private void getConfig(ProtonConfig.Documentdb.Builder builder) { ProtonConfig.Documentdb dbCfg = builder.build(); if (dbCfg.mode() != ProtonConfig.Documentdb.Mode.Enum.INDEX) { long numDocs = (long)usableMemoryGb() * GB / 64L; builder.allocation.initialnumdocs(numDocs/Math.max(searchableCopies, redundancy)); } } private void tuneSummaryCache(ProtonConfig.Summary.Cache.Builder builder) { long memoryLimitBytes = (long) ((usableMemoryGb() * 0.05) * GB); builder.maxbytes(memoryLimitBytes); } private void setHwInfo(ProtonConfig.Builder builder) { builder.hwinfo.disk.shared(true); builder.hwinfo.cpu.cores((int)resources.vcpu()); builder.hwinfo.memory.size((long)(usableMemoryGb() * GB)); builder.hwinfo.disk.size((long)(resources.diskGb() * GB)); } private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) { if 
(resources.diskSpeed() != NodeResources.DiskSpeed.fast) { builder.hwinfo.disk.writespeed(40); } } private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) { double memoryGb = usableMemoryGb(); long fileSizeBytes = 4 * GB; if (memoryGb <= 12.0) { fileSizeBytes = 256 * MB; } else if (memoryGb < 24.0) { fileSizeBytes = 512 * MB; } else if (memoryGb <= 64.0) { fileSizeBytes = 1 * GB; } builder.maxfilesize(fileSizeBytes); } private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) { long memoryLimitBytes = (long) ((usableMemoryGb() / 8) * GB); builder.maxmemory(memoryLimitBytes); builder.each.maxmemory(memoryLimitBytes); } private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) { long tlsSizeBytes = (long) ((resources.diskGb() * 0.07) * GB); tlsSizeBytes = min(tlsSizeBytes, 100 * GB); builder.maxtlssize(tlsSizeBytes); } private void tuneSummaryReadIo(ProtonConfig.Summary.Read.Builder builder) { if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) { builder.io(ProtonConfig.Summary.Read.Io.DIRECTIO); } } private void tuneSearchReadIo(ProtonConfig.Search.Mmap.Builder builder) { if (resources.diskSpeed() == NodeResources.DiskSpeed.fast) { builder.advise(ProtonConfig.Search.Mmap.Advise.RANDOM); } } private void tuneRequestThreads(ProtonConfig.Builder builder) { int numCores = (int)Math.ceil(resources.vcpu()); builder.numsearcherthreads(numCores*threadsPerSearch); builder.numsummarythreads(numCores); builder.numthreadspersearch(threadsPerSearch); } /** Returns the memory we can expect will be available for the content node processes */ }
4 is not equal to 5, so what happens here?
public void testRequiredNodesAndDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' required='true'/>" + " </content>" + "</services>"; int numberOfHosts = 4; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); tester.createModel(services, false); }
int numberOfHosts = 4;
public void testRequiredNodesAndDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' required='true'/>" + " </content>" + "</services>"; int numberOfHosts = 4; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); tester.createModel(services, false); }
class ModelProvisioningTest { @Test public void testNodesJdisc() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>\n" + "\n" + "<admin version='3.0'><nodes count='1' /></admin>\n" + "<container id='mydisc' version='1.0'>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes count=\"3\"/>" + "</container>" + "<container id='mydisc2' version='1.0'>" + " <document-processing/>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes count='2' allocated-memory='45%' jvm-gc-options='-XX:+UseParNewGC' jvm-options='-verbosegc' preload='lib/blablamalloc.so'/>" + "</container>" + "</services>"; String hosts ="<hosts>" + " <host name='myhost0'>" + " <alias>node0</alias>" + " </host>" + " <host name='myhost1'>" + " <alias>node1</alias>" + " </host>" + " <host name='myhost2'>" + " <alias>node2</alias>" + " </host>" + " <host name='myhost3'>" + " <alias>node3</alias>" + " </host>" + " <host name='myhost4'>" + " <alias>node4</alias>" + " </host>" + " <host name='myhost5'>" + " <alias>node5</alias>" + " </host>" + "</hosts>"; VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services); VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true, false))); ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc"); ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2"); assertEquals(3, mydisc.getContainers().size()); assertEquals("mydisc/container.0", (mydisc.getContainers().get(0).getConfigId())); assertTrue(mydisc.getContainers().get(0).isInitialized()); assertEquals("mydisc/container.1", mydisc.getContainers().get(1).getConfigId()); assertTrue(mydisc.getContainers().get(1).isInitialized()); assertEquals("mydisc/container.2", mydisc.getContainers().get(2).getConfigId()); 
assertTrue(mydisc.getContainers().get(2).isInitialized()); assertEquals(2, mydisc2.getContainers().size()); assertEquals("mydisc2/container.0", mydisc2.getContainers().get(0).getConfigId()); assertTrue(mydisc2.getContainers().get(0).isInitialized()); assertEquals("mydisc2/container.1", mydisc2.getContainers().get(1).getConfigId()); assertTrue(mydisc2.getContainers().get(1).isInitialized()); assertEquals("", mydisc.getContainers().get(0).getJvmOptions()); assertEquals("", mydisc.getContainers().get(1).getJvmOptions()); assertEquals("", mydisc.getContainers().get(2).getJvmOptions()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(0).getPreLoad()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(1).getPreLoad()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(2).getPreLoad()); assertEquals(Optional.empty(), mydisc.getMemoryPercentage()); assertEquals("-verbosegc", mydisc2.getContainers().get(0).getJvmOptions()); assertEquals("-verbosegc", mydisc2.getContainers().get(1).getJvmOptions()); assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(0).getPreLoad()); assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(1).getPreLoad()); assertEquals(Optional.of(45), mydisc2.getMemoryPercentage()); assertEquals(Optional.of("-XX:+UseParNewGC"), mydisc2.getJvmGCOptions()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); mydisc2.getConfig(qrStartBuilder); QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); HostSystem hostSystem = model.hostSystem(); assertNotNull(hostSystem.getHostByHostname("myhost0")); assertNotNull(hostSystem.getHostByHostname("myhost1")); assertNotNull(hostSystem.getHostByHostname("myhost2")); 
assertNotNull(hostSystem.getHostByHostname("myhost3")); assertNull(hostSystem.getHostByHostname("Nope")); } @Test public void testNodeCountForContentGroup() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "\n" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); int numberOfHosts = 2; tester.addHosts(numberOfHosts); int numberOfContentNodes = 2; VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); Map<String, ContentCluster> contentClusters = model.getContentClusters(); ContentCluster cluster = contentClusters.get("bar"); assertEquals(numberOfContentNodes, cluster.getRootGroup().getNodes().size()); int i = 0; for (StorageNode node : cluster.getRootGroup().getNodes()) assertEquals(i++, node.getDistributionKey()); } @Test public void testSeparateClusters() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <search/>" + " <nodes count='1'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + " <content version='1.0'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(5); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in 
container1", 1, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in cluster without ID", 2, model.getContentClusters().get("content").getRootGroup().getNodes().size()); assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model); assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model); assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model); } @Test public void testClusterMembership() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes count='1'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(1); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals(1, model.hostSystem().getHosts().size()); HostResource host = model.hostSystem().getHosts().iterator().next(); assertTrue(host.spec().membership().isPresent()); assertEquals("container", host.spec().membership().get().cluster().type().name()); assertEquals("container1", host.spec().membership().get().cluster().id().value()); } @Test public void testCombinedCluster() { var containerElements = Set.of("jdisc", "container"); for (var containerElement : containerElements) { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <" + containerElement + " version='1.0' id='container1'>" + " <search/>" + " <nodes of='content1'/>" + " </" + containerElement + ">" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'>" + " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); 
tester.addHosts(2); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Heap size is lowered with combined clusters", 17, physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertEquals("Memory for proton is lowered to account for the jvm heap", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.17)), protonMemorySize(model.getContentClusters().get("content1"))); assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model); assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model); } } /** For comparison with the above */ @Test public void testNonCombinedCluster() { var containerElements = Set.of("jdisc", "container"); for (var containerElement : containerElements) { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <" + containerElement + " version='1.0' id='container1'>" + " <search/>" + " <nodes count='2'/>" + " </" + containerElement + ">" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'>" + " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(4); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Heap size is normal", 60, 
physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertEquals("Memory for proton is normal", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1"))); } } @Test public void testCombinedClusterWithJvmOptions() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <document-processing/>" + " <nodes of='content1' jvm-options='testoption'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); for (Container container : model.getContainerClusters().get("container1").getContainers()) assertTrue(container.getJvmOptions().contains("testoption")); } @Test public void testMultipleCombinedClusters() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='content1'/>" + " </container>" + " <container version='1.0' id='container2'>" + " <nodes of='content2'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + " <content version='1.0' id='content2'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3'/>" + " </content>" + "</services>"; VespaModelTester tester = new 
VespaModelTester(); tester.addHosts(5); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size()); assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size()); } @Test public void testNonExistingCombinedClusterReference() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='container2'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); try { tester.createModel(xmlWithNodes, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("container cluster 'container1' contains an invalid reference: referenced service 'container2' is not defined", Exceptions.toMessageString(e)); } } @Test public void testInvalidCombinedClusterReference() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" + " </container>" + " <container version='1.0' id='container2'>" + " <nodes count='2'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); try { tester.createModel(xmlWithNodes, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("container cluster 'container1' contains an invalid reference: service 'container2' is not a content service", Exceptions.toMessageString(e)); } } @Test public void testUsingNodesAndGroupCountAttributes() { String services = "<?xml version='1.0' 
encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='27' groups='9'/>" + " </content>" + " <content version='1.0' id='baz'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='27' groups='27'/>" + " </content>" + "</services>"; int numberOfHosts = 64; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getContainerClusters().size()); Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream() .map(Container::getHost) .collect(Collectors.toSet()); assertEquals(10, containerHosts.size()); Admin admin = model.getAdmin(); Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet()); assertEquals(3, slobrokHosts.size()); assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts)); assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost())); assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size()); assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers()); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-54", 
clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-51", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-48", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(9, cluster.getRootGroup().getSubgroups().size()); assertEquals("0", cluster.getRootGroup().getSubgroups().get(0).getIndex()); assertEquals(3, cluster.getRootGroup().getSubgroups().get(0).getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertEquals("node-1-3-10-54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3")); assertEquals("node-1-3-10-51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4")); 
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5")); assertEquals("node-1-3-10-48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26")); cluster = model.getContentClusters().get("baz"); clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("baz-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-27", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-26", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-25", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(27, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1)); 
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0")); assertEquals("node-1-3-10-27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1")); assertEquals("node-1-3-10-26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26")); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26)); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26")); } @Test public void testGroupsOfSize1() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='8' groups='8'/>" + " </content>" + "</services>"; int numberOfHosts = 18; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); 
ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-07", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(8, cluster.getRootGroup().getSubgroups().size()); assertEquals(8, cluster.distributionBits()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertEquals("node-1-3-10-08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1")); assertEquals("node-1-3-10-07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7")); assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7)); 
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7")); assertEquals("node-1-3-10-01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName()); } /* Requesting 6 non-dedicated controllers: the count is adjusted to the closest odd number, 5. */ @Test public void testExplicitNonDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <admin version='4.0'/>" + "  <container version='1.0' id='foo'>" + "     <nodes count='10'/>" + "  </container>" + "  <content version='1.0' id='bar'>" + "     <redundancy>2</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <controllers><nodes dedicated='false' count='6'/></controllers>" + "     <nodes count='9' groups='3'/>" + "  </content>" + "</services>"; int numberOfHosts = 19; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals( 8, cluster.distributionBits()); assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-09", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName()); assertEquals("node-1-3-10-05", clusterControllers.getContainers().get(3).getHostName()); assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(4).getHostName()); assertEquals("node-1-3-10-09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-08", 
cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName()); assertEquals("node-1-3-10-06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); } /* Group size 2: the implicit controller count becomes the closest odd number, 3. */ @Test public void testClusterControllersWithGroupSize2() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <admin version='4.0'/>" + "  <container version='1.0' id='foo'>" + "     <nodes count='10'/>" + "  </container>" + "  <content version='1.0' id='bar'>" + "     <redundancy>2</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='8' groups='4'/>" + "  </content>" + "</services>"; int numberOfHosts = 18; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals("We get the closest odd number", 3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(2).getHostName()); } /* Retired hosts keep their controller role while non-retired replacements are added first. */ @Test public void testClusterControllersIncludeNonRetiredNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <admin version='4.0'/>" + "  <container version='1.0' id='foo'>" + "     <nodes count='10'/>" + "  </container>" + "  <content version='1.0' id='bar'>" + "     <redundancy>2</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes 
count='9' groups='3'/>" + "  </content>" + "</services>"; int numberOfHosts = 19; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-09", "node-1-3-10-06", "node-1-3-10-03"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3 + 3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("Non-retired", "node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("Non-retired", "node-1-3-10-05", clusterControllers.getContainers().get(1).getHostName()); assertEquals("Non-retired", "node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName()); assertEquals("Retired", "node-1-3-10-09", clusterControllers.getContainers().get(3).getHostName()); assertEquals("Retired", "node-1-3-10-06", clusterControllers.getContainers().get(4).getHostName()); assertEquals("Retired", "node-1-3-10-03", clusterControllers.getContainers().get(5).getHostName()); } /* The slobrok cluster is expanded so retired hosts remain members in addition to the regular 3. */ @Test public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <admin version='4.0'/>" + "  <container version='1.0' id='foo'>" + "     <nodes count='10'/>" + "  </container>" + "</services>"; int numberOfHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-09"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); assertEquals("Includes retired node", 1+3, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-08", 
model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("node-1-3-10-07", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-09", model.getAdmin().getSlobroks().get(3).getHostName()); } /* Same retired-host expansion when the retired hosts sort after the active ones. */ @Test public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <admin version='4.0'/>" + "  <container version='1.0' id='foo'>" + "     <nodes count='10'/>" + "  </container>" + "</services>"; int numberOfHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-01", "node-1-3-10-02"); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-09", model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("node-1-3-10-08", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(3).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-01", model.getAdmin().getSlobroks().get(4).getHostName()); } /* With two container clusters the slobroks are spread across both of them. */ @Test public void testSlobroksAreSpreadOverAllContainerClusters() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <admin version='4.0'/>" + "  <container version='1.0' id='foo'>" + "     <nodes count='10'/>" + "  </container>" + "  <container version='1.0' id='bar'>" + "     <nodes count='3'/>" + "  </container>" + "</services>"; int numberOfHosts = 13; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-12", "node-1-3-10-03", 
"node-1-3-10-02"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-13", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-11", model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-12", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("node-1-3-10-01", model.getAdmin().getSlobroks().get(3).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-03", model.getAdmin().getSlobroks().get(4).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(5).getHostName()); } /* Only 2 content nodes: a single cluster controller is provisioned. */ @Test public void test2ContentNodesProduces1ClusterController() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <content version='1.0' id='bar'>" + "     <redundancy>2</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='2'/>" + "  </content>" + "</services>"; int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(1, clusterControllers.getContainers().size()); } /* Dedicated controller cluster: content clusters get none of their own, a shared 3-node admin cluster is created. */ @Test public void testDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <content version='1.0' id='foo'>" + "     <redundancy>2</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='2' />" + "  </content>" + "  <content version='1.0' id='bar'>" + "     
<redundancy>2</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='2' />" + "  </content>" + "</services>"; int numberOfHosts = 7; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); VespaModel model = tester.createModel(services); assertEquals(7, model.getRoot().hostSystem().getHosts().size()); assertNull(model.getContentClusters().get("foo").getClusterControllers()); assertNull(model.getContentClusters().get("bar").getClusterControllers()); ClusterControllerContainerCluster clusterControllers = model.getAdmin().getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("cluster-controllers", clusterControllers.getName()); clusterControllers.getContainers().stream().map(ClusterControllerContainer::getHost).forEach(host -> { assertTrue(host.spec().membership().get().cluster().isStateful()); assertEquals(ClusterSpec.Type.admin, host.spec().membership().get().cluster().type()); }); } /* Explicitly dedicated controllers: exactly the 4 requested controller nodes are allocated. */ @Test public void testExplicitDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <container version='1.0' id='foo'>" + "     <nodes count='10'/>" + "  </container>" + "  <content version='1.0' id='bar'>" + "     <redundancy>2</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <controllers><nodes dedicated='true' count='4'/></controllers>" + "     <nodes count='9' groups='3'/>" + "  </content>" + "</services>"; int numberOfHosts = 23; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(4, 
clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName()); assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(3).getHostName()); } /* An explicit dedicated logserver node means no extra container is needed on the logserver host. */ @Test public void testLogserverContainerWhenDedicatedLogserver() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <admin version='4.0'>" + "    <logservers>" + "      <nodes count='1' dedicated='true'/>" + "    </logservers>" + "  </admin>" + "  <container version='1.0' id='foo'>" + "     <nodes count='1'/>" + "  </container>" + "</services>"; boolean useDedicatedNodeForLogserver = false; testContainerOnLogserverHost(services, useDedicatedNodeForLogserver); } /* Without an admin element the logserver container is placed implicitly on a dedicated node. */ @Test public void testImplicitLogserverContainer() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <container version='1.0' id='foo'>" + "     <nodes count='1'/>" + "  </container>" + "</services>"; boolean useDedicatedNodeForLogserver = true; testContainerOnLogserverHost(services, useDedicatedNodeForLogserver); } /* Fewer hosts than requested: node count, group count and redundancy are scaled down. */ @Test public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "  <admin version='3.0'>" + "    <nodes count='3'/>" + "  </admin>" + "  <content version='1.0' id='bar'>" + "     <redundancy reply-after='3'>4</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='24' groups='3'/>" + "     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" + "  </content>" + "</services>"; int numberOfHosts = 6; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); 
assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); /* Downscaled to 6 hosts in 3 groups of 2; requested redundancy and searchable-copies are capped by the group size. */ assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveReadyCopies()); assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(3, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4)); 
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5")); } /* Flat cluster downscaled to 4 nodes: redundancy, searchable copies and dispatch groups are all capped at 4. */ @Test public void testUsingNodesCountAttributesAndGettingTooFewNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "  <admin version='3.0'>" + "    <nodes count='3'/>" + "  </admin>" + "  <container version='1.0' id='container'>" + "     <search/>" + "     <nodes count='2'/>" + "  </container>" + "  <content version='1.0' id='bar'>" + "     <redundancy reply-after='8'>12</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='24'/>" + "     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + "     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + "  </content>" + "</services>"; int numberOfHosts = 6; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(4, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(4, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(4, cluster.redundancy().effectiveReadyCopies()); assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertEquals(4, cluster.getSearch().getIndexed().getSearchableCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(4, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(4)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getNodes().get(2).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getNodes().get(2).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getNodes().get(3).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getNodes().get(3).getConfigId(), is("bar/storage/3")); } /* A single available host collapses the grouped cluster to one node with redundancy 1. */ @Test public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <admin version='3.0'>" + "    <nodes count='3'/>" + "  </admin>" + "  <content version='1.0' id='bar'>" + "     <redundancy reply-after='3'>4</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='24' groups='3'/>" + "     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" + "  </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(1, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(0).getHostName()); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, 
cluster.redundancy().effectiveReadyCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } /* required='true' with too few hosts must fail instead of downscaling. */ @Test(expected = IllegalArgumentException.class) public void testRequiringMoreNodesThanAreAvailable() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <content version='1.0' id='bar'>" + "     <redundancy>1</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='3' required='true'/>" + "  </content>" + "</services>"; int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.createModel(services, false); } /* exclusive='true' must propagate to every host, including the dedicated controllers. (A stray duplicate @Test(expected=...) annotation was removed here: @Test is not repeatable, so the duplicate did not compile, and this test asserts after createModel rather than expecting an exception.) */ @Test public void testExclusiveDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <content version='1.0' id='foo'>" + "     <redundancy>1</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='2' exclusive='true'/>" + "  </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); VespaModel model = tester.createModel(services); assertEquals(5, model.hostSystem().getHosts().size()); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } /* exclusive='true' on both clusters: every provisioned host is exclusive. */ @Test public void testExclusiveNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<container version='1.0' id='container'>" + "     <nodes count='2' 
exclusive='true'/>" + "  </container>" + "  <content version='1.0' id='bar'>" + "     <redundancy>1</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='3' exclusive='true'/>" + "  </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } /* A single available host collapses the flat cluster to one node with redundancy 1. */ @Test public void testUsingNodesCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "  <admin version='3.0'>" + "    <nodes count='3'/>" + "  </admin>" + "  <content version='1.0' id='bar'>" + "     <redundancy reply-after='8'>12</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='24'/>" + "     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + "     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + "  </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, cluster.redundancy().effectiveReadyCopies()); assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } /* Every cluster requests distinct node resources; hosts with matching flavors must be picked for all 23 nodes. */ @Test public void testRequestingSpecificNodeResources() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "  <admin version='4.0'>" + "    <logservers>" + "      <nodes count='1' dedicated='true'>" + "        <resources vcpu='0.1' memory='0.2Gb' disk='300Gb' disk-speed='slow'/>" + "      </nodes>" + "    </logservers>" + "    <slobroks>" + "      <nodes count='2' dedicated='true'>" + "        <resources vcpu='0.1' memory='0.3Gb' disk='1Gb' bandwidth='500Mbps'/>" + "      </nodes>" + "    </slobroks>" + "  </admin>" + "  <container version='1.0' id='container'>" + "     <nodes count='4'>" + "       <resources vcpu='12' memory='10Gb' disk='30Gb'/>" + "     </nodes>" + "  </container>" + "  <content version='1.0' id='foo'>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <controllers>" + "       <nodes count='2' dedicated='true'>" + "         <resources vcpu='0.8' memory='3Gb' disk='2Gb'/>" + "       </nodes>" + "     </controllers>" + "     <nodes count='5'>" + "       <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + "     </nodes>" + "  </content>" + "  <content version='1.0' id='bar'>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <controllers>" + "       <nodes count='3' dedicated='true'>" + "         <resources vcpu='0.7' memory='2Gb' disk='2.5Gb'/>" + "       </nodes>" + "     </controllers>" + "     <nodes count='6'>" + "       <resources vcpu='10' memory='64Gb' disk='200Gb'/>" + "     </nodes>" + "  </content>" + "</services>"; int totalHosts = 23; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(0.1, 0.2, 300, 0.3, NodeResources.DiskSpeed.slow), 1); tester.addHosts(new NodeResources(0.1, 0.3, 1, 0.5), 2); tester.addHosts(new NodeResources(12, 10, 30, 0.3), 4); tester.addHosts(new NodeResources(0.8, 3, 2, 0.3), 2); tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5); tester.addHosts(new 
NodeResources(0.7, 2, 2.5, 0.3), 3); tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6); VespaModel model = tester.createModel(services, true, 0); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } /* Count/resource ranges: by default the minimum of each range is allocated. */ @Test public void testRequestingRangesMin() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "  <container version='1.0' id='container'>" + "     <nodes count='[4, 6]'>" + "       <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + "     </nodes>" + "  </container>" + "  <content version='1.0' id='foo'>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='[6, 20]' groups='[3,4]'>" + "       <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + "     </nodes>" + "  </content>" + "</services>"; int totalHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } /* Same ranges: when maximum resources are requested the upper bounds are used. */ @Test public void testRequestingRangesMax() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "  <container version='1.0' id='container'>" + "     <nodes count='[4, 6]'>" + "       <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + "     </nodes>" + "  </container>" + "  <content version='1.0' id='foo'>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes count='[6, 20]' groups='[3,4]'>" + "       <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + "     </nodes>" + "  </content>" + "</services>"; int totalHosts = 26; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true, true); assertEquals(totalHosts, 
model.getRoot().hostSystem().getHosts().size()); } /* A pure container application still gets a logserver and 3 slobroks. */ @Test public void testContainerOnly() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + "  <search/>" + "  <nodes count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals(3, model.getContainerClusters().get("container").getContainers().size()); assertNotNull(model.getAdmin().getLogserver()); assertEquals(3, model.getAdmin().getSlobroks().size()); } /* Deprecated jvmargs attribute is applied to every container. */ @Test public void testJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + "  <search/>" + "  <nodes jvmargs='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } /* jvm-options attribute is applied to every container. */ @Test public void testJvmOptions() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + "  <search/>" + "  <nodes jvm-options='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } /* Specifying both jvm-options and jvmargs must be rejected. */ @Test public void testJvmOptionsOverridesJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + "  <search/>" + "  <nodes jvm-options='xyz' jvmargs='abc' count='3'/>" + 
"</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); try { tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("You have specified both jvm-options='xyz' and deprecated jvmargs='abc'. Merge jvmargs into jvm-options.", e.getMessage()); } } /* Non-hosted: explicit hostalias references work together with the provisioner. */ @Test public void testUsingHostaliasWithProvisioner() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<admin version='2.0'>" + "  <adminserver hostalias='node1'/>\n"+ "</admin>\n" + "<container id='mydisc' version='1.0'>" + "  <handler id='myHandler'>" + "    <component id='injected' />" + "  </handler>" + "  <nodes>" + "    <node hostalias='node1'/>" + "  </nodes>" + "</container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); } /* Standalone-style syntax is accepted on hosted Vespa when the default web service port is used. */ @Test public void testThatStandaloneSyntaxWorksOnHostedVespa() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + "  <http>" + "    <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" + "  </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } /* A non-default http port is rejected on hosted Vespa. */ @Test public void testThatStandaloneSyntaxOnHostedVespaRequiresDefaultPort() { try { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + "  <http>" + "    <server id='server1' port='8095' />" + "  </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(1); 
VespaModel model = tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Illegal port 8095 in http server 'server1': Port must be set to " + getDefaults().vespaWebServicePort(), e.getMessage()); } } /* Hosted dev zone: hostalias syntax works and each cluster is downscaled to one node, with no separate admin node. */ @Test public void testThatStandaloneSyntaxWorksOnHostedManuallyDeployed() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "  <admin version='2.0'>" + "    <adminserver hostalias='node1'/>" + "  </admin>" + "  <container id='foo' version='1.0'>" + "    <nodes>" + "      <node hostalias='node1'/>" + "      <node hostalias='node2'/>" + "    </nodes>" + "  </container>" + "  <content id='bar' version='1.0'>" + "    <documents>" + "      <document type='type1' mode='index'/>" + "    </documents>" + "    <redundancy>2</redundancy>" + "    <nodes>" + "      <group>" + "        <node distribution-key='0' hostalias='node3'/>" + "        <node distribution-key='1' hostalias='node4'/>" + "      </group>" + "    </nodes>" + "  </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(true); tester.addHosts(4); VespaModel model = tester.createModel(new Zone(Environment.dev, RegionName.from("us-central-1")), services, true); assertEquals("We get 1 node per cluster and no admin node", 2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } /** Deploying an application with "nodes count" standalone should give a single-node deployment */ @Test public void testThatHostedSyntaxWorksOnStandalone() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "  <container version='1.0' id='container1'>" + "     <search/>" + "     <nodes count='1'/>" + "  </container>" + "  <content version='1.0'>" + "     <redundancy>2</redundancy>" + "     <documents>" + "       <document type='type1' mode='index'/>" + "     </documents>" + "     <nodes 
count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(3); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in container cluster", 1, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in content cluster (downscaled)", 1, model.getContentClusters().get("content").getRootGroup().getNodes().size()); model.getConfig(new StorStatusConfig.Builder(), "default"); } @Test public void testNoNodeTagMeansTwoNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(3); VespaModel model = tester.createModel(services, true); assertEquals(3, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } @Test public void testNoNodeTagMeansTwoNodesNoContent() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } @Test public void testNoNodeTagMeans1NodeNonHosted() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + 
"<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(1);
        VespaModel model = tester.createModel(services, true);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        assertEquals(1, model.getAdmin().getSlobroks().size());
        assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
        assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
    }

    /** Non-hosted: a single host may back both the container and the content node via the same hostalias. */
    @Test
    public void testSingleNodeNonHosted() {
        String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " <nodes><node hostalias='foo'/></nodes>"+ " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+ " </content>" + "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.setHosted(false);
        tester.addHosts(1);
        VespaModel model = tester.createModel(services, true);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        assertEquals(1, model.getAdmin().getSlobroks().size());
        assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
        assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
    }

    /** Recreate the combination used in some factory tests */
    @Test
    public void testMultitenantButNotHosted() {
        String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>"
+ " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " </services>";
        VespaModel model = createNonProvisionedMultitenantModel(services);
        assertEquals(1, model.getRoot().hostSystem().getHosts().size());
        ContentCluster content = model.getContentClusters().get("storage");
        assertEquals(2, content.getRootGroup().getNodes().size());
        // One cluster-controller container is set up for the content cluster.
        ContainerCluster controller = content.getClusterControllers();
        assertEquals(1, controller.getContainers().size());
    }

    /** A content cluster may delegate document processing to an existing container cluster via <document-processing cluster=...>. */
    @Test
    public void testModelWithReferencedIndexingCluster() {
        String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>";
        VespaModel model = createNonProvisionedMultitenantModel(services);
        assertThat(model.getRoot().hostSystem().getHosts().size(), is(1));
        ContentCluster content = model.getContentClusters().get("storage");
        assertEquals(1, content.getRootGroup().getNodes().size());
        ContainerCluster controller = content.getClusterControllers();
        assertEquals(1, controller.getContainers().size());
    }

    /** Non-hosted: several clusters can share the same physical hosts, declared through hosts.xml aliases. */
    @Test
    public void testSharedNodesNotHosted() {
        String hosts = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<hosts>\n" + " <host name=\"vespa-1\">\n" + " <alias>vespa-1</alias>\n" + " </host>\n" + " <host name=\"vespa-2\">\n" + " <alias>vespa-2</alias>\n" + " </host>\n" + " <host name=\"vespa-3\">\n" + " <alias>vespa-3</alias>\n" + " </host>\n" + "</hosts>";
        String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " <node hostalias=\"vespa-2\"/>\n" + " <node hostalias=\"vespa-3\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" + " <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>";
        VespaModel model = createNonProvisionedModel(false, hosts, services);
        assertEquals(3,
model.getRoot().hostSystem().getHosts().size());
        ContentCluster content = model.getContentClusters().get("storage");
        assertEquals(3, content.getRootGroup().getNodes().size());
    }

    /** Non-hosted multitenant: one host can back both a 'storage' and a 'search' content cluster. */
    @Test
    public void testMultitenantButNotHostedSharedContentNode() {
        String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " <content id='search' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " </group>" + " <documents>" + " <document type='type1'/>" + " </documents>" + " </content>" + " </services>";
        VespaModel model = createNonProvisionedMultitenantModel(services);
        assertThat(model.getRoot().hostSystem().getHosts().size(), is(1));
        ContentCluster content = model.getContentClusters().get("storage");
        assertEquals(2, content.getRootGroup().getNodes().size());
        ContainerCluster controller = content.getClusterControllers();
        assertEquals(1, controller.getContainers().size());
    }

    /** Container clusters are stateless unless they run ZooKeeper; content clusters are always stateful. */
    @Test
    public void testStatefulProperty() {
        String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='qrs'>" + " <nodes count='1'/>" + " </container>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='3'/>" + " </container>" + " <content version='1.0' id='content'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(6);
        VespaModel model = tester.createModel(servicesXml, true);
        // Expected statefulness per cluster id.
        Map<String, Boolean> tests = Map.of("qrs", false, "zk", true, "content", true);
        Map<String, List<HostResource>> hostsByCluster = model.hostSystem().getHosts().stream() .collect(Collectors.groupingBy(h -> h.spec().membership().get().cluster().id().value()));
        tests.forEach((clusterId, stateful) -> {
            List<HostResource> hosts = hostsByCluster.getOrDefault(clusterId, List.of());
            assertFalse("Hosts are provisioned for '" + clusterId + "'", hosts.isEmpty());
            assertEquals("Hosts in cluster '" + clusterId + "' are " + (stateful ? "" : "not ") + "stateful", stateful, hosts.stream().allMatch(h -> h.spec().membership().get().cluster().isStateful()));
        });
    }

    /** Retiring one of four ZooKeeper containers leaves three active members. */
    @Test
    public void containerWithZooKeeperSuboptimalNodeCountDuringRetirement() {
        String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='4'/>" + " </container>" + "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(4);
        // Third argument marks a host as retired — presumably by hostname; confirm against VespaModelTester.
        VespaModel model = tester.createModel(servicesXml, true, "node-1-3-10-01");
        ApplicationContainerCluster cluster = model.getContainerClusters().get("zk");
        assertEquals(1, cluster.getContainers().stream().filter(Container::isRetired).count());
        assertEquals(3, cluster.getContainers().stream().filter(c -> !c.isRetired()).count());
    }

    /** Growing a ZooKeeper container cluster marks only the newly added servers as joining. */
    @Test
    public void containerWithZooKeeperJoiningServers() {
        Function<Integer, String> servicesXml = (nodeCount) -> {
            return "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='" + nodeCount + "'/>" + " </container>" + "</services>";
        };
        VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
        VespaModel model = tester.createModel(servicesXml.apply(3), true);
        {
            // Initial 3-node cluster: no ZooKeeper server should be marked as joining.
            ApplicationContainerCluster cluster = model.getContainerClusters().get("zk");
            ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
            cluster.getContainers().forEach(c -> c.getConfig(config));
            cluster.getConfig(config);
            assertTrue("Initial servers are not joining", config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining));
        }
        {
            // Grow to 5 nodes, passing the previous model: only server ids 3 and 4 are joining.
            VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(5), true, false, 0, Optional.of(model));
            ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk");
            ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
            cluster.getContainers().forEach(c -> c.getConfig(config));
            cluster.getConfig(config);
            assertEquals("New nodes are joining", Map.of(0, false, 1, false, 2, false, 3, true, 4, true), config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id, ZookeeperServerConfig.Server::joining)));
        }
    }

    // Convenience overload: multitenant model without a hosts.xml.
    private VespaModel createNonProvisionedMultitenantModel(String services) {
        return createNonProvisionedModel(true, null, services);
    }

    // Builds a model from raw hosts/services XML using a mock application package (no host provisioner).
    private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
        VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
        ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg;
        DeployState deployState = new DeployState.Builder().applicationPackage(appPkg). properties((new TestProperties()).setMultitenant(multitenant)).
build();
        return modelCreatorWithMockPkg.create(false, deployState);
    }

    // Reads the configured JVM heap size, as a percentage of physical memory, for a container cluster.
    private int physicalMemoryPercentage(ContainerCluster cluster) {
        QrStartConfig.Builder b = new QrStartConfig.Builder();
        cluster.getConfig(b);
        return b.build().jvm().heapSizeAsPercentageOfPhysicalMemory();
    }

    // Reads the memory size proton is configured with on the first search node of the cluster.
    private long protonMemorySize(ContentCluster cluster) {
        ProtonConfig.Builder b = new ProtonConfig.Builder();
        cluster.getSearch().getIndexed().getSearchNode(0).getConfig(b);
        return b.build().hwinfo().memory().size();
    }

    /** Node <resources> (here: slow disk) are reflected in proton's hwinfo config on every search node. */
    @Test
    public void require_that_proton_config_is_tuned_based_on_node_resources() {
        String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " <content version='1.0' id='test'>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='2'>", " <resources vcpu='1' memory='3Gb' disk='9Gb' bandwidth='5Gbps' disk-speed='slow'/>", " </nodes>", " </content>", "</services>");
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(new NodeResources(1, 3, 9, 5, NodeResources.DiskSpeed.slow), 2);
        VespaModel model = tester.createModel(services, true, 0);
        ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
        assertEquals(2, cluster.getSearchNodes().size());
        // Slow disks give a reduced write-speed estimate in proton's hwinfo on both nodes.
        assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
        assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
    }

    // Resolves ProtonConfig for the search node at the given index of the cluster.
    private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
        ProtonConfig.Builder builder = new ProtonConfig.Builder();
        List<SearchNode> searchNodes = cluster.getSearchNodes();
        assertTrue(searchNodeIdx < searchNodes.size());
        searchNodes.get(searchNodeIdx).getConfig(builder);
        return new ProtonConfig(builder);
    }

    /** Explicit config overrides, proton tuning and resource limits win over resource-derived defaults. */
    @Test
    public void require_that_config_override_and_explicit_proton_tuning_and_resource_limits_have_precedence_over_default_node_resource_tuning() {
        String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " <content version='1.0' id='test'>", " <config name='vespa.config.search.core.proton'>", " <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>", " </config>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='1'>", " <resources vcpu='1' memory='128Gb' disk='100Gb'/>", " </nodes>", " <engine>", " <proton>", " <resource-limits>", " <memory>0.92</memory>", " </resource-limits>", " <tuning>", " <searchnode>", " <flushstrategy>", " <native>", " <total>", " <maxmemorygain>1000</maxmemorygain>", " </total>", " </native>", " </flushstrategy>", " </searchnode>", " </tuning>", " </proton>", " </engine>", " </content>", "</services>");
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(new NodeResources(1, 3, 9, 1), 1);
        tester.addHosts(new NodeResources(1, 128, 100, 0.3), 1);
        VespaModel model = tester.createModel(services, true, 0);
        ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
        ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
        assertEquals(2000, cfg.flush().memory().maxtlssize());
        assertEquals(1000, cfg.flush().memory().maxmemory());
        // Per-document-db flush memory is derived from node memory minus the reserved share.
        assertEquals((long) (128 - reservedMemoryGb) * GB / 8, cfg.flush().memory().each().maxmemory());
        assertEquals(0.92, cfg.writefilter().memorylimit(), 0.0001);
    }

    // Resolves ProtonConfig through the model for a given config id.
    private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
        ProtonConfig.Builder builder = new ProtonConfig.Builder();
        model.getConfig(builder, configId);
        return new ProtonConfig(builder);
    }

    // Shared driver: builds a 2-host model and verifies the logserver host also runs a logserver container.
    private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) {
        int numberOfHosts = 2;
        VespaModelTester tester = new VespaModelTester();
        tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver);
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
        assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));
        Admin admin =
model.getAdmin();
        Logserver logserver = admin.getLogserver();
        HostResource hostResource = logserver.getHostResource();
        assertNotNull(hostResource.getService("logserver"));
        String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName;
        assertNotNull(hostResource.getService(containerServiceType));
        // The logserver container must serve application metadata config with generation 1.
        String configId = admin.getLogserver().getHostResource().getService(containerServiceType).getConfigId();
        ApplicationMetadataConfig.Builder builder = new ApplicationMetadataConfig.Builder();
        model.getConfig(builder, configId);
        ApplicationMetadataConfig cfg = new ApplicationMetadataConfig(builder);
        assertEquals(1, cfg.generation());
        // logd on that host must be configured to forward to the logserver.
        LogdConfig.Builder logdConfigBuilder = new LogdConfig.Builder();
        model.getConfig(logdConfigBuilder, configId);
        LogdConfig logdConfig = new LogdConfig(logdConfigBuilder);
        assertTrue(logdConfig.logserver().use());
    }

    // Asserts that exactly nodeCount hosts are provisioned for the given cluster id/type (and combined id, if any).
    private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Id combinedId, ClusterSpec.Type type, VespaModel model) {
        assertEquals("Nodes in cluster " + id + " with type " + type + (combinedId != null ? ", combinedId " + combinedId : ""), nodeCount, model.hostSystem().getHosts().stream() .map(h -> h.spec().membership().get().cluster()) .filter(spec -> spec.id().equals(id) && spec.type().equals(type) && spec.combinedId().equals(Optional.ofNullable(combinedId))) .count());
    }

    // Overload without a combined cluster id.
    private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Type type, VespaModel model) {
        assertProvisioned(nodeCount, id, null, type, model);
    }

}
class ModelProvisioningTest {

    /** Containers get hosts from the provisioner; per-cluster jvm options, preload and memory settings are applied. */
    @Test
    public void testNodesJdisc() {
        String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>\n" + "\n" + "<admin version='3.0'><nodes count='1' /></admin>\n" + "<container id='mydisc' version='1.0'>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes count=\"3\"/>" + "</container>" + "<container id='mydisc2' version='1.0'>" + " <document-processing/>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes count='2' allocated-memory='45%' jvm-gc-options='-XX:+UseParNewGC' jvm-options='-verbosegc' preload='lib/blablamalloc.so'/>" + "</container>" + "</services>";
        String hosts ="<hosts>" + " <host name='myhost0'>" + " <alias>node0</alias>" + " </host>" + " <host name='myhost1'>" + " <alias>node1</alias>" + " </host>" + " <host name='myhost2'>" + " <alias>node2</alias>" + " </host>" + " <host name='myhost3'>" + " <alias>node3</alias>" + " </host>" + " <host name='myhost4'>" + " <alias>node4</alias>" + " </host>" + " <host name='myhost5'>" + " <alias>node5</alias>" + " </host>" + "</hosts>";
        VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
        VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true, false)));
        ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc");
        ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2");
        // mydisc: 3 containers with sequential config ids, all initialized.
        assertEquals(3, mydisc.getContainers().size());
        assertEquals("mydisc/container.0", (mydisc.getContainers().get(0).getConfigId()));
        assertTrue(mydisc.getContainers().get(0).isInitialized());
        assertEquals("mydisc/container.1", mydisc.getContainers().get(1).getConfigId());
        assertTrue(mydisc.getContainers().get(1).isInitialized());
        assertEquals("mydisc/container.2", mydisc.getContainers().get(2).getConfigId());
assertTrue(mydisc.getContainers().get(2).isInitialized());
        assertEquals(2, mydisc2.getContainers().size());
        assertEquals("mydisc2/container.0", mydisc2.getContainers().get(0).getConfigId());
        assertTrue(mydisc2.getContainers().get(0).isInitialized());
        assertEquals("mydisc2/container.1", mydisc2.getContainers().get(1).getConfigId());
        assertTrue(mydisc2.getContainers().get(1).isInitialized());
        // mydisc declared no jvm settings: empty options, default preload, no memory override.
        assertEquals("", mydisc.getContainers().get(0).getJvmOptions());
        assertEquals("", mydisc.getContainers().get(1).getJvmOptions());
        assertEquals("", mydisc.getContainers().get(2).getJvmOptions());
        assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(0).getPreLoad());
        assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(1).getPreLoad());
        assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(2).getPreLoad());
        assertEquals(Optional.empty(), mydisc.getMemoryPercentage());
        // mydisc2 declared explicit jvm-options, preload, allocated-memory and jvm-gc-options.
        assertEquals("-verbosegc", mydisc2.getContainers().get(0).getJvmOptions());
        assertEquals("-verbosegc", mydisc2.getContainers().get(1).getJvmOptions());
        assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(0).getPreLoad());
        assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(1).getPreLoad());
        assertEquals(Optional.of(45), mydisc2.getMemoryPercentage());
        assertEquals(Optional.of("-XX:+UseParNewGC"), mydisc2.getJvmGCOptions());
        // allocated-memory='45%' propagates into QrStartConfig's heap percentage.
        QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
        mydisc2.getConfig(qrStartBuilder);
        QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder);
        assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
        HostSystem hostSystem = model.hostSystem();
        assertNotNull(hostSystem.getHostByHostname("myhost0"));
        assertNotNull(hostSystem.getHostByHostname("myhost1"));
        assertNotNull(hostSystem.getHostByHostname("myhost2"));
assertNotNull(hostSystem.getHostByHostname("myhost3"));
        assertNull(hostSystem.getHostByHostname("Nope"));
    }

    /** Content group nodes get sequential distribution keys starting at 0. */
    @Test
    public void testNodeCountForContentGroup() {
        String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "\n" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>";
        VespaModelTester tester = new VespaModelTester();
        int numberOfHosts = 2;
        tester.addHosts(numberOfHosts);
        int numberOfContentNodes = 2;
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        Map<String, ContentCluster> contentClusters = model.getContentClusters();
        ContentCluster cluster = contentClusters.get("bar");
        assertEquals(numberOfContentNodes, cluster.getRootGroup().getNodes().size());
        int i = 0;
        for (StorageNode node : cluster.getRootGroup().getNodes())
            assertEquals(i++, node.getDistributionKey());
    }

    /** Separate container and content clusters (including one without an id) each get their own provisioned nodes. */
    @Test
    public void testSeparateClusters() {
        String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <search/>" + " <nodes count='1'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + " <content version='1.0'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(5);
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
        assertEquals("Nodes in container1", 1, model.getContainerClusters().get("container1").getContainers().size());
        assertEquals("Nodes in cluster without ID", 2, model.getContentClusters().get("content").getRootGroup().getNodes().size());
        assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
        assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model);
        assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
        assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model);
    }

    /** Provisioned hosts carry cluster membership information (cluster type and id). */
    @Test
    public void testClusterMembership() {
        String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes count='1'/>" + " </container>" + "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(1);
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals(1, model.hostSystem().getHosts().size());
        HostResource host = model.hostSystem().getHosts().iterator().next();
        assertTrue(host.spec().membership().isPresent());
        assertEquals("container", host.spec().membership().get().cluster().type().name());
        assertEquals("container1", host.spec().membership().get().cluster().id().value());
    }

    /** A container cluster with <nodes of='content1'> shares the content cluster's nodes (a combined cluster). */
    @Test
    public void testCombinedCluster() {
        // Both the legacy 'jdisc' and the current 'container' element names are exercised.
        var containerElements = Set.of("jdisc", "container");
        for (var containerElement : containerElements) {
            String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <" + containerElement + " version='1.0' id='container1'>" + " <search/>" + " <nodes of='content1'/>" + " </" + containerElement + ">" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'>" + " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" + " </nodes>" + " </content>" + "</services>";
            VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
            VespaModel model = tester.createModel(xmlWithNodes, true);
            assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
            assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
            assertEquals("Heap size is lowered with combined clusters", 17, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
            assertEquals("Memory for proton is lowered to account for the jvm heap", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.17)), protonMemorySize(model.getContentClusters().get("content1")));
            // No separate container nodes are provisioned; the content nodes count as 'combined'.
            assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
            assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
        }
    }

    /** For comparison with the above */
    @Test
    public void testNonCombinedCluster() {
        var containerElements = Set.of("jdisc", "container");
        for (var containerElement : containerElements) {
            String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <" + containerElement + " version='1.0' id='container1'>" + " <search/>" + " <nodes count='2'/>" + " </" + containerElement + ">" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'>" + " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" + " </nodes>" + " </content>" + "</services>";
            VespaModelTester tester = new VespaModelTester();
            tester.addHosts(4);
            VespaModel model = tester.createModel(xmlWithNodes, true);
            assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
            assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
            assertEquals("Heap size is normal", 60,
physicalMemoryPercentage(model.getContainerClusters().get("container1")));
            assertEquals("Memory for proton is normal", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1")));
        }
    }

    /** jvm-options given on a combined-cluster <nodes of=...> tag are applied to the shared containers. */
    @Test
    public void testCombinedClusterWithJvmOptions() {
        String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <document-processing/>" + " <nodes of='content1' jvm-options='testoption'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(2);
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
        assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
        for (Container container : model.getContainerClusters().get("container1").getContainers())
            assertTrue(container.getJvmOptions().contains("testoption"));
    }

    /** Two independent combined clusters may each reference their own content cluster. */
    @Test
    public void testMultipleCombinedClusters() {
        String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='content1'/>" + " </container>" + " <container version='1.0' id='container2'>" + " <nodes of='content2'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + " <content version='1.0' id='content2'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3'/>" + " </content>" + "</services>";
        VespaModelTester tester = new
VespaModelTester();
        tester.addHosts(5);
        VespaModel model = tester.createModel(xmlWithNodes, true);
        assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
        assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
        assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size());
        assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size());
    }

    /** <nodes of=...> referencing an undefined service is rejected. */
    @Test
    public void testNonExistingCombinedClusterReference() {
        String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='container2'/>" + " </container>" + "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(2);
        try {
            tester.createModel(xmlWithNodes, true);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("container cluster 'container1' contains an invalid reference: referenced service 'container2' is not defined", Exceptions.toMessageString(e));
        }
    }

    /** <nodes of=...> referencing a non-content service is rejected. */
    @Test
    public void testInvalidCombinedClusterReference() {
        String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" + " </container>" + " <container version='1.0' id='container2'>" + " <nodes count='2'/>" + " </container>" + "</services>";
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(2);
        try {
            tester.createModel(xmlWithNodes, true);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("container cluster 'container1' contains an invalid reference: service 'container2' is not a content service", Exceptions.toMessageString(e));
        }
    }

    /** count/groups attributes on <nodes> control flat vs grouped content distribution; admin services reuse container nodes. */
    @Test
    public void testUsingNodesAndGroupCountAttributes() {
        String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='27' groups='9'/>" + " </content>" + " <content version='1.0' id='baz'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='27' groups='27'/>" + " </content>" + "</services>";
        int numberOfHosts = 64;
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, true);
        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
        assertEquals(1, model.getContainerClusters().size());
        Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream() .map(Container::getHost) .collect(Collectors.toSet());
        assertEquals(10, containerHosts.size());
        // Admin services (slobrok, logserver) are placed on the container nodes in hosted mode.
        Admin admin = model.getAdmin();
        Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
        assertEquals(3, slobrokHosts.size());
        assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts));
        assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost()));
        assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size());
        assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers());
        // 'bar': 27 nodes split into 9 groups of 3 with its own cluster-controller cluster.
        ContentCluster cluster = model.getContentClusters().get("bar");
        ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
        assertEquals(3, clusterControllers.getContainers().size());
        assertEquals("bar-controllers", clusterControllers.getName());
        assertEquals("node-1-3-10-54", clusterControllers.getContainers().get(0).getHostName());
        assertEquals("node-1-3-10-51", clusterControllers.getContainers().get(1).getHostName());
        assertEquals("node-1-3-10-48", clusterControllers.getContainers().get(2).getHostName());
        assertEquals(0, cluster.getRootGroup().getNodes().size());
        assertEquals(9, cluster.getRootGroup().getSubgroups().size());
        assertEquals("0", cluster.getRootGroup().getSubgroups().get(0).getIndex());
        assertEquals(3, cluster.getRootGroup().getSubgroups().get(0).getNodes().size());
        assertEquals(0, cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey());
        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
        assertEquals("node-1-3-10-54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2));
        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2"));
        assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3));
        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3));
        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3"));
        assertEquals("node-1-3-10-51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4));
        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5")); assertEquals("node-1-3-10-48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26")); cluster = model.getContentClusters().get("baz"); clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("baz-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-27", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-26", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-25", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(27, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1)); 
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0")); assertEquals("node-1-3-10-27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1")); assertEquals("node-1-3-10-26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26")); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26)); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26")); } @Test public void testGroupsOfSize1() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='8' groups='8'/>" + " </content>" + "</services>"; int numberOfHosts = 18; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); 
ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-07", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(8, cluster.getRootGroup().getSubgroups().size()); assertEquals(8, cluster.distributionBits()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertEquals("node-1-3-10-08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1")); assertEquals("node-1-3-10-07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7")); assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7)); 
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7")); assertEquals("node-1-3-10-01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName()); } @Test public void testExplicitNonDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers><nodes dedicated='false' count='6'/></controllers>" + " <nodes count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 19; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals( 8, cluster.distributionBits()); assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-09", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName()); assertEquals("node-1-3-10-05", clusterControllers.getContainers().get(3).getHostName()); assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(4).getHostName()); assertEquals("node-1-3-10-09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-08", 
cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName()); assertEquals("node-1-3-10-06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); } @Test public void testClusterControllersWithGroupSize2() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='8' groups='4'/>" + " </content>" + "</services>"; int numberOfHosts = 18; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals("We get the closest odd number", 3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(2).getHostName()); } @Test public void testClusterControllersIncludeNonRetiredNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes 
count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 19; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-09", "node-1-3-10-06", "node-1-3-10-03"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3 + 3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("Non-retired", "node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("Non-retired", "node-1-3-10-05", clusterControllers.getContainers().get(1).getHostName()); assertEquals("Non-retired", "node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName()); assertEquals("Retired", "node-1-3-10-09", clusterControllers.getContainers().get(3).getHostName()); assertEquals("Retired", "node-1-3-10-06", clusterControllers.getContainers().get(4).getHostName()); assertEquals("Retired", "node-1-3-10-03", clusterControllers.getContainers().get(5).getHostName()); } @Test public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + "</services>"; int numberOfHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-09"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); assertEquals("Includes retired node", 1+3, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-08", 
model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("node-1-3-10-07", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-09", model.getAdmin().getSlobroks().get(3).getHostName()); } @Test public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + "</services>"; int numberOfHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-01", "node-1-3-10-02"); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-09", model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("node-1-3-10-08", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(3).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-01", model.getAdmin().getSlobroks().get(4).getHostName()); } @Test public void testSlobroksAreSpreadOverAllContainerClusters() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <container version='1.0' id='bar'>" + " <nodes count='3'/>" + " </container>" + "</services>"; int numberOfHosts = 13; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-12", "node-1-3-10-03", 
"node-1-3-10-02"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-13", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-11", model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-12", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("node-1-3-10-01", model.getAdmin().getSlobroks().get(3).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-03", model.getAdmin().getSlobroks().get(4).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(5).getHostName()); } @Test public void test2ContentNodesProduces1ClusterController() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(1, clusterControllers.getContainers().size()); } @Test public void testDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' />" + " </content>" + " <content version='1.0' id='bar'>" + " 
<redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' />" + " </content>" + "</services>"; int numberOfHosts = 7; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); VespaModel model = tester.createModel(services); assertEquals(7, model.getRoot().hostSystem().getHosts().size()); assertNull(model.getContentClusters().get("foo").getClusterControllers()); assertNull(model.getContentClusters().get("bar").getClusterControllers()); ClusterControllerContainerCluster clusterControllers = model.getAdmin().getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("cluster-controllers", clusterControllers.getName()); clusterControllers.getContainers().stream().map(ClusterControllerContainer::getHost).forEach(host -> { assertTrue(host.spec().membership().get().cluster().isStateful()); assertEquals(ClusterSpec.Type.admin, host.spec().membership().get().cluster().type()); }); } @Test public void testExplicitDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers><nodes dedicated='true' count='4'/></controllers>" + " <nodes count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 23; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(4, 
clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName()); assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(3).getHostName()); } @Test public void testLogserverContainerWhenDedicatedLogserver() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'>" + " <logservers>" + " <nodes count='1' dedicated='true'/>" + " </logservers>" + " </admin>" + " <container version='1.0' id='foo'>" + " <nodes count='1'/>" + " </container>" + "</services>"; boolean useDedicatedNodeForLogserver = false; testContainerOnLogserverHost(services, useDedicatedNodeForLogserver); } @Test public void testImplicitLogserverContainer() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container version='1.0' id='foo'>" + " <nodes count='1'/>" + " </container>" + "</services>"; boolean useDedicatedNodeForLogserver = true; testContainerOnLogserverHost(services, useDedicatedNodeForLogserver); } @Test public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='3'>4</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24' groups='3'/>" + " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" + " </content>" + "</services>"; int numberOfHosts = 6; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); 
assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveReadyCopies()); assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(3, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4)); 
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5")); } @Test public void testUsingNodesCountAttributesAndGettingTooFewNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <container version='1.0' id='container'>" + " <search/>" + " <nodes count='2'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='8'>12</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24'/>" + " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + " </content>" + "</services>"; int numberOfHosts = 6; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(4, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(4, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(4, cluster.redundancy().effectiveReadyCopies()); assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertEquals(4, cluster.getSearch().getIndexed().getSearchableCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(4, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(4)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getNodes().get(2).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getNodes().get(2).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getNodes().get(3).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getNodes().get(3).getConfigId(), is("bar/storage/3")); } @Test public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='3'>4</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24' groups='3'/>" + " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" + " </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(1, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(0).getHostName()); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, 
cluster.redundancy().effectiveReadyCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } /* Requests 3 required nodes with only 2 hosts available: provisioning must fail. */ @Test(expected = IllegalArgumentException.class) public void testRequiringMoreNodesThanAreAvailable() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3' required='true'/>" + " </content>" + "</services>"; int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.createModel(services, false); } /* NOTE(review): removed a stray duplicate @Test(expected = IllegalArgumentException.class) annotation that was left behind by a deleted test. @Test is not a repeatable annotation, so annotating the method twice fails to compile; this test builds a model successfully and asserts on it, so it must not expect an exception. */ @Test public void testExclusiveDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' exclusive='true'/>" + " </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); VespaModel model = tester.createModel(services); assertEquals(5, model.hostSystem().getHosts().size()); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } /* Exclusive container and content nodes: every provisioned host must be marked exclusive. */ @Test public void testExclusiveNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<container version='1.0' id='container'>" + " <nodes count='2' 
exclusive='true'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3' exclusive='true'/>" + " </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } @Test public void testUsingNodesCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='8'>12</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24'/>" + " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + " </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, cluster.redundancy().effectiveReadyCopies()); assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } @Test public void testRequestingSpecificNodeResources() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='4.0'>" + " <logservers>" + " <nodes count='1' dedicated='true'>" + " <resources vcpu='0.1' memory='0.2Gb' disk='300Gb' disk-speed='slow'/>" + " </nodes>" + " </logservers>" + " <slobroks>" + " <nodes count='2' dedicated='true'>" + " <resources vcpu='0.1' memory='0.3Gb' disk='1Gb' bandwidth='500Mbps'/>" + " </nodes>" + " </slobroks>" + " </admin>" + " <container version='1.0' id='container'>" + " <nodes count='4'>" + " <resources vcpu='12' memory='10Gb' disk='30Gb'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers>" + " <nodes count='2' dedicated='true'>" + " <resources vcpu='0.8' memory='3Gb' disk='2Gb'/>" + " </nodes>" + " </controllers>" + " <nodes count='5'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers>" + " <nodes count='3' dedicated='true'>" + " <resources vcpu='0.7' memory='2Gb' disk='2.5Gb'/>" + " </nodes>" + " </controllers>" + " <nodes count='6'>" + " <resources vcpu='10' memory='64Gb' disk='200Gb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 23; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(0.1, 0.2, 300, 0.3, NodeResources.DiskSpeed.slow), 1); tester.addHosts(new NodeResources(0.1, 0.3, 1, 0.5), 2); tester.addHosts(new NodeResources(12, 10, 30, 0.3), 4); tester.addHosts(new NodeResources(0.8, 3, 2, 0.3), 2); tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5); tester.addHosts(new 
NodeResources(0.7, 2, 2.5, 0.3), 3); tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6); VespaModel model = tester.createModel(services, true, 0); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } @Test public void testRequestingRangesMin() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container'>" + " <nodes count='[4, 6]'>" + " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='[6, 20]' groups='[3,4]'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } @Test public void testRequestingRangesMax() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container'>" + " <nodes count='[4, 6]'>" + " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='[6, 20]' groups='[3,4]'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 26; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true, true); assertEquals(totalHosts, 
model.getRoot().hostSystem().getHosts().size()); } @Test public void testContainerOnly() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals(3, model.getContainerClusters().get("container").getContainers().size()); assertNotNull(model.getAdmin().getLogserver()); assertEquals(3, model.getAdmin().getSlobroks().size()); } @Test public void testJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvmargs='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } @Test public void testJvmOptions() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvm-options='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } @Test public void testJvmOptionsOverridesJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvm-options='xyz' jvmargs='abc' count='3'/>" + 
"</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); try { tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("You have specified both jvm-options='xyz' and deprecated jvmargs='abc'. Merge jvmargs into jvm-options.", e.getMessage()); } } @Test public void testUsingHostaliasWithProvisioner() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<admin version='2.0'>" + " <adminserver hostalias='node1'/>\n"+ "</admin>\n" + "<container id='mydisc' version='1.0'>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + "</container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); } @Test public void testThatStandaloneSyntaxWorksOnHostedVespa() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + " <http>" + " <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" + " </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } @Test public void testThatStandaloneSyntaxOnHostedVespaRequiresDefaultPort() { try { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + " <http>" + " <server id='server1' port='8095' />" + " </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(1); 
VespaModel model = tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Illegal port 8095 in http server 'server1': Port must be set to " + getDefaults().vespaWebServicePort(), e.getMessage()); } } @Test public void testThatStandaloneSyntaxWorksOnHostedManuallyDeployed() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='foo' version='1.0'>" + " <nodes>" + " <node hostalias='node1'/>" + " <node hostalias='node2'/>" + " </nodes>" + " </container>" + " <content id='bar' version='1.0'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <redundancy>2</redundancy>" + " <nodes>" + " <group>" + " <node distribution-key='0' hostalias='node3'/>" + " <node distribution-key='1' hostalias='node4'/>" + " </group>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(true); tester.addHosts(4); VespaModel model = tester.createModel(new Zone(Environment.dev, RegionName.from("us-central-1")), services, true); assertEquals("We get 1 node per cluster and no admin node", 2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } /** Deploying an application with "nodes count" standalone should give a single-node deployment */ @Test public void testThatHostedSyntaxWorksOnStandalone() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <search/>" + " <nodes count='1'/>" + " </container>" + " <content version='1.0'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes 
count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(3); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in container cluster", 1, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in content cluster (downscaled)", 1, model.getContentClusters().get("content").getRootGroup().getNodes().size()); model.getConfig(new StorStatusConfig.Builder(), "default"); } @Test public void testNoNodeTagMeansTwoNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(3); VespaModel model = tester.createModel(services, true); assertEquals(3, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } @Test public void testNoNodeTagMeansTwoNodesNoContent() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } @Test public void testNoNodeTagMeans1NodeNonHosted() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + 
"<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size()); } @Test public void testSingleNodeNonHosted() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " <nodes><node hostalias='foo'/></nodes>"+ " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+ " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } /** Recreate the combination used in some factory tests */ @Test public void testMultitenantButNotHosted() { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" 
+ " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " </services>"; VespaModel model = createNonProvisionedMultitenantModel(services); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(2, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testModelWithReferencedIndexingCluster() { String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>"; VespaModel model = 
createNonProvisionedMultitenantModel(services); assertThat(model.getRoot().hostSystem().getHosts().size(), is(1)); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(1, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testSharedNodesNotHosted() { String hosts = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<hosts>\n" + " <host name=\"vespa-1\">\n" + " <alias>vespa-1</alias>\n" + " </host>\n" + " <host name=\"vespa-2\">\n" + " <alias>vespa-2</alias>\n" + " </host>\n" + " <host name=\"vespa-3\">\n" + " <alias>vespa-3</alias>\n" + " </host>\n" + "</hosts>"; String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " <node hostalias=\"vespa-2\"/>\n" + " <node hostalias=\"vespa-3\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" + " <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>"; VespaModel model = createNonProvisionedModel(false, hosts, services); assertEquals(3, 
model.getRoot().hostSystem().getHosts().size()); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(3, content.getRootGroup().getNodes().size()); } @Test public void testMultitenantButNotHostedSharedContentNode() { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " <content id='search' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " </group>" + " <documents>" + " <document type='type1'/>" + " </documents>" + " </content>" + " </services>"; VespaModel model = createNonProvisionedMultitenantModel(services); assertThat(model.getRoot().hostSystem().getHosts().size(), is(1)); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(2, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testStatefulProperty() { String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='qrs'>" + " <nodes count='1'/>" + " </container>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='3'/>" + " </container>" + " <content version='1.0' id='content'>" + " 
<redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(6); VespaModel model = tester.createModel(servicesXml, true); Map<String, Boolean> tests = Map.of("qrs", false, "zk", true, "content", true); Map<String, List<HostResource>> hostsByCluster = model.hostSystem().getHosts().stream() .collect(Collectors.groupingBy(h -> h.spec().membership().get().cluster().id().value())); tests.forEach((clusterId, stateful) -> { List<HostResource> hosts = hostsByCluster.getOrDefault(clusterId, List.of()); assertFalse("Hosts are provisioned for '" + clusterId + "'", hosts.isEmpty()); assertEquals("Hosts in cluster '" + clusterId + "' are " + (stateful ? "" : "not ") + "stateful", stateful, hosts.stream().allMatch(h -> h.spec().membership().get().cluster().isStateful())); }); } @Test public void containerWithZooKeeperSuboptimalNodeCountDuringRetirement() { String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='4'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(4); VespaModel model = tester.createModel(servicesXml, true, "node-1-3-10-01"); ApplicationContainerCluster cluster = model.getContainerClusters().get("zk"); assertEquals(1, cluster.getContainers().stream().filter(Container::isRetired).count()); assertEquals(3, cluster.getContainers().stream().filter(c -> !c.isRetired()).count()); } @Test public void containerWithZooKeeperJoiningServers() { Function<Integer, String> servicesXml = (nodeCount) -> { return "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='" + nodeCount + "'/>" + " </container>" + "</services>"; }; VespaModelTester tester = new VespaModelTester(); 
tester.addHosts(5); VespaModel model = tester.createModel(servicesXml.apply(3), true); { ApplicationContainerCluster cluster = model.getContainerClusters().get("zk"); ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder(); cluster.getContainers().forEach(c -> c.getConfig(config)); cluster.getConfig(config); assertTrue("Initial servers are not joining", config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining)); } { VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(5), true, false, 0, Optional.of(model)); ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk"); ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder(); cluster.getContainers().forEach(c -> c.getConfig(config)); cluster.getConfig(config); assertEquals("New nodes are joining", Map.of(0, false, 1, false, 2, false, 3, true, 4, true), config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id, ZookeeperServerConfig.Server::joining))); } } private VespaModel createNonProvisionedMultitenantModel(String services) { return createNonProvisionedModel(true, null, services); } private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) { VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1")); ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg; DeployState deployState = new DeployState.Builder().applicationPackage(appPkg). properties((new TestProperties()).setMultitenant(multitenant)). 
build(); return modelCreatorWithMockPkg.create(false, deployState); } private int physicalMemoryPercentage(ContainerCluster cluster) { QrStartConfig.Builder b = new QrStartConfig.Builder(); cluster.getConfig(b); return b.build().jvm().heapSizeAsPercentageOfPhysicalMemory(); } private long protonMemorySize(ContentCluster cluster) { ProtonConfig.Builder b = new ProtonConfig.Builder(); cluster.getSearch().getIndexed().getSearchNode(0).getConfig(b); return b.build().hwinfo().memory().size(); } @Test public void require_that_proton_config_is_tuned_based_on_node_resources() { String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " <content version='1.0' id='test'>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='2'>", " <resources vcpu='1' memory='3Gb' disk='9Gb' bandwidth='5Gbps' disk-speed='slow'/>", " </nodes>", " </content>", "</services>"); VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(1, 3, 9, 5, NodeResources.DiskSpeed.slow), 2); VespaModel model = tester.createModel(services, true, 0); ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch(); assertEquals(2, cluster.getSearchNodes().size()); assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001); assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001); } private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); List<SearchNode> searchNodes = cluster.getSearchNodes(); assertTrue(searchNodeIdx < searchNodes.size()); searchNodes.get(searchNodeIdx).getConfig(builder); return new ProtonConfig(builder); } @Test public void require_that_config_override_and_explicit_proton_tuning_and_resource_limits_have_precedence_over_default_node_resource_tuning() { String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " 
<content version='1.0' id='test'>", " <config name='vespa.config.search.core.proton'>", " <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>", " </config>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='1'>", " <resources vcpu='1' memory='128Gb' disk='100Gb'/>", " </nodes>", " <engine>", " <proton>", " <resource-limits>", " <memory>0.92</memory>", " </resource-limits>", " <tuning>", " <searchnode>", " <flushstrategy>", " <native>", " <total>", " <maxmemorygain>1000</maxmemorygain>", " </total>", " </native>", " </flushstrategy>", " </searchnode>", " </tuning>", " </proton>", " </engine>", " </content>", "</services>"); VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(1, 3, 9, 1), 1); tester.addHosts(new NodeResources(1, 128, 100, 0.3), 1); VespaModel model = tester.createModel(services, true, 0); ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch(); ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId()); assertEquals(2000, cfg.flush().memory().maxtlssize()); assertEquals(1000, cfg.flush().memory().maxmemory()); assertEquals((long) (128 - reservedMemoryGb) * GB / 8, cfg.flush().memory().each().maxmemory()); assertEquals(0.92, cfg.writefilter().memorylimit(), 0.0001); } private static ProtonConfig getProtonConfig(VespaModel model, String configId) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); model.getConfig(builder, configId); return new ProtonConfig(builder); } private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) { int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(Zone.defaultZone(), services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); Admin admin = 
model.getAdmin(); Logserver logserver = admin.getLogserver(); HostResource hostResource = logserver.getHostResource(); assertNotNull(hostResource.getService("logserver")); String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName; assertNotNull(hostResource.getService(containerServiceType)); String configId = admin.getLogserver().getHostResource().getService(containerServiceType).getConfigId(); ApplicationMetadataConfig.Builder builder = new ApplicationMetadataConfig.Builder(); model.getConfig(builder, configId); ApplicationMetadataConfig cfg = new ApplicationMetadataConfig(builder); assertEquals(1, cfg.generation()); LogdConfig.Builder logdConfigBuilder = new LogdConfig.Builder(); model.getConfig(logdConfigBuilder, configId); LogdConfig logdConfig = new LogdConfig(logdConfigBuilder); assertTrue(logdConfig.logserver().use()); } private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Id combinedId, ClusterSpec.Type type, VespaModel model) { assertEquals("Nodes in cluster " + id + " with type " + type + (combinedId != null ? ", combinedId " + combinedId : ""), nodeCount, model.hostSystem().getHosts().stream() .map(h -> h.spec().membership().get().cluster()) .filter(spec -> spec.id().equals(id) && spec.type().equals(type) && spec.combinedId().equals(Optional.ofNullable(combinedId))) .count()); } private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Type type, VespaModel model) { assertProvisioned(nodeCount, id, null, type, model); } }
With dedicated cluster controllers enabled there is presumably not enough host capacity left for the required nodes; that's why the exception is expected.
public void testRequiredNodesAndDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' required='true'/>" + " </content>" + "</services>"; int numberOfHosts = 4; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); tester.createModel(services, false); }
int numberOfHosts = 4;
public void testRequiredNodesAndDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' required='true'/>" + " </content>" + "</services>"; int numberOfHosts = 4; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); tester.createModel(services, false); }
class ModelProvisioningTest { @Test public void testNodesJdisc() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>\n" + "\n" + "<admin version='3.0'><nodes count='1' /></admin>\n" + "<container id='mydisc' version='1.0'>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes count=\"3\"/>" + "</container>" + "<container id='mydisc2' version='1.0'>" + " <document-processing/>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes count='2' allocated-memory='45%' jvm-gc-options='-XX:+UseParNewGC' jvm-options='-verbosegc' preload='lib/blablamalloc.so'/>" + "</container>" + "</services>"; String hosts ="<hosts>" + " <host name='myhost0'>" + " <alias>node0</alias>" + " </host>" + " <host name='myhost1'>" + " <alias>node1</alias>" + " </host>" + " <host name='myhost2'>" + " <alias>node2</alias>" + " </host>" + " <host name='myhost3'>" + " <alias>node3</alias>" + " </host>" + " <host name='myhost4'>" + " <alias>node4</alias>" + " </host>" + " <host name='myhost5'>" + " <alias>node5</alias>" + " </host>" + "</hosts>"; VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services); VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true, false))); ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc"); ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2"); assertEquals(3, mydisc.getContainers().size()); assertEquals("mydisc/container.0", (mydisc.getContainers().get(0).getConfigId())); assertTrue(mydisc.getContainers().get(0).isInitialized()); assertEquals("mydisc/container.1", mydisc.getContainers().get(1).getConfigId()); assertTrue(mydisc.getContainers().get(1).isInitialized()); assertEquals("mydisc/container.2", mydisc.getContainers().get(2).getConfigId()); 
assertTrue(mydisc.getContainers().get(2).isInitialized()); assertEquals(2, mydisc2.getContainers().size()); assertEquals("mydisc2/container.0", mydisc2.getContainers().get(0).getConfigId()); assertTrue(mydisc2.getContainers().get(0).isInitialized()); assertEquals("mydisc2/container.1", mydisc2.getContainers().get(1).getConfigId()); assertTrue(mydisc2.getContainers().get(1).isInitialized()); assertEquals("", mydisc.getContainers().get(0).getJvmOptions()); assertEquals("", mydisc.getContainers().get(1).getJvmOptions()); assertEquals("", mydisc.getContainers().get(2).getJvmOptions()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(0).getPreLoad()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(1).getPreLoad()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(2).getPreLoad()); assertEquals(Optional.empty(), mydisc.getMemoryPercentage()); assertEquals("-verbosegc", mydisc2.getContainers().get(0).getJvmOptions()); assertEquals("-verbosegc", mydisc2.getContainers().get(1).getJvmOptions()); assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(0).getPreLoad()); assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(1).getPreLoad()); assertEquals(Optional.of(45), mydisc2.getMemoryPercentage()); assertEquals(Optional.of("-XX:+UseParNewGC"), mydisc2.getJvmGCOptions()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); mydisc2.getConfig(qrStartBuilder); QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); HostSystem hostSystem = model.hostSystem(); assertNotNull(hostSystem.getHostByHostname("myhost0")); assertNotNull(hostSystem.getHostByHostname("myhost1")); assertNotNull(hostSystem.getHostByHostname("myhost2")); 
assertNotNull(hostSystem.getHostByHostname("myhost3")); assertNull(hostSystem.getHostByHostname("Nope")); } @Test public void testNodeCountForContentGroup() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "\n" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); int numberOfHosts = 2; tester.addHosts(numberOfHosts); int numberOfContentNodes = 2; VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); Map<String, ContentCluster> contentClusters = model.getContentClusters(); ContentCluster cluster = contentClusters.get("bar"); assertEquals(numberOfContentNodes, cluster.getRootGroup().getNodes().size()); int i = 0; for (StorageNode node : cluster.getRootGroup().getNodes()) assertEquals(i++, node.getDistributionKey()); } @Test public void testSeparateClusters() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <search/>" + " <nodes count='1'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + " <content version='1.0'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(5); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in 
container1", 1, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in cluster without ID", 2, model.getContentClusters().get("content").getRootGroup().getNodes().size()); assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model); assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model); assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model); } @Test public void testClusterMembership() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes count='1'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(1); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals(1, model.hostSystem().getHosts().size()); HostResource host = model.hostSystem().getHosts().iterator().next(); assertTrue(host.spec().membership().isPresent()); assertEquals("container", host.spec().membership().get().cluster().type().name()); assertEquals("container1", host.spec().membership().get().cluster().id().value()); } @Test public void testCombinedCluster() { var containerElements = Set.of("jdisc", "container"); for (var containerElement : containerElements) { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <" + containerElement + " version='1.0' id='container1'>" + " <search/>" + " <nodes of='content1'/>" + " </" + containerElement + ">" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'>" + " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); 
tester.addHosts(2); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Heap size is lowered with combined clusters", 17, physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertEquals("Memory for proton is lowered to account for the jvm heap", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.17)), protonMemorySize(model.getContentClusters().get("content1"))); assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model); assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model); } } /** For comparison with the above */ @Test public void testNonCombinedCluster() { var containerElements = Set.of("jdisc", "container"); for (var containerElement : containerElements) { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <" + containerElement + " version='1.0' id='container1'>" + " <search/>" + " <nodes count='2'/>" + " </" + containerElement + ">" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'>" + " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(4); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Heap size is normal", 60, 
physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertEquals("Memory for proton is normal", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1"))); } } @Test public void testCombinedClusterWithJvmOptions() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <document-processing/>" + " <nodes of='content1' jvm-options='testoption'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); for (Container container : model.getContainerClusters().get("container1").getContainers()) assertTrue(container.getJvmOptions().contains("testoption")); } @Test public void testMultipleCombinedClusters() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='content1'/>" + " </container>" + " <container version='1.0' id='container2'>" + " <nodes of='content2'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + " <content version='1.0' id='content2'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3'/>" + " </content>" + "</services>"; VespaModelTester tester = new 
VespaModelTester(); tester.addHosts(5); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size()); assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size()); } @Test public void testNonExistingCombinedClusterReference() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='container2'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); try { tester.createModel(xmlWithNodes, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("container cluster 'container1' contains an invalid reference: referenced service 'container2' is not defined", Exceptions.toMessageString(e)); } } @Test public void testInvalidCombinedClusterReference() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" + " </container>" + " <container version='1.0' id='container2'>" + " <nodes count='2'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); try { tester.createModel(xmlWithNodes, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("container cluster 'container1' contains an invalid reference: service 'container2' is not a content service", Exceptions.toMessageString(e)); } } @Test public void testUsingNodesAndGroupCountAttributes() { String services = "<?xml version='1.0' 
encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='27' groups='9'/>" + " </content>" + " <content version='1.0' id='baz'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='27' groups='27'/>" + " </content>" + "</services>"; int numberOfHosts = 64; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getContainerClusters().size()); Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream() .map(Container::getHost) .collect(Collectors.toSet()); assertEquals(10, containerHosts.size()); Admin admin = model.getAdmin(); Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet()); assertEquals(3, slobrokHosts.size()); assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts)); assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost())); assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size()); assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers()); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-54", 
clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-51", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-48", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(9, cluster.getRootGroup().getSubgroups().size()); assertEquals("0", cluster.getRootGroup().getSubgroups().get(0).getIndex()); assertEquals(3, cluster.getRootGroup().getSubgroups().get(0).getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertEquals("node-1-3-10-54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3")); assertEquals("node-1-3-10-51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4")); 
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5")); assertEquals("node-1-3-10-48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26")); cluster = model.getContentClusters().get("baz"); clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("baz-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-27", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-26", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-25", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(27, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1)); 
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0")); assertEquals("node-1-3-10-27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1")); assertEquals("node-1-3-10-26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26")); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26)); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26")); } @Test public void testGroupsOfSize1() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='8' groups='8'/>" + " </content>" + "</services>"; int numberOfHosts = 18; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); 
ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-07", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(8, cluster.getRootGroup().getSubgroups().size()); assertEquals(8, cluster.distributionBits()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertEquals("node-1-3-10-08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1")); assertEquals("node-1-3-10-07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7")); assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7)); 
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7")); assertEquals("node-1-3-10-01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName()); } @Test public void testExplicitNonDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers><nodes dedicated='false' count='6'/></controllers>" + " <nodes count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 19; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals( 8, cluster.distributionBits()); assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-09", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName()); assertEquals("node-1-3-10-05", clusterControllers.getContainers().get(3).getHostName()); assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(4).getHostName()); assertEquals("node-1-3-10-09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-08", 
cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName()); assertEquals("node-1-3-10-06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); } @Test public void testClusterControllersWithGroupSize2() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='8' groups='4'/>" + " </content>" + "</services>"; int numberOfHosts = 18; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals("We get the closest odd number", 3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(2).getHostName()); } @Test public void testClusterControllersIncludeNonRetiredNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes 
count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 19; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-09", "node-1-3-10-06", "node-1-3-10-03"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3 + 3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("Non-retired", "node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("Non-retired", "node-1-3-10-05", clusterControllers.getContainers().get(1).getHostName()); assertEquals("Non-retired", "node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName()); assertEquals("Retired", "node-1-3-10-09", clusterControllers.getContainers().get(3).getHostName()); assertEquals("Retired", "node-1-3-10-06", clusterControllers.getContainers().get(4).getHostName()); assertEquals("Retired", "node-1-3-10-03", clusterControllers.getContainers().get(5).getHostName()); } @Test public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + "</services>"; int numberOfHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-09"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); assertEquals("Includes retired node", 1+3, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-08", 
model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("node-1-3-10-07", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-09", model.getAdmin().getSlobroks().get(3).getHostName()); } @Test public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + "</services>"; int numberOfHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-01", "node-1-3-10-02"); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-09", model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("node-1-3-10-08", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(3).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-01", model.getAdmin().getSlobroks().get(4).getHostName()); } @Test public void testSlobroksAreSpreadOverAllContainerClusters() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <container version='1.0' id='bar'>" + " <nodes count='3'/>" + " </container>" + "</services>"; int numberOfHosts = 13; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-12", "node-1-3-10-03", 
"node-1-3-10-02"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-13", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-11", model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-12", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("node-1-3-10-01", model.getAdmin().getSlobroks().get(3).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-03", model.getAdmin().getSlobroks().get(4).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(5).getHostName()); } @Test public void test2ContentNodesProduces1ClusterController() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(1, clusterControllers.getContainers().size()); } @Test public void testDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' />" + " </content>" + " <content version='1.0' id='bar'>" + " 
<redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' />" + " </content>" + "</services>"; int numberOfHosts = 7; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); VespaModel model = tester.createModel(services); assertEquals(7, model.getRoot().hostSystem().getHosts().size()); assertNull(model.getContentClusters().get("foo").getClusterControllers()); assertNull(model.getContentClusters().get("bar").getClusterControllers()); ClusterControllerContainerCluster clusterControllers = model.getAdmin().getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("cluster-controllers", clusterControllers.getName()); clusterControllers.getContainers().stream().map(ClusterControllerContainer::getHost).forEach(host -> { assertTrue(host.spec().membership().get().cluster().isStateful()); assertEquals(ClusterSpec.Type.admin, host.spec().membership().get().cluster().type()); }); } @Test public void testExplicitDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers><nodes dedicated='true' count='4'/></controllers>" + " <nodes count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 23; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(4, 
clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName()); assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(3).getHostName()); } @Test public void testLogserverContainerWhenDedicatedLogserver() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'>" + " <logservers>" + " <nodes count='1' dedicated='true'/>" + " </logservers>" + " </admin>" + " <container version='1.0' id='foo'>" + " <nodes count='1'/>" + " </container>" + "</services>"; boolean useDedicatedNodeForLogserver = false; testContainerOnLogserverHost(services, useDedicatedNodeForLogserver); } @Test public void testImplicitLogserverContainer() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container version='1.0' id='foo'>" + " <nodes count='1'/>" + " </container>" + "</services>"; boolean useDedicatedNodeForLogserver = true; testContainerOnLogserverHost(services, useDedicatedNodeForLogserver); } @Test public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='3'>4</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24' groups='3'/>" + " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" + " </content>" + "</services>"; int numberOfHosts = 6; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); 
assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveReadyCopies()); assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(3, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4)); 
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5")); } @Test public void testUsingNodesCountAttributesAndGettingTooFewNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <container version='1.0' id='container'>" + " <search/>" + " <nodes count='2'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='8'>12</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24'/>" + " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + " </content>" + "</services>"; int numberOfHosts = 6; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(4, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(4, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(4, cluster.redundancy().effectiveReadyCopies()); assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertEquals(4, cluster.getSearch().getIndexed().getSearchableCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(4, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(4)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getNodes().get(2).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getNodes().get(2).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getNodes().get(3).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getNodes().get(3).getConfigId(), is("bar/storage/3")); } @Test public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='3'>4</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24' groups='3'/>" + " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" + " </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(1, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(0).getHostName()); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, 
cluster.redundancy().effectiveReadyCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } @Test(expected = IllegalArgumentException.class) public void testRequiringMoreNodesThanAreAvailable() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3' required='true'/>" + " </content>" + "</services>"; int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.createModel(services, false); } @Test(expected = IllegalArgumentException.class) @Test public void testExclusiveDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' exclusive='true'/>" + " </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); VespaModel model = tester.createModel(services); assertEquals(5, model.hostSystem().getHosts().size()); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } @Test public void testExclusiveNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<container version='1.0' id='container'>" + " <nodes count='2' 
exclusive='true'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3' exclusive='true'/>" + " </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } @Test public void testUsingNodesCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='8'>12</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24'/>" + " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + " </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, cluster.redundancy().effectiveReadyCopies()); assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } @Test public void testRequestingSpecificNodeResources() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='4.0'>" + " <logservers>" + " <nodes count='1' dedicated='true'>" + " <resources vcpu='0.1' memory='0.2Gb' disk='300Gb' disk-speed='slow'/>" + " </nodes>" + " </logservers>" + " <slobroks>" + " <nodes count='2' dedicated='true'>" + " <resources vcpu='0.1' memory='0.3Gb' disk='1Gb' bandwidth='500Mbps'/>" + " </nodes>" + " </slobroks>" + " </admin>" + " <container version='1.0' id='container'>" + " <nodes count='4'>" + " <resources vcpu='12' memory='10Gb' disk='30Gb'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers>" + " <nodes count='2' dedicated='true'>" + " <resources vcpu='0.8' memory='3Gb' disk='2Gb'/>" + " </nodes>" + " </controllers>" + " <nodes count='5'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers>" + " <nodes count='3' dedicated='true'>" + " <resources vcpu='0.7' memory='2Gb' disk='2.5Gb'/>" + " </nodes>" + " </controllers>" + " <nodes count='6'>" + " <resources vcpu='10' memory='64Gb' disk='200Gb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 23; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(0.1, 0.2, 300, 0.3, NodeResources.DiskSpeed.slow), 1); tester.addHosts(new NodeResources(0.1, 0.3, 1, 0.5), 2); tester.addHosts(new NodeResources(12, 10, 30, 0.3), 4); tester.addHosts(new NodeResources(0.8, 3, 2, 0.3), 2); tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5); tester.addHosts(new 
NodeResources(0.7, 2, 2.5, 0.3), 3); tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6); VespaModel model = tester.createModel(services, true, 0); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } @Test public void testRequestingRangesMin() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container'>" + " <nodes count='[4, 6]'>" + " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='[6, 20]' groups='[3,4]'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } @Test public void testRequestingRangesMax() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container'>" + " <nodes count='[4, 6]'>" + " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='[6, 20]' groups='[3,4]'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 26; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true, true); assertEquals(totalHosts, 
model.getRoot().hostSystem().getHosts().size()); } @Test public void testContainerOnly() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals(3, model.getContainerClusters().get("container").getContainers().size()); assertNotNull(model.getAdmin().getLogserver()); assertEquals(3, model.getAdmin().getSlobroks().size()); } @Test public void testJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvmargs='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } @Test public void testJvmOptions() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvm-options='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } @Test public void testJvmOptionsOverridesJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvm-options='xyz' jvmargs='abc' count='3'/>" + 
"</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); try { tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("You have specified both jvm-options='xyz' and deprecated jvmargs='abc'. Merge jvmargs into jvm-options.", e.getMessage()); } } @Test public void testUsingHostaliasWithProvisioner() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<admin version='2.0'>" + " <adminserver hostalias='node1'/>\n"+ "</admin>\n" + "<container id='mydisc' version='1.0'>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + "</container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); } @Test public void testThatStandaloneSyntaxWorksOnHostedVespa() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + " <http>" + " <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" + " </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } @Test public void testThatStandaloneSyntaxOnHostedVespaRequiresDefaultPort() { try { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + " <http>" + " <server id='server1' port='8095' />" + " </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(1); 
VespaModel model = tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Illegal port 8095 in http server 'server1': Port must be set to " + getDefaults().vespaWebServicePort(), e.getMessage()); } } @Test public void testThatStandaloneSyntaxWorksOnHostedManuallyDeployed() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='foo' version='1.0'>" + " <nodes>" + " <node hostalias='node1'/>" + " <node hostalias='node2'/>" + " </nodes>" + " </container>" + " <content id='bar' version='1.0'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <redundancy>2</redundancy>" + " <nodes>" + " <group>" + " <node distribution-key='0' hostalias='node3'/>" + " <node distribution-key='1' hostalias='node4'/>" + " </group>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(true); tester.addHosts(4); VespaModel model = tester.createModel(new Zone(Environment.dev, RegionName.from("us-central-1")), services, true); assertEquals("We get 1 node per cluster and no admin node", 2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } /** Deploying an application with "nodes count" standalone should give a single-node deployment */ @Test public void testThatHostedSyntaxWorksOnStandalone() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <search/>" + " <nodes count='1'/>" + " </container>" + " <content version='1.0'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes 
count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(3); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in container cluster", 1, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in content cluster (downscaled)", 1, model.getContentClusters().get("content").getRootGroup().getNodes().size()); model.getConfig(new StorStatusConfig.Builder(), "default"); } @Test public void testNoNodeTagMeansTwoNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(3); VespaModel model = tester.createModel(services, true); assertEquals(3, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } @Test public void testNoNodeTagMeansTwoNodesNoContent() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } @Test public void testNoNodeTagMeans1NodeNonHosted() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + 
"<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size()); } @Test public void testSingleNodeNonHosted() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " <nodes><node hostalias='foo'/></nodes>"+ " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+ " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } /** Recreate the combination used in some factory tests */ @Test public void testMultitenantButNotHosted() { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" 
+ " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " </services>"; VespaModel model = createNonProvisionedMultitenantModel(services); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(2, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testModelWithReferencedIndexingCluster() { String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>"; VespaModel model = 
// NOTE(review): this chunk begins mid-method — the statements below are the tail of a
// test whose beginning lies outside this view; left untouched.
createNonProvisionedMultitenantModel(services); assertThat(model.getRoot().hostSystem().getHosts().size(), is(1)); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(1, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); }

// Non-hosted model with an explicit <hosts> file: three hosts are shared between the
// container cluster and the content cluster, so the model ends up with 3 hosts and
// the 'storage' content cluster with 3 nodes.
@Test public void testSharedNodesNotHosted() { String hosts = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<hosts>\n" + " <host name=\"vespa-1\">\n" + " <alias>vespa-1</alias>\n" + " </host>\n" + " <host name=\"vespa-2\">\n" + " <alias>vespa-2</alias>\n" + " </host>\n" + " <host name=\"vespa-3\">\n" + " <alias>vespa-3</alias>\n" + " </host>\n" + "</hosts>"; String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " <node hostalias=\"vespa-2\"/>\n" + " <node hostalias=\"vespa-3\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" + " <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>"; VespaModel model = createNonProvisionedModel(false, hosts, services); assertEquals(3, model.getRoot().hostSystem().getHosts().size()); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(3, content.getRootGroup().getNodes().size()); }

// Multitenant but non-hosted: two content clusters declare nodes on the same single
// host alias; 'storage' keeps both of its declared nodes and gets one (non-dedicated)
// cluster-controller container.
@Test public void testMultitenantButNotHostedSharedContentNode() { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " <content id='search' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " </group>" + " <documents>" + " <document type='type1'/>" + " </documents>" + " </content>" + " </services>"; VespaModel model = createNonProvisionedMultitenantModel(services); assertThat(model.getRoot().hostSystem().getHosts().size(), is(1)); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(2, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); }

// Checks the isStateful flag on the provisioned ClusterSpec: false for a plain
// container cluster ('qrs'), true for a container cluster with ZooKeeper ('zk')
// and for a content cluster ('content'). Hosts are grouped by cluster id first.
@Test public void testStatefulProperty() { String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='qrs'>" + " <nodes count='1'/>" + " </container>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='3'/>" + " </container>" + " <content version='1.0' id='content'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(6); VespaModel model = tester.createModel(servicesXml, true); Map<String, Boolean> tests = Map.of("qrs", false, "zk", true, "content", true); Map<String, List<HostResource>> hostsByCluster = model.hostSystem().getHosts().stream() .collect(Collectors.groupingBy(h -> h.spec().membership().get().cluster().id().value())); tests.forEach((clusterId, stateful) -> { List<HostResource> hosts = hostsByCluster.getOrDefault(clusterId, List.of()); assertFalse("Hosts are provisioned for '" + clusterId + "'", hosts.isEmpty()); assertEquals("Hosts in cluster '" + clusterId + "' are " + (stateful ? "" : "not ") + "stateful", stateful, hosts.stream().allMatch(h -> h.spec().membership().get().cluster().isStateful())); }); }

// A 4-node ZooKeeper container cluster with one host retired ("node-1-3-10-01"):
// exactly one container is retired and the other 3 stay active.
@Test public void containerWithZooKeeperSuboptimalNodeCountDuringRetirement() { String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='4'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(4); VespaModel model = tester.createModel(servicesXml, true, "node-1-3-10-01"); ApplicationContainerCluster cluster = model.getContainerClusters().get("zk"); assertEquals(1, cluster.getContainers().stream().filter(Container::isRetired).count()); assertEquals(3, cluster.getContainers().stream().filter(c -> !c.isRetired()).count()); }

// Growing a ZooKeeper cluster from 3 to 5 nodes: the initial servers are not marked
// 'joining' in ZookeeperServerConfig, while the two new servers (ids 3 and 4) are.
// The second model is built with the first as the previous model.
@Test public void containerWithZooKeeperJoiningServers() { Function<Integer, String> servicesXml = (nodeCount) -> { return "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='" + nodeCount + "'/>" + " </container>" + "</services>"; }; VespaModelTester tester = new VespaModelTester(); tester.addHosts(5); VespaModel model = tester.createModel(servicesXml.apply(3), true); { ApplicationContainerCluster cluster = model.getContainerClusters().get("zk"); ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder(); cluster.getContainers().forEach(c -> c.getConfig(config)); cluster.getConfig(config); assertTrue("Initial servers are not joining", config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining)); } { VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(5), true, false, 0, Optional.of(model)); ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk"); ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder(); cluster.getContainers().forEach(c -> c.getConfig(config)); cluster.getConfig(config); assertEquals("New nodes are joining", Map.of(0, false, 1, false, 2, false, 3, true, 4, true), config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id, ZookeeperServerConfig.Server::joining))); } }

// Helper: multitenant, non-provisioned model without a hosts file.
private VespaModel createNonProvisionedMultitenantModel(String services) { return createNonProvisionedModel(true, null, services); }

// Helper: builds a model from raw hosts/services XML through the mock application
// package, with the multitenant deploy property set as requested.
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) { VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1")); ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg; DeployState deployState = new DeployState.Builder().applicationPackage(appPkg). properties((new TestProperties()).setMultitenant(multitenant)). build(); return modelCreatorWithMockPkg.create(false, deployState); }

// Helper: reads the container cluster's QrStartConfig and returns the JVM heap size
// as a percentage of physical memory.
private int physicalMemoryPercentage(ContainerCluster cluster) { QrStartConfig.Builder b = new QrStartConfig.Builder(); cluster.getConfig(b); return b.build().jvm().heapSizeAsPercentageOfPhysicalMemory(); }

// Helper: reads the first indexed search node's ProtonConfig and returns its
// hwinfo memory size (bytes).
private long protonMemorySize(ContentCluster cluster) { ProtonConfig.Builder b = new ProtonConfig.Builder(); cluster.getSearch().getIndexed().getSearchNode(0).getConfig(b); return b.build().hwinfo().memory().size(); }

// Nodes declared with slow disk resources: each search node's ProtonConfig gets a
// disk write speed of 40 (the slow-disk tuning value asserted below).
@Test public void require_that_proton_config_is_tuned_based_on_node_resources() { String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " <content version='1.0' id='test'>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='2'>", " <resources vcpu='1' memory='3Gb' disk='9Gb' bandwidth='5Gbps' disk-speed='slow'/>", " </nodes>", " </content>", "</services>"); VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(1, 3, 9, 5, NodeResources.DiskSpeed.slow), 2); VespaModel model = tester.createModel(services, true, 0); ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch(); assertEquals(2, cluster.getSearchNodes().size()); assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001); assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001); }

// Helper: ProtonConfig for the search node at the given index in the cluster.
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); List<SearchNode> searchNodes = cluster.getSearchNodes(); assertTrue(searchNodeIdx < searchNodes.size()); searchNodes.get(searchNodeIdx).getConfig(builder); return new ProtonConfig(builder); }

// Precedence test: an explicit <config> override (maxtlssize), explicit proton
// <tuning> (maxmemorygain) and explicit <resource-limits> (memory 0.92) must win over
// the defaults derived from node resources, while untouched values (each.maxmemory)
// still come from the resource-based tuning of the 128Gb node.
@Test public void require_that_config_override_and_explicit_proton_tuning_and_resource_limits_have_precedence_over_default_node_resource_tuning() { String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " <content version='1.0' id='test'>", " <config name='vespa.config.search.core.proton'>", " <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>", " </config>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='1'>", " <resources vcpu='1' memory='128Gb' disk='100Gb'/>", " </nodes>", " <engine>", " <proton>", " <resource-limits>", " <memory>0.92</memory>", " </resource-limits>", " <tuning>", " <searchnode>", " <flushstrategy>", " <native>", " <total>", " <maxmemorygain>1000</maxmemorygain>", " </total>", " </native>", " </flushstrategy>", " </searchnode>", " </tuning>", " </proton>", " </engine>", " </content>", "</services>"); VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(1, 3, 9, 1), 1); tester.addHosts(new NodeResources(1, 128, 100, 0.3), 1); VespaModel model = tester.createModel(services, true, 0); ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch(); ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId()); assertEquals(2000, cfg.flush().memory().maxtlssize()); assertEquals(1000, cfg.flush().memory().maxmemory()); assertEquals((long) (128 - reservedMemoryGb) * GB / 8, cfg.flush().memory().each().maxmemory()); assertEquals(0.92, cfg.writefilter().memorylimit(), 0.0001); }

// Helper: ProtonConfig resolved through the model for a given config id.
private static ProtonConfig getProtonConfig(VespaModel model, String configId) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); model.getConfig(builder, configId); return new ProtonConfig(builder); }

// Shared driver for logserver-host tests: asserts the logserver host also runs a
// logserver-container, that its ApplicationMetadataConfig generation is 1, and that
// logd is configured to forward to the logserver.
private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) { int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(Zone.defaultZone(), services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); Admin admin = model.getAdmin(); Logserver logserver = admin.getLogserver(); HostResource hostResource = logserver.getHostResource(); assertNotNull(hostResource.getService("logserver")); String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName; assertNotNull(hostResource.getService(containerServiceType)); String configId = admin.getLogserver().getHostResource().getService(containerServiceType).getConfigId(); ApplicationMetadataConfig.Builder builder = new ApplicationMetadataConfig.Builder(); model.getConfig(builder, configId); ApplicationMetadataConfig cfg = new ApplicationMetadataConfig(builder); assertEquals(1, cfg.generation()); LogdConfig.Builder logdConfigBuilder = new LogdConfig.Builder(); model.getConfig(logdConfigBuilder, configId); LogdConfig logdConfig = new LogdConfig(logdConfigBuilder); assertTrue(logdConfig.logserver().use()); }

// Asserts that exactly nodeCount hosts are provisioned with the given cluster id,
// type and (possibly null) combined id.
private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Id combinedId, ClusterSpec.Type type, VespaModel model) { assertEquals("Nodes in cluster " + id + " with type " + type + (combinedId != null ? ", combinedId " + combinedId : ""), nodeCount, model.hostSystem().getHosts().stream() .map(h -> h.spec().membership().get().cluster()) .filter(spec -> spec.id().equals(id) && spec.type().equals(type) && spec.combinedId().equals(Optional.ofNullable(combinedId))) .count()); }

// Convenience overload without a combined id.
private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Type type, VespaModel model) { assertProvisioned(nodeCount, id, null, type, model); }

}
/**
 * Tests of how node provisioning (hosted-style {@code <nodes count=.../>},
 * {@code <nodes of=.../>} combined clusters, group layout and cluster-controller
 * placement) is reflected in the built VespaModel.
 */
class ModelProvisioningTest {

    // Two container clusters provisioned via <nodes count>; verifies container config
    // ids, initialization, jvm-options/gc-options/preload propagation, the 45%
    // allocated-memory setting reaching QrStartConfig, and host lookup by hostname.
    @Test public void testNodesJdisc() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>\n" + "\n" + "<admin version='3.0'><nodes count='1' /></admin>\n" + "<container id='mydisc' version='1.0'>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes count=\"3\"/>" + "</container>" + "<container id='mydisc2' version='1.0'>" + " <document-processing/>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes count='2' allocated-memory='45%' jvm-gc-options='-XX:+UseParNewGC' jvm-options='-verbosegc' preload='lib/blablamalloc.so'/>" + "</container>" + "</services>"; String hosts ="<hosts>" + " <host name='myhost0'>" + " <alias>node0</alias>" + " </host>" + " <host name='myhost1'>" + " <alias>node1</alias>" + " </host>" + " <host name='myhost2'>" + " <alias>node2</alias>" + " </host>" + " <host name='myhost3'>" + " <alias>node3</alias>" + " </host>" + " <host name='myhost4'>" + " <alias>node4</alias>" + " </host>" + " <host name='myhost5'>" + " <alias>node5</alias>" + " </host>" + "</hosts>"; VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services); VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true, false))); ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc"); ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2"); assertEquals(3, mydisc.getContainers().size()); assertEquals("mydisc/container.0", (mydisc.getContainers().get(0).getConfigId())); assertTrue(mydisc.getContainers().get(0).isInitialized()); assertEquals("mydisc/container.1", mydisc.getContainers().get(1).getConfigId()); assertTrue(mydisc.getContainers().get(1).isInitialized()); assertEquals("mydisc/container.2", mydisc.getContainers().get(2).getConfigId()); assertTrue(mydisc.getContainers().get(2).isInitialized()); assertEquals(2, mydisc2.getContainers().size()); assertEquals("mydisc2/container.0", mydisc2.getContainers().get(0).getConfigId()); assertTrue(mydisc2.getContainers().get(0).isInitialized()); assertEquals("mydisc2/container.1", mydisc2.getContainers().get(1).getConfigId()); assertTrue(mydisc2.getContainers().get(1).isInitialized()); assertEquals("", mydisc.getContainers().get(0).getJvmOptions()); assertEquals("", mydisc.getContainers().get(1).getJvmOptions()); assertEquals("", mydisc.getContainers().get(2).getJvmOptions()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(0).getPreLoad()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(1).getPreLoad()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(2).getPreLoad()); assertEquals(Optional.empty(), mydisc.getMemoryPercentage()); assertEquals("-verbosegc", mydisc2.getContainers().get(0).getJvmOptions()); assertEquals("-verbosegc", mydisc2.getContainers().get(1).getJvmOptions()); assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(0).getPreLoad()); assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(1).getPreLoad()); assertEquals(Optional.of(45), mydisc2.getMemoryPercentage()); assertEquals(Optional.of("-XX:+UseParNewGC"), mydisc2.getJvmGCOptions()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); mydisc2.getConfig(qrStartBuilder); QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); HostSystem hostSystem = model.hostSystem(); assertNotNull(hostSystem.getHostByHostname("myhost0")); assertNotNull(hostSystem.getHostByHostname("myhost1")); assertNotNull(hostSystem.getHostByHostname("myhost2")); assertNotNull(hostSystem.getHostByHostname("myhost3")); assertNull(hostSystem.getHostByHostname("Nope")); }

    // A content cluster with <nodes count='2'/> gets 2 root-group nodes with
    // sequential distribution keys starting at 0.
    @Test public void testNodeCountForContentGroup() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "\n" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); int numberOfHosts = 2; tester.addHosts(numberOfHosts); int numberOfContentNodes = 2; VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); Map<String, ContentCluster> contentClusters = model.getContentClusters(); ContentCluster cluster = contentClusters.get("bar"); assertEquals(numberOfContentNodes, cluster.getRootGroup().getNodes().size()); int i = 0; for (StorageNode node : cluster.getRootGroup().getNodes()) assertEquals(i++, node.getDistributionKey()); }

    // Separate container and content clusters (including one content cluster without
    // an id, which defaults to 'content'): checks node counts, the default 60% heap,
    // and that provisioning records the expected cluster id/type for each.
    @Test public void testSeparateClusters() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <search/>" + " <nodes count='1'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + " <content version='1.0'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(5); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 1, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in cluster without ID", 2, model.getContentClusters().get("content").getRootGroup().getNodes().size()); assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model); assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model); assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model); }

    // The provisioned host's spec carries cluster membership: type 'container',
    // id 'container1'.
    @Test public void testClusterMembership() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes count='1'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(1); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals(1, model.hostSystem().getHosts().size()); HostResource host = model.hostSystem().getHosts().iterator().next(); assertTrue(host.spec().membership().isPresent()); assertEquals("container", host.spec().membership().get().cluster().type().name()); assertEquals("container1", host.spec().membership().get().cluster().id().value()); }

    // Combined cluster via <nodes of='content1'/> (tested for both 'jdisc' and
    // 'container' element names): containers are co-located on the content nodes,
    // container heap drops to 17%, proton memory is reduced by the jvm heap share,
    // and the nodes are provisioned as type 'combined' with the container as
    // combinedId — zero nodes are provisioned for the container cluster itself.
    @Test public void testCombinedCluster() { var containerElements = Set.of("jdisc", "container"); for (var containerElement : containerElements) { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <" + containerElement + " version='1.0' id='container1'>" + " <search/>" + " <nodes of='content1'/>" + " </" + containerElement + ">" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'>" + " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Heap size is lowered with combined clusters", 17, physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertEquals("Memory for proton is lowered to account for the jvm heap", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.17)), protonMemorySize(model.getContentClusters().get("content1"))); assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model); assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model); } }

    /** For comparison with the above */
    // Same clusters but with separate nodes: heap stays at the default 60% and proton
    // keeps the full (memory - reserved) budget.
    @Test public void testNonCombinedCluster() { var containerElements = Set.of("jdisc", "container"); for (var containerElement : containerElements) { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <" + containerElement + " version='1.0' id='container1'>" + " <search/>" + " <nodes count='2'/>" + " </" + containerElement + ">" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'>" + " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(4); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Heap size is normal", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertEquals("Memory for proton is normal", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1"))); } }

    // jvm-options on a combined <nodes of=.../> declaration reach every container.
    @Test public void testCombinedClusterWithJvmOptions() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <document-processing/>" + " <nodes of='content1' jvm-options='testoption'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); for (Container container : model.getContainerClusters().get("container1").getContainers()) assertTrue(container.getJvmOptions().contains("testoption")); }

    // Two independent combined clusters: each container cluster tracks its own
    // referenced content cluster's node count (2 and 3 respectively).
    @Test public void testMultipleCombinedClusters() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='content1'/>" + " </container>" + " <container version='1.0' id='container2'>" + " <nodes of='content2'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + " <content version='1.0' id='content2'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(5); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size()); assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size()); }

    // <nodes of=.../> pointing to an undefined service must fail with a specific
    // IllegalArgumentException message.
    @Test public void testNonExistingCombinedClusterReference() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='container2'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); try { tester.createModel(xmlWithNodes, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("container cluster 'container1' contains an invalid reference: referenced service 'container2' is not defined", Exceptions.toMessageString(e)); } }

    // <nodes of=.../> pointing to another container cluster (only content clusters
    // may be referenced) must also fail with a specific message.
    @Test public void testInvalidCombinedClusterReference() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" + " </container>" + " <container version='1.0' id='container2'>" + " <nodes count='2'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); try { tester.createModel(xmlWithNodes, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("container cluster 'container1' contains an invalid reference: service 'container2' is not a content service", Exceptions.toMessageString(e)); } }

    // count + groups attributes: 'bar' (27 nodes / 9 groups) and 'baz' (27 / 27) get
    // the expected group/subgroup layout, distribution keys and config ids; slobroks
    // and logserver are placed on container nodes; no config servers or admin cluster
    // controller in a hosted/multitenant setup; cluster controllers land on specific
    // content hosts (exact hostnames asserted).
    @Test public void testUsingNodesAndGroupCountAttributes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='27' groups='9'/>" + " </content>" + " <content version='1.0' id='baz'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='27' groups='27'/>" + " </content>" + "</services>"; int numberOfHosts = 64; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getContainerClusters().size()); Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream() .map(Container::getHost) .collect(Collectors.toSet()); assertEquals(10, containerHosts.size()); Admin admin = model.getAdmin(); Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet()); assertEquals(3, slobrokHosts.size()); assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts)); assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost())); assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size()); assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers()); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-54", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-51", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-48", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(9, cluster.getRootGroup().getSubgroups().size()); assertEquals("0", cluster.getRootGroup().getSubgroups().get(0).getIndex()); assertEquals(3, cluster.getRootGroup().getSubgroups().get(0).getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertEquals("node-1-3-10-54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3")); assertEquals("node-1-3-10-51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5")); assertEquals("node-1-3-10-48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26")); cluster = model.getContentClusters().get("baz"); clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("baz-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-27", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-26", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-25", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(27, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0")); assertEquals("node-1-3-10-27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1")); assertEquals("node-1-3-10-26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26")); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26)); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26")); }

    // 8 nodes in 8 groups of one node each: 8 distribution bits, per-group layout and
    // cluster-controller placement on specific content hosts.
    @Test public void testGroupsOfSize1() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='8' groups='8'/>" + " </content>" + "</services>"; int numberOfHosts = 18; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-07", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(8, cluster.getRootGroup().getSubgroups().size()); assertEquals(8, cluster.distributionBits()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertEquals("node-1-3-10-08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1")); assertEquals("node-1-3-10-07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7")); assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7)); assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7")); assertEquals("node-1-3-10-01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName()); }

    // Explicit non-dedicated <controllers> with an even count of 6: rounded to the
    // closest odd number (5), controllers placed on content nodes.
    @Test public void testExplicitNonDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers><nodes dedicated='false' count='6'/></controllers>" + " <nodes count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 19; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals( 8, cluster.distributionBits()); assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-09", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName()); assertEquals("node-1-3-10-05", clusterControllers.getContainers().get(3).getHostName()); assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(4).getHostName()); assertEquals("node-1-3-10-09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName()); assertEquals("node-1-3-10-06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); }

    // 8 nodes in 4 groups of 2: the default controller count is rounded to the
    // closest odd number (3); controller host placement asserted.
    @Test public void testClusterControllersWithGroupSize2() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='8' groups='4'/>" + " </content>" + "</services>"; int numberOfHosts = 18; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals("We get the closest odd number", 3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(2).getHostName()); }

    // NOTE(review): the method below is cut off by the chunk boundary (its services
    // string is unterminated here); the remainder lies outside this view and is left
    // exactly as found.
    @Test public void testClusterControllersIncludeNonRetiredNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes 
count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 19; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-09", "node-1-3-10-06", "node-1-3-10-03"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3 + 3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("Non-retired", "node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("Non-retired", "node-1-3-10-05", clusterControllers.getContainers().get(1).getHostName()); assertEquals("Non-retired", "node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName()); assertEquals("Retired", "node-1-3-10-09", clusterControllers.getContainers().get(3).getHostName()); assertEquals("Retired", "node-1-3-10-06", clusterControllers.getContainers().get(4).getHostName()); assertEquals("Retired", "node-1-3-10-03", clusterControllers.getContainers().get(5).getHostName()); } @Test public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + "</services>"; int numberOfHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-09"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); assertEquals("Includes retired node", 1+3, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-08", 
model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("node-1-3-10-07", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-09", model.getAdmin().getSlobroks().get(3).getHostName()); } @Test public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + "</services>"; int numberOfHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-01", "node-1-3-10-02"); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-09", model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("node-1-3-10-08", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(3).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-01", model.getAdmin().getSlobroks().get(4).getHostName()); } @Test public void testSlobroksAreSpreadOverAllContainerClusters() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <container version='1.0' id='bar'>" + " <nodes count='3'/>" + " </container>" + "</services>"; int numberOfHosts = 13; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-12", "node-1-3-10-03", 
"node-1-3-10-02"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-13", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-11", model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-12", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("node-1-3-10-01", model.getAdmin().getSlobroks().get(3).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-03", model.getAdmin().getSlobroks().get(4).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(5).getHostName()); } @Test public void test2ContentNodesProduces1ClusterController() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(1, clusterControllers.getContainers().size()); } @Test public void testDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' />" + " </content>" + " <content version='1.0' id='bar'>" + " 
<redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' />" + " </content>" + "</services>"; int numberOfHosts = 7; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); VespaModel model = tester.createModel(services); assertEquals(7, model.getRoot().hostSystem().getHosts().size()); assertNull(model.getContentClusters().get("foo").getClusterControllers()); assertNull(model.getContentClusters().get("bar").getClusterControllers()); ClusterControllerContainerCluster clusterControllers = model.getAdmin().getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("cluster-controllers", clusterControllers.getName()); clusterControllers.getContainers().stream().map(ClusterControllerContainer::getHost).forEach(host -> { assertTrue(host.spec().membership().get().cluster().isStateful()); assertEquals(ClusterSpec.Type.admin, host.spec().membership().get().cluster().type()); }); } @Test public void testExplicitDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers><nodes dedicated='true' count='4'/></controllers>" + " <nodes count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 23; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(4, 
clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName()); assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(3).getHostName()); } @Test public void testLogserverContainerWhenDedicatedLogserver() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'>" + " <logservers>" + " <nodes count='1' dedicated='true'/>" + " </logservers>" + " </admin>" + " <container version='1.0' id='foo'>" + " <nodes count='1'/>" + " </container>" + "</services>"; boolean useDedicatedNodeForLogserver = false; testContainerOnLogserverHost(services, useDedicatedNodeForLogserver); } @Test public void testImplicitLogserverContainer() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container version='1.0' id='foo'>" + " <nodes count='1'/>" + " </container>" + "</services>"; boolean useDedicatedNodeForLogserver = true; testContainerOnLogserverHost(services, useDedicatedNodeForLogserver); } @Test public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='3'>4</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24' groups='3'/>" + " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" + " </content>" + "</services>"; int numberOfHosts = 6; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); 
assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveReadyCopies()); assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(3, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4)); 
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5")); } @Test public void testUsingNodesCountAttributesAndGettingTooFewNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <container version='1.0' id='container'>" + " <search/>" + " <nodes count='2'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='8'>12</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24'/>" + " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + " </content>" + "</services>"; int numberOfHosts = 6; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(4, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(4, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(4, cluster.redundancy().effectiveReadyCopies()); assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertEquals(4, cluster.getSearch().getIndexed().getSearchableCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(4, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(4)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getNodes().get(2).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getNodes().get(2).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getNodes().get(3).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getNodes().get(3).getConfigId(), is("bar/storage/3")); } @Test public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='3'>4</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24' groups='3'/>" + " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" + " </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(1, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(0).getHostName()); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, 
cluster.redundancy().effectiveReadyCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } @Test(expected = IllegalArgumentException.class) public void testRequiringMoreNodesThanAreAvailable() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3' required='true'/>" + " </content>" + "</services>"; int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.createModel(services, false); } @Test(expected = IllegalArgumentException.class) @Test public void testExclusiveDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' exclusive='true'/>" + " </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); VespaModel model = tester.createModel(services); assertEquals(5, model.hostSystem().getHosts().size()); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } @Test public void testExclusiveNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<container version='1.0' id='container'>" + " <nodes count='2' 
exclusive='true'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3' exclusive='true'/>" + " </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } @Test public void testUsingNodesCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='8'>12</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24'/>" + " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + " </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, cluster.redundancy().effectiveReadyCopies()); assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } @Test public void testRequestingSpecificNodeResources() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='4.0'>" + " <logservers>" + " <nodes count='1' dedicated='true'>" + " <resources vcpu='0.1' memory='0.2Gb' disk='300Gb' disk-speed='slow'/>" + " </nodes>" + " </logservers>" + " <slobroks>" + " <nodes count='2' dedicated='true'>" + " <resources vcpu='0.1' memory='0.3Gb' disk='1Gb' bandwidth='500Mbps'/>" + " </nodes>" + " </slobroks>" + " </admin>" + " <container version='1.0' id='container'>" + " <nodes count='4'>" + " <resources vcpu='12' memory='10Gb' disk='30Gb'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers>" + " <nodes count='2' dedicated='true'>" + " <resources vcpu='0.8' memory='3Gb' disk='2Gb'/>" + " </nodes>" + " </controllers>" + " <nodes count='5'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers>" + " <nodes count='3' dedicated='true'>" + " <resources vcpu='0.7' memory='2Gb' disk='2.5Gb'/>" + " </nodes>" + " </controllers>" + " <nodes count='6'>" + " <resources vcpu='10' memory='64Gb' disk='200Gb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 23; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(0.1, 0.2, 300, 0.3, NodeResources.DiskSpeed.slow), 1); tester.addHosts(new NodeResources(0.1, 0.3, 1, 0.5), 2); tester.addHosts(new NodeResources(12, 10, 30, 0.3), 4); tester.addHosts(new NodeResources(0.8, 3, 2, 0.3), 2); tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5); tester.addHosts(new 
NodeResources(0.7, 2, 2.5, 0.3), 3); tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6); VespaModel model = tester.createModel(services, true, 0); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } @Test public void testRequestingRangesMin() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container'>" + " <nodes count='[4, 6]'>" + " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='[6, 20]' groups='[3,4]'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } @Test public void testRequestingRangesMax() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container'>" + " <nodes count='[4, 6]'>" + " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='[6, 20]' groups='[3,4]'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 26; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true, true); assertEquals(totalHosts, 
model.getRoot().hostSystem().getHosts().size()); } @Test public void testContainerOnly() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals(3, model.getContainerClusters().get("container").getContainers().size()); assertNotNull(model.getAdmin().getLogserver()); assertEquals(3, model.getAdmin().getSlobroks().size()); } @Test public void testJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvmargs='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } @Test public void testJvmOptions() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvm-options='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } @Test public void testJvmOptionsOverridesJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvm-options='xyz' jvmargs='abc' count='3'/>" + 
"</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); try { tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("You have specified both jvm-options='xyz' and deprecated jvmargs='abc'. Merge jvmargs into jvm-options.", e.getMessage()); } } @Test public void testUsingHostaliasWithProvisioner() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<admin version='2.0'>" + " <adminserver hostalias='node1'/>\n"+ "</admin>\n" + "<container id='mydisc' version='1.0'>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + "</container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); } @Test public void testThatStandaloneSyntaxWorksOnHostedVespa() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + " <http>" + " <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" + " </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } @Test public void testThatStandaloneSyntaxOnHostedVespaRequiresDefaultPort() { try { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + " <http>" + " <server id='server1' port='8095' />" + " </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(1); 
VespaModel model = tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Illegal port 8095 in http server 'server1': Port must be set to " + getDefaults().vespaWebServicePort(), e.getMessage()); } } @Test public void testThatStandaloneSyntaxWorksOnHostedManuallyDeployed() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='foo' version='1.0'>" + " <nodes>" + " <node hostalias='node1'/>" + " <node hostalias='node2'/>" + " </nodes>" + " </container>" + " <content id='bar' version='1.0'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <redundancy>2</redundancy>" + " <nodes>" + " <group>" + " <node distribution-key='0' hostalias='node3'/>" + " <node distribution-key='1' hostalias='node4'/>" + " </group>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(true); tester.addHosts(4); VespaModel model = tester.createModel(new Zone(Environment.dev, RegionName.from("us-central-1")), services, true); assertEquals("We get 1 node per cluster and no admin node", 2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } /** Deploying an application with "nodes count" standalone should give a single-node deployment */ @Test public void testThatHostedSyntaxWorksOnStandalone() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <search/>" + " <nodes count='1'/>" + " </container>" + " <content version='1.0'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes 
count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(3); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in container cluster", 1, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in content cluster (downscaled)", 1, model.getContentClusters().get("content").getRootGroup().getNodes().size()); model.getConfig(new StorStatusConfig.Builder(), "default"); } @Test public void testNoNodeTagMeansTwoNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(3); VespaModel model = tester.createModel(services, true); assertEquals(3, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } @Test public void testNoNodeTagMeansTwoNodesNoContent() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } @Test public void testNoNodeTagMeans1NodeNonHosted() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + 
"<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size()); } @Test public void testSingleNodeNonHosted() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " <nodes><node hostalias='foo'/></nodes>"+ " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+ " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } /** Recreate the combination used in some factory tests */ @Test public void testMultitenantButNotHosted() { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" 
+ " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " </services>"; VespaModel model = createNonProvisionedMultitenantModel(services); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(2, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testModelWithReferencedIndexingCluster() { String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>"; VespaModel model = 
createNonProvisionedMultitenantModel(services); assertThat(model.getRoot().hostSystem().getHosts().size(), is(1)); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(1, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testSharedNodesNotHosted() { String hosts = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<hosts>\n" + " <host name=\"vespa-1\">\n" + " <alias>vespa-1</alias>\n" + " </host>\n" + " <host name=\"vespa-2\">\n" + " <alias>vespa-2</alias>\n" + " </host>\n" + " <host name=\"vespa-3\">\n" + " <alias>vespa-3</alias>\n" + " </host>\n" + "</hosts>"; String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " <node hostalias=\"vespa-2\"/>\n" + " <node hostalias=\"vespa-3\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" + " <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>"; VespaModel model = createNonProvisionedModel(false, hosts, services); assertEquals(3, 
model.getRoot().hostSystem().getHosts().size()); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(3, content.getRootGroup().getNodes().size()); } @Test public void testMultitenantButNotHostedSharedContentNode() { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " <content id='search' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " </group>" + " <documents>" + " <document type='type1'/>" + " </documents>" + " </content>" + " </services>"; VespaModel model = createNonProvisionedMultitenantModel(services); assertThat(model.getRoot().hostSystem().getHosts().size(), is(1)); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(2, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testStatefulProperty() { String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='qrs'>" + " <nodes count='1'/>" + " </container>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='3'/>" + " </container>" + " <content version='1.0' id='content'>" + " 
<redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(6); VespaModel model = tester.createModel(servicesXml, true); Map<String, Boolean> tests = Map.of("qrs", false, "zk", true, "content", true); Map<String, List<HostResource>> hostsByCluster = model.hostSystem().getHosts().stream() .collect(Collectors.groupingBy(h -> h.spec().membership().get().cluster().id().value())); tests.forEach((clusterId, stateful) -> { List<HostResource> hosts = hostsByCluster.getOrDefault(clusterId, List.of()); assertFalse("Hosts are provisioned for '" + clusterId + "'", hosts.isEmpty()); assertEquals("Hosts in cluster '" + clusterId + "' are " + (stateful ? "" : "not ") + "stateful", stateful, hosts.stream().allMatch(h -> h.spec().membership().get().cluster().isStateful())); }); } @Test public void containerWithZooKeeperSuboptimalNodeCountDuringRetirement() { String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='4'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(4); VespaModel model = tester.createModel(servicesXml, true, "node-1-3-10-01"); ApplicationContainerCluster cluster = model.getContainerClusters().get("zk"); assertEquals(1, cluster.getContainers().stream().filter(Container::isRetired).count()); assertEquals(3, cluster.getContainers().stream().filter(c -> !c.isRetired()).count()); } @Test public void containerWithZooKeeperJoiningServers() { Function<Integer, String> servicesXml = (nodeCount) -> { return "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='" + nodeCount + "'/>" + " </container>" + "</services>"; }; VespaModelTester tester = new VespaModelTester(); 
tester.addHosts(5); VespaModel model = tester.createModel(servicesXml.apply(3), true); { ApplicationContainerCluster cluster = model.getContainerClusters().get("zk"); ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder(); cluster.getContainers().forEach(c -> c.getConfig(config)); cluster.getConfig(config); assertTrue("Initial servers are not joining", config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining)); } { VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(5), true, false, 0, Optional.of(model)); ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk"); ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder(); cluster.getContainers().forEach(c -> c.getConfig(config)); cluster.getConfig(config); assertEquals("New nodes are joining", Map.of(0, false, 1, false, 2, false, 3, true, 4, true), config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id, ZookeeperServerConfig.Server::joining))); } } private VespaModel createNonProvisionedMultitenantModel(String services) { return createNonProvisionedModel(true, null, services); } private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) { VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1")); ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg; DeployState deployState = new DeployState.Builder().applicationPackage(appPkg). properties((new TestProperties()).setMultitenant(multitenant)). 
build(); return modelCreatorWithMockPkg.create(false, deployState); } private int physicalMemoryPercentage(ContainerCluster cluster) { QrStartConfig.Builder b = new QrStartConfig.Builder(); cluster.getConfig(b); return b.build().jvm().heapSizeAsPercentageOfPhysicalMemory(); } private long protonMemorySize(ContentCluster cluster) { ProtonConfig.Builder b = new ProtonConfig.Builder(); cluster.getSearch().getIndexed().getSearchNode(0).getConfig(b); return b.build().hwinfo().memory().size(); } @Test public void require_that_proton_config_is_tuned_based_on_node_resources() { String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " <content version='1.0' id='test'>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='2'>", " <resources vcpu='1' memory='3Gb' disk='9Gb' bandwidth='5Gbps' disk-speed='slow'/>", " </nodes>", " </content>", "</services>"); VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(1, 3, 9, 5, NodeResources.DiskSpeed.slow), 2); VespaModel model = tester.createModel(services, true, 0); ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch(); assertEquals(2, cluster.getSearchNodes().size()); assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001); assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001); } private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); List<SearchNode> searchNodes = cluster.getSearchNodes(); assertTrue(searchNodeIdx < searchNodes.size()); searchNodes.get(searchNodeIdx).getConfig(builder); return new ProtonConfig(builder); } @Test public void require_that_config_override_and_explicit_proton_tuning_and_resource_limits_have_precedence_over_default_node_resource_tuning() { String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " 
<content version='1.0' id='test'>", " <config name='vespa.config.search.core.proton'>", " <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>", " </config>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='1'>", " <resources vcpu='1' memory='128Gb' disk='100Gb'/>", " </nodes>", " <engine>", " <proton>", " <resource-limits>", " <memory>0.92</memory>", " </resource-limits>", " <tuning>", " <searchnode>", " <flushstrategy>", " <native>", " <total>", " <maxmemorygain>1000</maxmemorygain>", " </total>", " </native>", " </flushstrategy>", " </searchnode>", " </tuning>", " </proton>", " </engine>", " </content>", "</services>"); VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(1, 3, 9, 1), 1); tester.addHosts(new NodeResources(1, 128, 100, 0.3), 1); VespaModel model = tester.createModel(services, true, 0); ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch(); ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId()); assertEquals(2000, cfg.flush().memory().maxtlssize()); assertEquals(1000, cfg.flush().memory().maxmemory()); assertEquals((long) (128 - reservedMemoryGb) * GB / 8, cfg.flush().memory().each().maxmemory()); assertEquals(0.92, cfg.writefilter().memorylimit(), 0.0001); } private static ProtonConfig getProtonConfig(VespaModel model, String configId) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); model.getConfig(builder, configId); return new ProtonConfig(builder); } private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) { int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(Zone.defaultZone(), services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); Admin admin = 
model.getAdmin(); Logserver logserver = admin.getLogserver(); HostResource hostResource = logserver.getHostResource(); assertNotNull(hostResource.getService("logserver")); String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName; assertNotNull(hostResource.getService(containerServiceType)); String configId = admin.getLogserver().getHostResource().getService(containerServiceType).getConfigId(); ApplicationMetadataConfig.Builder builder = new ApplicationMetadataConfig.Builder(); model.getConfig(builder, configId); ApplicationMetadataConfig cfg = new ApplicationMetadataConfig(builder); assertEquals(1, cfg.generation()); LogdConfig.Builder logdConfigBuilder = new LogdConfig.Builder(); model.getConfig(logdConfigBuilder, configId); LogdConfig logdConfig = new LogdConfig(logdConfigBuilder); assertTrue(logdConfig.logserver().use()); } private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Id combinedId, ClusterSpec.Type type, VespaModel model) { assertEquals("Nodes in cluster " + id + " with type " + type + (combinedId != null ? ", combinedId " + combinedId : ""), nodeCount, model.hostSystem().getHosts().stream() .map(h -> h.spec().membership().get().cluster()) .filter(spec -> spec.id().equals(id) && spec.type().equals(type) && spec.combinedId().equals(Optional.ofNullable(combinedId))) .count()); } private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Type type, VespaModel model) { assertProvisioned(nodeCount, id, null, type, model); } }
D'oh 🤦
public void testRequiredNodesAndDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' required='true'/>" + " </content>" + "</services>"; int numberOfHosts = 4; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); tester.createModel(services, false); }
int numberOfHosts = 4;
public void testRequiredNodesAndDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' required='true'/>" + " </content>" + "</services>"; int numberOfHosts = 4; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); tester.createModel(services, false); }
class ModelProvisioningTest { @Test public void testNodesJdisc() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>\n" + "\n" + "<admin version='3.0'><nodes count='1' /></admin>\n" + "<container id='mydisc' version='1.0'>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes count=\"3\"/>" + "</container>" + "<container id='mydisc2' version='1.0'>" + " <document-processing/>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes count='2' allocated-memory='45%' jvm-gc-options='-XX:+UseParNewGC' jvm-options='-verbosegc' preload='lib/blablamalloc.so'/>" + "</container>" + "</services>"; String hosts ="<hosts>" + " <host name='myhost0'>" + " <alias>node0</alias>" + " </host>" + " <host name='myhost1'>" + " <alias>node1</alias>" + " </host>" + " <host name='myhost2'>" + " <alias>node2</alias>" + " </host>" + " <host name='myhost3'>" + " <alias>node3</alias>" + " </host>" + " <host name='myhost4'>" + " <alias>node4</alias>" + " </host>" + " <host name='myhost5'>" + " <alias>node5</alias>" + " </host>" + "</hosts>"; VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services); VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true, false))); ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc"); ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2"); assertEquals(3, mydisc.getContainers().size()); assertEquals("mydisc/container.0", (mydisc.getContainers().get(0).getConfigId())); assertTrue(mydisc.getContainers().get(0).isInitialized()); assertEquals("mydisc/container.1", mydisc.getContainers().get(1).getConfigId()); assertTrue(mydisc.getContainers().get(1).isInitialized()); assertEquals("mydisc/container.2", mydisc.getContainers().get(2).getConfigId()); 
assertTrue(mydisc.getContainers().get(2).isInitialized()); assertEquals(2, mydisc2.getContainers().size()); assertEquals("mydisc2/container.0", mydisc2.getContainers().get(0).getConfigId()); assertTrue(mydisc2.getContainers().get(0).isInitialized()); assertEquals("mydisc2/container.1", mydisc2.getContainers().get(1).getConfigId()); assertTrue(mydisc2.getContainers().get(1).isInitialized()); assertEquals("", mydisc.getContainers().get(0).getJvmOptions()); assertEquals("", mydisc.getContainers().get(1).getJvmOptions()); assertEquals("", mydisc.getContainers().get(2).getJvmOptions()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(0).getPreLoad()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(1).getPreLoad()); assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(2).getPreLoad()); assertEquals(Optional.empty(), mydisc.getMemoryPercentage()); assertEquals("-verbosegc", mydisc2.getContainers().get(0).getJvmOptions()); assertEquals("-verbosegc", mydisc2.getContainers().get(1).getJvmOptions()); assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(0).getPreLoad()); assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(1).getPreLoad()); assertEquals(Optional.of(45), mydisc2.getMemoryPercentage()); assertEquals(Optional.of("-XX:+UseParNewGC"), mydisc2.getJvmGCOptions()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); mydisc2.getConfig(qrStartBuilder); QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); HostSystem hostSystem = model.hostSystem(); assertNotNull(hostSystem.getHostByHostname("myhost0")); assertNotNull(hostSystem.getHostByHostname("myhost1")); assertNotNull(hostSystem.getHostByHostname("myhost2")); 
assertNotNull(hostSystem.getHostByHostname("myhost3")); assertNull(hostSystem.getHostByHostname("Nope")); } @Test public void testNodeCountForContentGroup() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + "\n" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); int numberOfHosts = 2; tester.addHosts(numberOfHosts); int numberOfContentNodes = 2; VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); Map<String, ContentCluster> contentClusters = model.getContentClusters(); ContentCluster cluster = contentClusters.get("bar"); assertEquals(numberOfContentNodes, cluster.getRootGroup().getNodes().size()); int i = 0; for (StorageNode node : cluster.getRootGroup().getNodes()) assertEquals(i++, node.getDistributionKey()); } @Test public void testSeparateClusters() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <search/>" + " <nodes count='1'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + " <content version='1.0'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(5); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in 
container1", 1, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in cluster without ID", 2, model.getContentClusters().get("content").getRootGroup().getNodes().size()); assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model); assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model); assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model); } @Test public void testClusterMembership() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes count='1'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(1); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals(1, model.hostSystem().getHosts().size()); HostResource host = model.hostSystem().getHosts().iterator().next(); assertTrue(host.spec().membership().isPresent()); assertEquals("container", host.spec().membership().get().cluster().type().name()); assertEquals("container1", host.spec().membership().get().cluster().id().value()); } @Test public void testCombinedCluster() { var containerElements = Set.of("jdisc", "container"); for (var containerElement : containerElements) { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <" + containerElement + " version='1.0' id='container1'>" + " <search/>" + " <nodes of='content1'/>" + " </" + containerElement + ">" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'>" + " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); 
tester.addHosts(2); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Heap size is lowered with combined clusters", 17, physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertEquals("Memory for proton is lowered to account for the jvm heap", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.17)), protonMemorySize(model.getContentClusters().get("content1"))); assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model); assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model); } } /** For comparison with the above */ @Test public void testNonCombinedCluster() { var containerElements = Set.of("jdisc", "container"); for (var containerElement : containerElements) { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <" + containerElement + " version='1.0' id='container1'>" + " <search/>" + " <nodes count='2'/>" + " </" + containerElement + ">" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'>" + " <resources vcpu='1' memory='3Gb' disk='9Gb'/>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(4); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Heap size is normal", 60, 
physicalMemoryPercentage(model.getContainerClusters().get("container1"))); assertEquals("Memory for proton is normal", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1"))); } } @Test public void testCombinedClusterWithJvmOptions() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <document-processing/>" + " <nodes of='content1' jvm-options='testoption'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); for (Container container : model.getContainerClusters().get("container1").getContainers()) assertTrue(container.getJvmOptions().contains("testoption")); } @Test public void testMultipleCombinedClusters() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='content1'/>" + " </container>" + " <container version='1.0' id='container2'>" + " <nodes of='content2'/>" + " </container>" + " <content version='1.0' id='content1'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + " <content version='1.0' id='content2'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3'/>" + " </content>" + "</services>"; VespaModelTester tester = new 
VespaModelTester(); tester.addHosts(5); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size()); assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size()); assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size()); } @Test public void testNonExistingCombinedClusterReference() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='container2'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); try { tester.createModel(xmlWithNodes, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("container cluster 'container1' contains an invalid reference: referenced service 'container2' is not defined", Exceptions.toMessageString(e)); } } @Test public void testInvalidCombinedClusterReference() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" + " </container>" + " <container version='1.0' id='container2'>" + " <nodes count='2'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); try { tester.createModel(xmlWithNodes, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("container cluster 'container1' contains an invalid reference: service 'container2' is not a content service", Exceptions.toMessageString(e)); } } @Test public void testUsingNodesAndGroupCountAttributes() { String services = "<?xml version='1.0' 
encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='27' groups='9'/>" + " </content>" + " <content version='1.0' id='baz'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='27' groups='27'/>" + " </content>" + "</services>"; int numberOfHosts = 64; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getContainerClusters().size()); Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream() .map(Container::getHost) .collect(Collectors.toSet()); assertEquals(10, containerHosts.size()); Admin admin = model.getAdmin(); Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet()); assertEquals(3, slobrokHosts.size()); assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts)); assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost())); assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size()); assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers()); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-54", 
clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-51", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-48", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(9, cluster.getRootGroup().getSubgroups().size()); assertEquals("0", cluster.getRootGroup().getSubgroups().get(0).getIndex()); assertEquals(3, cluster.getRootGroup().getSubgroups().get(0).getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertEquals("node-1-3-10-54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3")); assertEquals("node-1-3-10-51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4")); 
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5")); assertEquals("node-1-3-10-48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25")); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26)); assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26")); cluster = model.getContentClusters().get("baz"); clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("baz-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-27", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-26", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-25", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(27, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1)); 
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0")); assertEquals("node-1-3-10-27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1")); assertEquals("node-1-3-10-26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26")); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26)); assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26")); } @Test public void testGroupsOfSize1() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='8' groups='8'/>" + " </content>" + "</services>"; int numberOfHosts = 18; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); 
ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-07", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(8, cluster.getRootGroup().getSubgroups().size()); assertEquals(8, cluster.distributionBits()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertEquals("node-1-3-10-08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1")); assertEquals("node-1-3-10-07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7")); assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7)); 
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7")); assertEquals("node-1-3-10-01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName()); } @Test public void testExplicitNonDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers><nodes dedicated='false' count='6'/></controllers>" + " <nodes count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 19; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals( 8, cluster.distributionBits()); assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-09", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName()); assertEquals("node-1-3-10-05", clusterControllers.getContainers().get(3).getHostName()); assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(4).getHostName()); assertEquals("node-1-3-10-09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-08", 
cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName()); assertEquals("node-1-3-10-06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName()); assertEquals("node-1-3-10-03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName()); } @Test public void testClusterControllersWithGroupSize2() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='8' groups='4'/>" + " </content>" + "</services>"; int numberOfHosts = 18; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals("We get the closest odd number", 3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(2).getHostName()); } @Test public void testClusterControllersIncludeNonRetiredNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes 
count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 19; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-09", "node-1-3-10-06", "node-1-3-10-03"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(3 + 3, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("Non-retired", "node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName()); assertEquals("Non-retired", "node-1-3-10-05", clusterControllers.getContainers().get(1).getHostName()); assertEquals("Non-retired", "node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName()); assertEquals("Retired", "node-1-3-10-09", clusterControllers.getContainers().get(3).getHostName()); assertEquals("Retired", "node-1-3-10-06", clusterControllers.getContainers().get(4).getHostName()); assertEquals("Retired", "node-1-3-10-03", clusterControllers.getContainers().get(5).getHostName()); } @Test public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + "</services>"; int numberOfHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-09"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); assertEquals("Includes retired node", 1+3, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-08", 
model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("node-1-3-10-07", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-09", model.getAdmin().getSlobroks().get(3).getHostName()); } @Test public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + "</services>"; int numberOfHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-01", "node-1-3-10-02"); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-09", model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("node-1-3-10-08", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(3).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-01", model.getAdmin().getSlobroks().get(4).getHostName()); } @Test public void testSlobroksAreSpreadOverAllContainerClusters() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'/>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <container version='1.0' id='bar'>" + " <nodes count='3'/>" + " </container>" + "</services>"; int numberOfHosts = 13; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true, "node-1-3-10-12", "node-1-3-10-03", 
"node-1-3-10-02"); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size()); assertEquals("node-1-3-10-13", model.getAdmin().getSlobroks().get(0).getHostName()); assertEquals("node-1-3-10-11", model.getAdmin().getSlobroks().get(1).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-12", model.getAdmin().getSlobroks().get(2).getHostName()); assertEquals("node-1-3-10-01", model.getAdmin().getSlobroks().get(3).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-03", model.getAdmin().getSlobroks().get(4).getHostName()); assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(5).getHostName()); } @Test public void test2ContentNodesProduces1ClusterController() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(1, clusterControllers.getContainers().size()); } @Test public void testDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' />" + " </content>" + " <content version='1.0' id='bar'>" + " 
<redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' />" + " </content>" + "</services>"; int numberOfHosts = 7; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); VespaModel model = tester.createModel(services); assertEquals(7, model.getRoot().hostSystem().getHosts().size()); assertNull(model.getContentClusters().get("foo").getClusterControllers()); assertNull(model.getContentClusters().get("bar").getClusterControllers()); ClusterControllerContainerCluster clusterControllers = model.getAdmin().getClusterControllers(); assertEquals(3, clusterControllers.getContainers().size()); assertEquals("cluster-controllers", clusterControllers.getName()); clusterControllers.getContainers().stream().map(ClusterControllerContainer::getHost).forEach(host -> { assertTrue(host.spec().membership().get().cluster().isStateful()); assertEquals(ClusterSpec.Type.admin, host.spec().membership().get().cluster().type()); }); } @Test public void testExplicitDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container version='1.0' id='foo'>" + " <nodes count='10'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers><nodes dedicated='true' count='4'/></controllers>" + " <nodes count='9' groups='3'/>" + " </content>" + "</services>"; int numberOfHosts = 23; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(4, 
clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(0).getHostName()); assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(1).getHostName()); assertEquals("node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName()); assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(3).getHostName()); } @Test public void testLogserverContainerWhenDedicatedLogserver() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='4.0'>" + " <logservers>" + " <nodes count='1' dedicated='true'/>" + " </logservers>" + " </admin>" + " <container version='1.0' id='foo'>" + " <nodes count='1'/>" + " </container>" + "</services>"; boolean useDedicatedNodeForLogserver = false; testContainerOnLogserverHost(services, useDedicatedNodeForLogserver); } @Test public void testImplicitLogserverContainer() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container version='1.0' id='foo'>" + " <nodes count='1'/>" + " </container>" + "</services>"; boolean useDedicatedNodeForLogserver = true; testContainerOnLogserverHost(services, useDedicatedNodeForLogserver); } @Test public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='3'>4</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24' groups='3'/>" + " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" + " </content>" + "</services>"; int numberOfHosts = 6; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); 
assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveReadyCopies()); assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(3, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4)); 
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5")); } @Test public void testUsingNodesCountAttributesAndGettingTooFewNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <container version='1.0' id='container'>" + " <search/>" + " <nodes count='2'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='8'>12</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24'/>" + " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + " </content>" + "</services>"; int numberOfHosts = 6; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(4, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(4, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(4, cluster.redundancy().effectiveReadyCopies()); assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertEquals(4, cluster.getSearch().getIndexed().getSearchableCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(4, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(4)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getNodes().get(2).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getNodes().get(2).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getNodes().get(3).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getNodes().get(3).getConfigId(), is("bar/storage/3")); } @Test public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='3'>4</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24' groups='3'/>" + " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" + " </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(1, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(0).getHostName()); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, 
cluster.redundancy().effectiveReadyCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } /* Provisioning with required='true' must fail when fewer hosts are available than requested. */ @Test(expected = IllegalArgumentException.class) public void testRequiringMoreNodesThanAreAvailable() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3' required='true'/>" + " </content>" + "</services>"; int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.createModel(services, false); } /* FIX(review): removed an orphaned duplicate @Test(expected = IllegalArgumentException.class) annotation that preceded the @Test below — JUnit 4's @Test is not repeatable, so the pair did not compile, and this test asserts normal completion rather than an expected exception; the stray annotation likely belonged to a since-deleted method. */ @Test public void testExclusiveDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' exclusive='true'/>" + " </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); VespaModel model = tester.createModel(services); assertEquals(5, model.hostSystem().getHosts().size()); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } @Test public void testExclusiveNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<container version='1.0' id='container'>" + " <nodes count='2' 
exclusive='true'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3' exclusive='true'/>" + " </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } @Test public void testUsingNodesCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='8'>12</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24'/>" + " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + " </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, cluster.redundancy().effectiveReadyCopies()); assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } @Test public void testRequestingSpecificNodeResources() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='4.0'>" + " <logservers>" + " <nodes count='1' dedicated='true'>" + " <resources vcpu='0.1' memory='0.2Gb' disk='300Gb' disk-speed='slow'/>" + " </nodes>" + " </logservers>" + " <slobroks>" + " <nodes count='2' dedicated='true'>" + " <resources vcpu='0.1' memory='0.3Gb' disk='1Gb' bandwidth='500Mbps'/>" + " </nodes>" + " </slobroks>" + " </admin>" + " <container version='1.0' id='container'>" + " <nodes count='4'>" + " <resources vcpu='12' memory='10Gb' disk='30Gb'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers>" + " <nodes count='2' dedicated='true'>" + " <resources vcpu='0.8' memory='3Gb' disk='2Gb'/>" + " </nodes>" + " </controllers>" + " <nodes count='5'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers>" + " <nodes count='3' dedicated='true'>" + " <resources vcpu='0.7' memory='2Gb' disk='2.5Gb'/>" + " </nodes>" + " </controllers>" + " <nodes count='6'>" + " <resources vcpu='10' memory='64Gb' disk='200Gb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 23; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(0.1, 0.2, 300, 0.3, NodeResources.DiskSpeed.slow), 1); tester.addHosts(new NodeResources(0.1, 0.3, 1, 0.5), 2); tester.addHosts(new NodeResources(12, 10, 30, 0.3), 4); tester.addHosts(new NodeResources(0.8, 3, 2, 0.3), 2); tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5); tester.addHosts(new 
NodeResources(0.7, 2, 2.5, 0.3), 3); tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6); VespaModel model = tester.createModel(services, true, 0); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } @Test public void testRequestingRangesMin() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container'>" + " <nodes count='[4, 6]'>" + " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='[6, 20]' groups='[3,4]'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } @Test public void testRequestingRangesMax() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container'>" + " <nodes count='[4, 6]'>" + " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='[6, 20]' groups='[3,4]'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 26; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true, true); assertEquals(totalHosts, 
model.getRoot().hostSystem().getHosts().size()); } @Test public void testContainerOnly() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals(3, model.getContainerClusters().get("container").getContainers().size()); assertNotNull(model.getAdmin().getLogserver()); assertEquals(3, model.getAdmin().getSlobroks().size()); } @Test public void testJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvmargs='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } @Test public void testJvmOptions() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvm-options='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } @Test public void testJvmOptionsOverridesJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvm-options='xyz' jvmargs='abc' count='3'/>" + 
"</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); try { tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("You have specified both jvm-options='xyz' and deprecated jvmargs='abc'. Merge jvmargs into jvm-options.", e.getMessage()); } } @Test public void testUsingHostaliasWithProvisioner() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<admin version='2.0'>" + " <adminserver hostalias='node1'/>\n"+ "</admin>\n" + "<container id='mydisc' version='1.0'>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + "</container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); } @Test public void testThatStandaloneSyntaxWorksOnHostedVespa() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + " <http>" + " <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" + " </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } @Test public void testThatStandaloneSyntaxOnHostedVespaRequiresDefaultPort() { try { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + " <http>" + " <server id='server1' port='8095' />" + " </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(1); 
VespaModel model = tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Illegal port 8095 in http server 'server1': Port must be set to " + getDefaults().vespaWebServicePort(), e.getMessage()); } } @Test public void testThatStandaloneSyntaxWorksOnHostedManuallyDeployed() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='foo' version='1.0'>" + " <nodes>" + " <node hostalias='node1'/>" + " <node hostalias='node2'/>" + " </nodes>" + " </container>" + " <content id='bar' version='1.0'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <redundancy>2</redundancy>" + " <nodes>" + " <group>" + " <node distribution-key='0' hostalias='node3'/>" + " <node distribution-key='1' hostalias='node4'/>" + " </group>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(true); tester.addHosts(4); VespaModel model = tester.createModel(new Zone(Environment.dev, RegionName.from("us-central-1")), services, true); assertEquals("We get 1 node per cluster and no admin node", 2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } /** Deploying an application with "nodes count" standalone should give a single-node deployment */ @Test public void testThatHostedSyntaxWorksOnStandalone() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <search/>" + " <nodes count='1'/>" + " </container>" + " <content version='1.0'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes 
count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(3); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in container cluster", 1, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in content cluster (downscaled)", 1, model.getContentClusters().get("content").getRootGroup().getNodes().size()); model.getConfig(new StorStatusConfig.Builder(), "default"); } @Test public void testNoNodeTagMeansTwoNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(3); VespaModel model = tester.createModel(services, true); assertEquals(3, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } @Test public void testNoNodeTagMeansTwoNodesNoContent() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } @Test public void testNoNodeTagMeans1NodeNonHosted() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + 
"<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size()); } @Test public void testSingleNodeNonHosted() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " <nodes><node hostalias='foo'/></nodes>"+ " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+ " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } /** Recreate the combination used in some factory tests */ @Test public void testMultitenantButNotHosted() { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" 
+ " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " </services>"; VespaModel model = createNonProvisionedMultitenantModel(services); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(2, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testModelWithReferencedIndexingCluster() { String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>"; VespaModel model = 
createNonProvisionedMultitenantModel(services); assertThat(model.getRoot().hostSystem().getHosts().size(), is(1)); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(1, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testSharedNodesNotHosted() { String hosts = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<hosts>\n" + " <host name=\"vespa-1\">\n" + " <alias>vespa-1</alias>\n" + " </host>\n" + " <host name=\"vespa-2\">\n" + " <alias>vespa-2</alias>\n" + " </host>\n" + " <host name=\"vespa-3\">\n" + " <alias>vespa-3</alias>\n" + " </host>\n" + "</hosts>"; String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " <node hostalias=\"vespa-2\"/>\n" + " <node hostalias=\"vespa-3\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" + " <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>"; VespaModel model = createNonProvisionedModel(false, hosts, services); assertEquals(3, 
model.getRoot().hostSystem().getHosts().size()); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(3, content.getRootGroup().getNodes().size()); } @Test public void testMultitenantButNotHostedSharedContentNode() { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " <content id='search' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " </group>" + " <documents>" + " <document type='type1'/>" + " </documents>" + " </content>" + " </services>"; VespaModel model = createNonProvisionedMultitenantModel(services); assertThat(model.getRoot().hostSystem().getHosts().size(), is(1)); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(2, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testStatefulProperty() { String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='qrs'>" + " <nodes count='1'/>" + " </container>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='3'/>" + " </container>" + " <content version='1.0' id='content'>" + " 
<redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(6); VespaModel model = tester.createModel(servicesXml, true); Map<String, Boolean> tests = Map.of("qrs", false, "zk", true, "content", true); Map<String, List<HostResource>> hostsByCluster = model.hostSystem().getHosts().stream() .collect(Collectors.groupingBy(h -> h.spec().membership().get().cluster().id().value())); tests.forEach((clusterId, stateful) -> { List<HostResource> hosts = hostsByCluster.getOrDefault(clusterId, List.of()); assertFalse("Hosts are provisioned for '" + clusterId + "'", hosts.isEmpty()); assertEquals("Hosts in cluster '" + clusterId + "' are " + (stateful ? "" : "not ") + "stateful", stateful, hosts.stream().allMatch(h -> h.spec().membership().get().cluster().isStateful())); }); } @Test public void containerWithZooKeeperSuboptimalNodeCountDuringRetirement() { String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='4'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(4); VespaModel model = tester.createModel(servicesXml, true, "node-1-3-10-01"); ApplicationContainerCluster cluster = model.getContainerClusters().get("zk"); assertEquals(1, cluster.getContainers().stream().filter(Container::isRetired).count()); assertEquals(3, cluster.getContainers().stream().filter(c -> !c.isRetired()).count()); } @Test public void containerWithZooKeeperJoiningServers() { Function<Integer, String> servicesXml = (nodeCount) -> { return "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='" + nodeCount + "'/>" + " </container>" + "</services>"; }; VespaModelTester tester = new VespaModelTester(); 
tester.addHosts(5); VespaModel model = tester.createModel(servicesXml.apply(3), true); { ApplicationContainerCluster cluster = model.getContainerClusters().get("zk"); ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder(); cluster.getContainers().forEach(c -> c.getConfig(config)); cluster.getConfig(config); assertTrue("Initial servers are not joining", config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining)); } { VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(5), true, false, 0, Optional.of(model)); ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk"); ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder(); cluster.getContainers().forEach(c -> c.getConfig(config)); cluster.getConfig(config); assertEquals("New nodes are joining", Map.of(0, false, 1, false, 2, false, 3, true, 4, true), config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id, ZookeeperServerConfig.Server::joining))); } } private VespaModel createNonProvisionedMultitenantModel(String services) { return createNonProvisionedModel(true, null, services); } private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) { VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1")); ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg; DeployState deployState = new DeployState.Builder().applicationPackage(appPkg). properties((new TestProperties()).setMultitenant(multitenant)). 
build(); return modelCreatorWithMockPkg.create(false, deployState); } private int physicalMemoryPercentage(ContainerCluster cluster) { QrStartConfig.Builder b = new QrStartConfig.Builder(); cluster.getConfig(b); return b.build().jvm().heapSizeAsPercentageOfPhysicalMemory(); } private long protonMemorySize(ContentCluster cluster) { ProtonConfig.Builder b = new ProtonConfig.Builder(); cluster.getSearch().getIndexed().getSearchNode(0).getConfig(b); return b.build().hwinfo().memory().size(); } @Test public void require_that_proton_config_is_tuned_based_on_node_resources() { String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " <content version='1.0' id='test'>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='2'>", " <resources vcpu='1' memory='3Gb' disk='9Gb' bandwidth='5Gbps' disk-speed='slow'/>", " </nodes>", " </content>", "</services>"); VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(1, 3, 9, 5, NodeResources.DiskSpeed.slow), 2); VespaModel model = tester.createModel(services, true, 0); ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch(); assertEquals(2, cluster.getSearchNodes().size()); assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001); assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001); } private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); List<SearchNode> searchNodes = cluster.getSearchNodes(); assertTrue(searchNodeIdx < searchNodes.size()); searchNodes.get(searchNodeIdx).getConfig(builder); return new ProtonConfig(builder); } @Test public void require_that_config_override_and_explicit_proton_tuning_and_resource_limits_have_precedence_over_default_node_resource_tuning() { String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " 
<content version='1.0' id='test'>", " <config name='vespa.config.search.core.proton'>", " <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>", " </config>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='1'>", " <resources vcpu='1' memory='128Gb' disk='100Gb'/>", " </nodes>", " <engine>", " <proton>", " <resource-limits>", " <memory>0.92</memory>", " </resource-limits>", " <tuning>", " <searchnode>", " <flushstrategy>", " <native>", " <total>", " <maxmemorygain>1000</maxmemorygain>", " </total>", " </native>", " </flushstrategy>", " </searchnode>", " </tuning>", " </proton>", " </engine>", " </content>", "</services>"); VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(1, 3, 9, 1), 1); tester.addHosts(new NodeResources(1, 128, 100, 0.3), 1); VespaModel model = tester.createModel(services, true, 0); ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch(); ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId()); assertEquals(2000, cfg.flush().memory().maxtlssize()); assertEquals(1000, cfg.flush().memory().maxmemory()); assertEquals((long) (128 - reservedMemoryGb) * GB / 8, cfg.flush().memory().each().maxmemory()); assertEquals(0.92, cfg.writefilter().memorylimit(), 0.0001); } private static ProtonConfig getProtonConfig(VespaModel model, String configId) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); model.getConfig(builder, configId); return new ProtonConfig(builder); } private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) { int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(Zone.defaultZone(), services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); Admin admin = 
model.getAdmin(); Logserver logserver = admin.getLogserver(); HostResource hostResource = logserver.getHostResource(); assertNotNull(hostResource.getService("logserver")); String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName; assertNotNull(hostResource.getService(containerServiceType)); String configId = admin.getLogserver().getHostResource().getService(containerServiceType).getConfigId(); ApplicationMetadataConfig.Builder builder = new ApplicationMetadataConfig.Builder(); model.getConfig(builder, configId); ApplicationMetadataConfig cfg = new ApplicationMetadataConfig(builder); assertEquals(1, cfg.generation()); LogdConfig.Builder logdConfigBuilder = new LogdConfig.Builder(); model.getConfig(logdConfigBuilder, configId); LogdConfig logdConfig = new LogdConfig(logdConfigBuilder); assertTrue(logdConfig.logserver().use()); } private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Id combinedId, ClusterSpec.Type type, VespaModel model) { assertEquals("Nodes in cluster " + id + " with type " + type + (combinedId != null ? ", combinedId " + combinedId : ""), nodeCount, model.hostSystem().getHosts().stream() .map(h -> h.spec().membership().get().cluster()) .filter(spec -> spec.id().equals(id) && spec.type().equals(type) && spec.combinedId().equals(Optional.ofNullable(combinedId))) .count()); } private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Type type, VespaModel model) { assertProvisioned(nodeCount, id, null, type, model); } }
// Opens ModelProvisioningTest (class closes beyond this chunk). testNodesJdisc builds two container
// clusters from a hosts/services pair and verifies container config ids, jvm options/gc-options,
// preload paths and memory percentage propagate per cluster.
class ModelProvisioningTest {    @Test    public void testNodesJdisc() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>\n" +                "\n" +                "<admin version='3.0'><nodes count='1' /></admin>\n" +                "<container id='mydisc' version='1.0'>" +                "  <handler id='myHandler'>" +                "    <component id='injected' />" +                "  </handler>" +                "  <nodes count=\"3\"/>" +                "</container>" +                "<container id='mydisc2' version='1.0'>" +                "  <document-processing/>" +                "  <handler id='myHandler'>" +                "    <component id='injected' />" +                "  </handler>" +                "  <nodes count='2' allocated-memory='45%' jvm-gc-options='-XX:+UseParNewGC' jvm-options='-verbosegc' preload='lib/blablamalloc.so'/>" +                "</container>" +                "</services>";        String hosts ="<hosts>" +                "  <host name='myhost0'>" +                "    <alias>node0</alias>" +                "  </host>" +                "  <host name='myhost1'>" +                "    <alias>node1</alias>" +                "  </host>" +                "  <host name='myhost2'>" +                "    <alias>node2</alias>" +                "  </host>" +                "  <host name='myhost3'>" +                "    <alias>node3</alias>" +                "  </host>" +                "  <host name='myhost4'>" +                "    <alias>node4</alias>" +                "  </host>" +                "  <host name='myhost5'>" +                "    <alias>node5</alias>" +                "  </host>" +                "</hosts>";        VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);        VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true, false)));        ApplicationContainerCluster mydisc = model.getContainerClusters().get("mydisc");        ApplicationContainerCluster mydisc2 = model.getContainerClusters().get("mydisc2");        assertEquals(3, mydisc.getContainers().size());        assertEquals("mydisc/container.0", (mydisc.getContainers().get(0).getConfigId()));        assertTrue(mydisc.getContainers().get(0).isInitialized());        assertEquals("mydisc/container.1", mydisc.getContainers().get(1).getConfigId());        assertTrue(mydisc.getContainers().get(1).isInitialized());        assertEquals("mydisc/container.2", mydisc.getContainers().get(2).getConfigId());        
// testNodesJdisc continues: per-container jvm options, preload (default vespamalloc vs explicit),
// memory percentage (45% for mydisc2) and QrStartConfig heap-size propagation.
assertTrue(mydisc.getContainers().get(2).isInitialized());        assertEquals(2, mydisc2.getContainers().size());        assertEquals("mydisc2/container.0", mydisc2.getContainers().get(0).getConfigId());        assertTrue(mydisc2.getContainers().get(0).isInitialized());        assertEquals("mydisc2/container.1", mydisc2.getContainers().get(1).getConfigId());        assertTrue(mydisc2.getContainers().get(1).isInitialized());        assertEquals("", mydisc.getContainers().get(0).getJvmOptions());        assertEquals("", mydisc.getContainers().get(1).getJvmOptions());        assertEquals("", mydisc.getContainers().get(2).getJvmOptions());        assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(0).getPreLoad());        assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(1).getPreLoad());        assertEquals(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so"), mydisc.getContainers().get(2).getPreLoad());        assertEquals(Optional.empty(), mydisc.getMemoryPercentage());        assertEquals("-verbosegc", mydisc2.getContainers().get(0).getJvmOptions());        assertEquals("-verbosegc", mydisc2.getContainers().get(1).getJvmOptions());        assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(0).getPreLoad());        assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(1).getPreLoad());        assertEquals(Optional.of(45), mydisc2.getMemoryPercentage());        assertEquals(Optional.of("-XX:+UseParNewGC"), mydisc2.getJvmGCOptions());        QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();        mydisc2.getConfig(qrStartBuilder);        QrStartConfig qrsStartConfig = new QrStartConfig(qrStartBuilder);        assertEquals(45, qrsStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());        HostSystem hostSystem = model.hostSystem();        assertNotNull(hostSystem.getHostByHostname("myhost0"));        assertNotNull(hostSystem.getHostByHostname("myhost1"));        assertNotNull(hostSystem.getHostByHostname("myhost2"));        
// End of testNodesJdisc; testNodeCountForContentGroup checks node count and sequential distribution
// keys for a content group; testSeparateClusters (continues on the next physical line) starts here.
assertNotNull(hostSystem.getHostByHostname("myhost3"));        assertNull(hostSystem.getHostByHostname("Nope"));    }    @Test    public void testNodeCountForContentGroup() {        String xmlWithNodes =                "<?xml version='1.0' encoding='utf-8' ?>" +                "<services>" +                "\n" +                "  <admin version='3.0'>" +                "    <nodes count='3'/>" +                "  </admin>" +                "  <content version='1.0' id='bar'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='2'/>" +                "  </content>" +                "</services>";        VespaModelTester tester = new VespaModelTester();        int numberOfHosts = 2;        tester.addHosts(numberOfHosts);        int numberOfContentNodes = 2;        VespaModel model = tester.createModel(xmlWithNodes, true);        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());        Map<String, ContentCluster> contentClusters = model.getContentClusters();        ContentCluster cluster = contentClusters.get("bar");        assertEquals(numberOfContentNodes, cluster.getRootGroup().getNodes().size());        int i = 0;        for (StorageNode node : cluster.getRootGroup().getNodes())            assertEquals(i++, node.getDistributionKey());    }    @Test    public void testSeparateClusters() {        String xmlWithNodes =                "<?xml version='1.0' encoding='utf-8' ?>" +                "<services>" +                "  <container version='1.0' id='container1'>" +                "     <search/>" +                "     <nodes count='1'/>" +                "  </container>" +                "  <content version='1.0' id='content1'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='2'/>" +                "  </content>" +                "  <content version='1.0'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='2'/>" +                "  </content>" +                "</services>";        VespaModelTester tester = new VespaModelTester();        tester.addHosts(5);        VespaModel model = tester.createModel(xmlWithNodes, true);        assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());        assertEquals("Nodes in 
// testSeparateClusters finishes (provisioning per cluster id); testClusterMembership asserts the host's
// cluster membership type/id; testCombinedCluster begins: a container with nodes of='content1'
// produces a combined cluster (tried for both 'jdisc' and 'container' element names).
container1", 1, model.getContainerClusters().get("container1").getContainers().size());        assertEquals("Nodes in cluster without ID", 2, model.getContentClusters().get("content").getRootGroup().getNodes().size());        assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1")));        assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model);        assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);        assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model);    }    @Test    public void testClusterMembership() {        String xmlWithNodes =                "<?xml version='1.0' encoding='utf-8' ?>" +                "<services>" +                "  <container version='1.0' id='container1'>" +                "     <nodes count='1'/>" +                "  </container>" +                "</services>";        VespaModelTester tester = new VespaModelTester();        tester.addHosts(1);        VespaModel model = tester.createModel(xmlWithNodes, true);        assertEquals(1, model.hostSystem().getHosts().size());        HostResource host = model.hostSystem().getHosts().iterator().next();        assertTrue(host.spec().membership().isPresent());        assertEquals("container", host.spec().membership().get().cluster().type().name());        assertEquals("container1", host.spec().membership().get().cluster().id().value());    }    @Test    public void testCombinedCluster() {        var containerElements = Set.of("jdisc", "container");        for (var containerElement : containerElements) {            String xmlWithNodes =                    "<?xml version='1.0' encoding='utf-8' ?>" +                    "<services>" +                    "  <" + containerElement + " version='1.0' id='container1'>" +                    "     <search/>" +                    "     <nodes of='content1'/>" +                    "  </" + containerElement + ">" +                    "  <content version='1.0' id='content1'>" +                    "     <redundancy>2</redundancy>" +                    "     <documents>" +                    "       <document type='type1' mode='index'/>" +                    "     </documents>" +                    "     <nodes count='2'>" +                    "       <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +                    "     </nodes>" +                    "  </content>" +                    "</services>";            VespaModelTester tester = new VespaModelTester();            
// testCombinedCluster assertions: heap percentage lowered (17) and proton memory reduced for the jvm
// heap in combined clusters. testNonCombinedCluster (for comparison) starts here with separate
// container/content node sets.
tester.addHosts(2);            VespaModel model = tester.createModel(xmlWithNodes, true);            assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());            assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());            assertEquals("Heap size is lowered with combined clusters", 17, physicalMemoryPercentage(model.getContainerClusters().get("container1")));            assertEquals("Memory for proton is lowered to account for the jvm heap", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.17)), protonMemorySize(model.getContentClusters().get("content1")));            assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);            assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);        }    }    /** For comparison with the above */    @Test    public void testNonCombinedCluster() {        var containerElements = Set.of("jdisc", "container");        for (var containerElement : containerElements) {            String xmlWithNodes =                    "<?xml version='1.0' encoding='utf-8' ?>" +                    "<services>" +                    "  <" + containerElement + " version='1.0' id='container1'>" +                    "     <search/>" +                    "     <nodes count='2'/>" +                    "  </" + containerElement + ">" +                    "  <content version='1.0' id='content1'>" +                    "     <redundancy>2</redundancy>" +                    "     <documents>" +                    "       <document type='type1' mode='index'/>" +                    "     </documents>" +                    "     <nodes count='2'>" +                    "       <resources vcpu='1' memory='3Gb' disk='9Gb'/>" +                    "     </nodes>" +                    "  </content>" +                    "</services>";            VespaModelTester tester = new VespaModelTester();            tester.addHosts(4);            VespaModel model = tester.createModel(xmlWithNodes, true);            assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());            assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());            assertEquals("Heap size is normal", 60, 
// testNonCombinedCluster ends (normal heap/proton sizes); testCombinedClusterWithJvmOptions verifies
// jvm-options propagate to combined-cluster containers; testMultipleCombinedClusters begins.
physicalMemoryPercentage(model.getContainerClusters().get("container1")));            assertEquals("Memory for proton is normal", (long)((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1")));        }    }    @Test    public void testCombinedClusterWithJvmOptions() {        String xmlWithNodes =                "<?xml version='1.0' encoding='utf-8' ?>" +                "<services>" +                "  <container version='1.0' id='container1'>" +                "     <document-processing/>" +                "     <nodes of='content1' jvm-options='testoption'/>" +                "  </container>" +                "  <content version='1.0' id='content1'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='2'/>" +                "  </content>" +                "</services>";        VespaModelTester tester = new VespaModelTester();        tester.addHosts(2);        VespaModel model = tester.createModel(xmlWithNodes, true);        assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());        assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());        for (Container container : model.getContainerClusters().get("container1").getContainers())            assertTrue(container.getJvmOptions().contains("testoption"));    }    @Test    public void testMultipleCombinedClusters() {        String xmlWithNodes =                "<?xml version='1.0' encoding='utf-8' ?>" +                "<services>" +                "  <container version='1.0' id='container1'>" +                "     <nodes of='content1'/>" +                "  </container>" +                "  <container version='1.0' id='container2'>" +                "     <nodes of='content2'/>" +                "  </container>" +                "  <content version='1.0' id='content1'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='2'/>" +                "  </content>" +                "  <content version='1.0' id='content2'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='3'/>" +                "  </content>" +                "</services>";        VespaModelTester tester = new 
// testMultipleCombinedClusters assertions; testNonExistingCombinedClusterReference and
// testInvalidCombinedClusterReference check the error messages for bad nodes of='...' references;
// testUsingNodesAndGroupCountAttributes (nodes count/groups) starts at the end of this line.
VespaModelTester();        tester.addHosts(5);        VespaModel model = tester.createModel(xmlWithNodes, true);        assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());        assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());        assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size());        assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size());    }    @Test    public void testNonExistingCombinedClusterReference() {        String xmlWithNodes =                "<?xml version='1.0' encoding='utf-8' ?>" +                "<services>" +                "  <container version='1.0' id='container1'>" +                "     <nodes of='container2'/>" +                "  </container>" +                "</services>";        VespaModelTester tester = new VespaModelTester();        tester.addHosts(2);        try {            tester.createModel(xmlWithNodes, true);            fail("Expected exception");        }        catch (IllegalArgumentException e) {            assertEquals("container cluster 'container1' contains an invalid reference: referenced service 'container2' is not defined", Exceptions.toMessageString(e));        }    }    @Test    public void testInvalidCombinedClusterReference() {        String xmlWithNodes =                "<?xml version='1.0' encoding='utf-8' ?>" +                "<services>" +                "  <container version='1.0' id='container1'>" +                "     <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +                "  </container>" +                "  <container version='1.0' id='container2'>" +                "     <nodes count='2'/>" +                "  </container>" +                "</services>";        VespaModelTester tester = new VespaModelTester();        tester.addHosts(2);        try {            tester.createModel(xmlWithNodes, true);            fail("Expected exception");        }        catch (IllegalArgumentException e) {            assertEquals("container cluster 'container1' contains an invalid reference: service 'container2' is not a content service", Exceptions.toMessageString(e));        }    }    @Test    public void testUsingNodesAndGroupCountAttributes() {        String services =                "<?xml version='1.0' 
// Body of testUsingNodesAndGroupCountAttributes: 10 container nodes plus two content clusters
// ('bar': 27 nodes / 9 groups, 'baz': 27 nodes / 27 groups); asserts slobrok/logserver placement
// on container nodes and that hosted environments have no in-cluster config servers.
encoding='utf-8' ?>\n" +                "<services>" +                "  <admin version='4.0'/>" +                "  <container version='1.0' id='foo'>" +                "     <nodes count='10'/>" +                "  </container>" +                "  <content version='1.0' id='bar'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='27' groups='9'/>" +                "  </content>" +                "  <content version='1.0' id='baz'>" +                "     <redundancy>1</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='27' groups='27'/>" +                "  </content>" +                "</services>";        int numberOfHosts = 64;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        VespaModel model = tester.createModel(services, true);        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());        assertEquals(1, model.getContainerClusters().size());        Set<HostResource> containerHosts = model.getContainerClusters().get("foo").getContainers().stream()                                                .map(Container::getHost)                                                .collect(Collectors.toSet());        assertEquals(10, containerHosts.size());        Admin admin = model.getAdmin();        Set<HostResource> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());        assertEquals(3, slobrokHosts.size());        assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts));        assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost()));        assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size());        assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers());        ContentCluster cluster = model.getContentClusters().get("bar");        ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();        assertEquals(3, clusterControllers.getContainers().size());        assertEquals("bar-controllers", clusterControllers.getName());        assertEquals("node-1-3-10-54", 
// Cluster-controller host names and the group/subgroup layout of 'bar' (9 groups of 3 nodes with
// sequential distribution keys and bar/storage/N config ids) are asserted here.
clusterControllers.getContainers().get(0).getHostName());        assertEquals("node-1-3-10-51", clusterControllers.getContainers().get(1).getHostName());        assertEquals("node-1-3-10-48", clusterControllers.getContainers().get(2).getHostName());        assertEquals(0, cluster.getRootGroup().getNodes().size());        assertEquals(9, cluster.getRootGroup().getSubgroups().size());        assertEquals("0", cluster.getRootGroup().getSubgroups().get(0).getIndex());        assertEquals(3, cluster.getRootGroup().getSubgroups().get(0).getNodes().size());        assertEquals(0, cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey());        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));        assertEquals("node-1-3-10-54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2));        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2"));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3"));        assertEquals("node-1-3-10-51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4"));        
// Remaining 'bar' subgroup assertions (groups 1, 2 and 8) followed by the 'baz' cluster checks:
// its own controllers and 27 single-node groups.
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5"));        assertEquals("node-1-3-10-48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());        assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8"));        assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3));        assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24));        assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24"));        assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25));        assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25"));        assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26));        assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26"));        cluster = model.getContentClusters().get("baz");        clusterControllers = cluster.getClusterControllers();        assertEquals(3, clusterControllers.getContainers().size());        assertEquals("baz-controllers", clusterControllers.getName());        assertEquals("node-1-3-10-27", clusterControllers.getContainers().get(0).getHostName());        assertEquals("node-1-3-10-26", clusterControllers.getContainers().get(1).getHostName());        assertEquals("node-1-3-10-25", clusterControllers.getContainers().get(2).getHostName());        assertEquals(0, cluster.getRootGroup().getNodes().size());        assertEquals(27, cluster.getRootGroup().getSubgroups().size());        assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));        
// 'baz' subgroup spot checks (groups 0, 1, 2, 26) close testUsingNodesAndGroupCountAttributes;
// testGroupsOfSize1 (8 nodes / 8 groups) starts at the end of this line.
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0"));        assertEquals("node-1-3-10-27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());        assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1"));        assertEquals("node-1-3-10-26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());        assertEquals("node-1-3-10-25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());        assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26"));        assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1));        assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26));        assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26"));    }    @Test    public void testGroupsOfSize1() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <admin version='4.0'/>" +                "  <container version='1.0' id='foo'>" +                "     <nodes count='10'/>" +                "  </container>" +                "  <content version='1.0' id='bar'>" +                "     <redundancy>1</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='8' groups='8'/>" +                "  </content>" +                "</services>";        int numberOfHosts = 18;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        VespaModel model = tester.createModel(services, true);        assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));        
// testGroupsOfSize1 assertions: 3 cluster controllers, 8 single-node groups, distribution bits = 8,
// and host placement per subgroup.
ContentCluster cluster = model.getContentClusters().get("bar");        ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();        assertEquals(3, clusterControllers.getContainers().size());        assertEquals("bar-controllers", clusterControllers.getName());        assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName());        assertEquals("node-1-3-10-07", clusterControllers.getContainers().get(1).getHostName());        assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName());        assertEquals(0, cluster.getRootGroup().getNodes().size());        assertEquals(8, cluster.getRootGroup().getSubgroups().size());        assertEquals(8, cluster.distributionBits());        assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));        assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));        assertEquals("node-1-3-10-08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());        assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));        assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1"));        assertEquals("node-1-3-10-07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());        assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7"));        assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1));        assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7));        
// End of testGroupsOfSize1; testExplicitNonDedicatedClusterControllers begins: a
// <controllers><nodes dedicated='false' count='6'/></controllers> declaration.
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7"));        assertEquals("node-1-3-10-01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName());    }    @Test    public void testExplicitNonDedicatedClusterControllers() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <admin version='4.0'/>" +                "  <container version='1.0' id='foo'>" +                "     <nodes count='10'/>" +                "  </container>" +                "  <content version='1.0' id='bar'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <controllers><nodes dedicated='false' count='6'/></controllers>" +                "     <nodes count='9' groups='3'/>" +                "  </content>" +                "</services>";        int numberOfHosts = 19;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        VespaModel model = tester.createModel(services, true);        assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));        ContentCluster cluster = model.getContentClusters().get("bar");        ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();        assertEquals( 8, cluster.distributionBits());        assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size());        assertEquals("bar-controllers", clusterControllers.getName());        assertEquals("node-1-3-10-09", clusterControllers.getContainers().get(0).getHostName());        assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(1).getHostName());        assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(2).getHostName());        assertEquals("node-1-3-10-05", clusterControllers.getContainers().get(3).getHostName());        assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(4).getHostName());        assertEquals("node-1-3-10-09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());        assertEquals("node-1-3-10-08", 
// testExplicitNonDedicatedClusterControllers ends; testClusterControllersWithGroupSize2 (controllers
// rounded to the closest odd number); testClusterControllersIncludeNonRetiredNodes begins.
cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName());        assertEquals("node-1-3-10-06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());        assertEquals("node-1-3-10-03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());    }    @Test    public void testClusterControllersWithGroupSize2() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <admin version='4.0'/>" +                "  <container version='1.0' id='foo'>" +                "     <nodes count='10'/>" +                "  </container>" +                "  <content version='1.0' id='bar'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='8' groups='4'/>" +                "  </content>" +                "</services>";        int numberOfHosts = 18;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        VespaModel model = tester.createModel(services, true);        assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));        ContentCluster cluster = model.getContentClusters().get("bar");        ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();        assertEquals("We get the closest odd number", 3, clusterControllers.getContainers().size());        assertEquals("bar-controllers", clusterControllers.getName());        assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName());        assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(1).getHostName());        assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(2).getHostName());    }    @Test    public void testClusterControllersIncludeNonRetiredNodes() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <admin version='4.0'/>" +                "  <container version='1.0' id='foo'>" +                "     <nodes count='10'/>" +                "  </container>" +                "  <content version='1.0' id='bar'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes 
// testClusterControllersIncludeNonRetiredNodes: retired content hosts still get controllers appended
// after the non-retired ones; then testSlobroksClustersAreExpandedToIncludeRetiredNodes begins.
count='9' groups='3'/>" +                "  </content>" +                "</services>";        int numberOfHosts = 19;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        VespaModel model = tester.createModel(services, true, "node-1-3-10-09", "node-1-3-10-06", "node-1-3-10-03");        assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));        ContentCluster cluster = model.getContentClusters().get("bar");        ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();        assertEquals(3 + 3, clusterControllers.getContainers().size());        assertEquals("bar-controllers", clusterControllers.getName());        assertEquals("Non-retired", "node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName());        assertEquals("Non-retired", "node-1-3-10-05", clusterControllers.getContainers().get(1).getHostName());        assertEquals("Non-retired", "node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName());        assertEquals("Retired", "node-1-3-10-09", clusterControllers.getContainers().get(3).getHostName());        assertEquals("Retired", "node-1-3-10-06", clusterControllers.getContainers().get(4).getHostName());        assertEquals("Retired", "node-1-3-10-03", clusterControllers.getContainers().get(5).getHostName());    }    @Test    public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <admin version='4.0'/>" +                "  <container version='1.0' id='foo'>" +                "     <nodes count='10'/>" +                "  </container>" +                "</services>";        int numberOfHosts = 10;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        VespaModel model = tester.createModel(services, true, "node-1-3-10-09");        assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));        assertEquals("Includes retired node", 1+3, model.getAdmin().getSlobroks().size());        assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName());        assertEquals("node-1-3-10-08", 
// Slobrok expansion tests: retired nodes are kept as extra slobrok hosts, both when retirement comes
// first and last; testSlobroksAreSpreadOverAllContainerClusters begins at the end of this line.
model.getAdmin().getSlobroks().get(1).getHostName());        assertEquals("node-1-3-10-07", model.getAdmin().getSlobroks().get(2).getHostName());        assertEquals("Included in addition because it is retired", "node-1-3-10-09", model.getAdmin().getSlobroks().get(3).getHostName());    }    @Test    public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <admin version='4.0'/>" +                "  <container version='1.0' id='foo'>" +                "     <nodes count='10'/>" +                "  </container>" +                "</services>";        int numberOfHosts = 10;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        VespaModel model = tester.createModel(services, true, "node-1-3-10-01", "node-1-3-10-02");        assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());        assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size());        assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName());        assertEquals("node-1-3-10-09", model.getAdmin().getSlobroks().get(1).getHostName());        assertEquals("node-1-3-10-08", model.getAdmin().getSlobroks().get(2).getHostName());        assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(3).getHostName());        assertEquals("Included in addition because it is retired", "node-1-3-10-01", model.getAdmin().getSlobroks().get(4).getHostName());    }    @Test    public void testSlobroksAreSpreadOverAllContainerClusters() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <admin version='4.0'/>" +                "  <container version='1.0' id='foo'>" +                "     <nodes count='10'/>" +                "  </container>" +                "  <container version='1.0' id='bar'>" +                "     <nodes count='3'/>" +                "  </container>" +                "</services>";        int numberOfHosts = 13;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        VespaModel model = tester.createModel(services, true, "node-1-3-10-12", "node-1-3-10-03", 
// Slobrok spread across both container clusters with retired extras; test2ContentNodesProduces1ClusterController
// (two content nodes yield a single controller); testDedicatedClusterControllers begins.
"node-1-3-10-02");        assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));        assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size());        assertEquals("node-1-3-10-13", model.getAdmin().getSlobroks().get(0).getHostName());        assertEquals("node-1-3-10-11", model.getAdmin().getSlobroks().get(1).getHostName());        assertEquals("Included in addition because it is retired", "node-1-3-10-12", model.getAdmin().getSlobroks().get(2).getHostName());        assertEquals("node-1-3-10-01", model.getAdmin().getSlobroks().get(3).getHostName());        assertEquals("Included in addition because it is retired", "node-1-3-10-03", model.getAdmin().getSlobroks().get(4).getHostName());        assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(5).getHostName());    }    @Test    public void test2ContentNodesProduces1ClusterController() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <content version='1.0' id='bar'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='2'/>" +                "  </content>" +                "</services>";        int numberOfHosts = 2;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        VespaModel model = tester.createModel(services, true);        assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));        ContentCluster cluster = model.getContentClusters().get("bar");        ContainerCluster clusterControllers = cluster.getClusterControllers();        assertEquals(1, clusterControllers.getContainers().size());    }    @Test    public void testDedicatedClusterControllers() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <content version='1.0' id='foo'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='2' />" +                "  </content>" +                "  <content version='1.0' id='bar'>" +                "     
// testDedicatedClusterControllers: with a dedicated cluster-controller cluster the content clusters
// carry no controllers and admin hosts a 3-node "cluster-controllers" cluster of admin type;
// testExplicitDedicatedClusterControllers begins at the end of this line.
<redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='2' />" +                "  </content>" +                "</services>";        int numberOfHosts = 7;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        tester.dedicatedClusterControllerCluster(true);        VespaModel model = tester.createModel(services);        assertEquals(7, model.getRoot().hostSystem().getHosts().size());        assertNull(model.getContentClusters().get("foo").getClusterControllers());        assertNull(model.getContentClusters().get("bar").getClusterControllers());        ClusterControllerContainerCluster clusterControllers = model.getAdmin().getClusterControllers();        assertEquals(3, clusterControllers.getContainers().size());        assertEquals("cluster-controllers", clusterControllers.getName());        clusterControllers.getContainers().stream().map(ClusterControllerContainer::getHost).forEach(host -> {            assertTrue(host.spec().membership().get().cluster().isStateful());            assertEquals(ClusterSpec.Type.admin, host.spec().membership().get().cluster().type());        });    }    @Test    public void testExplicitDedicatedClusterControllers() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <container version='1.0' id='foo'>" +                "     <nodes count='10'/>" +                "  </container>" +                "  <content version='1.0' id='bar'>" +                "     <redundancy>2</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <controllers><nodes dedicated='true' count='4'/></controllers>" +                "     <nodes count='9' groups='3'/>" +                "  </content>" +                "</services>";        int numberOfHosts = 23;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        VespaModel model = tester.createModel(services, true);        assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));        ContentCluster cluster = model.getContentClusters().get("bar");        ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();        assertEquals(4, 
// testExplicitDedicatedClusterControllers ends; the two logserver-container tests delegate to
// testContainerOnLogserverHost; testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes begins and
// continues past this chunk (its body is truncated here).
clusterControllers.getContainers().size());        assertEquals("bar-controllers", clusterControllers.getName());        assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(0).getHostName());        assertEquals("node-1-3-10-03", clusterControllers.getContainers().get(1).getHostName());        assertEquals("node-1-3-10-02", clusterControllers.getContainers().get(2).getHostName());        assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(3).getHostName());    }    @Test    public void testLogserverContainerWhenDedicatedLogserver() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <admin version='4.0'>" +                "    <logservers>" +                "      <nodes count='1' dedicated='true'/>" +                "    </logservers>" +                "  </admin>" +                "  <container version='1.0' id='foo'>" +                "     <nodes count='1'/>" +                "  </container>" +                "</services>";        boolean useDedicatedNodeForLogserver = false;        testContainerOnLogserverHost(services, useDedicatedNodeForLogserver);    }    @Test    public void testImplicitLogserverContainer() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>\n" +                "<services>" +                "  <container version='1.0' id='foo'>" +                "     <nodes count='1'/>" +                "  </container>" +                "</services>";        boolean useDedicatedNodeForLogserver = true;        testContainerOnLogserverHost(services, useDedicatedNodeForLogserver);    }    @Test    public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {        String services =                "<?xml version='1.0' encoding='utf-8' ?>" +                "<services>" +                "  <admin version='3.0'>" +                "    <nodes count='3'/>" +                "  </admin>" +                "  <content version='1.0' id='bar'>" +                "     <redundancy reply-after='3'>4</redundancy>" +                "     <documents>" +                "       <document type='type1' mode='index'/>" +                "     </documents>" +                "     <nodes count='24' groups='3'/>" +                "     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +                "  </content>" +                "</services>";        int numberOfHosts = 6;        VespaModelTester tester = new VespaModelTester();        tester.addHosts(numberOfHosts);        VespaModel model = tester.createModel(services, false);        
assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(2*3, cluster.redundancy().effectiveReadyCopies()); assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get()); assertEquals(0, cluster.getRootGroup().getNodes().size()); assertEquals(3, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4)); 
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4")); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5)); assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5")); } @Test public void testUsingNodesCountAttributesAndGettingTooFewNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <container version='1.0' id='container'>" + " <search/>" + " <nodes count='2'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='8'>12</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24'/>" + " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + " </content>" + "</services>"; int numberOfHosts = 6; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(4, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(4, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(4, cluster.redundancy().effectiveReadyCopies()); assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertEquals(4, cluster.getSearch().getIndexed().getSearchableCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(4, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(4)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); assertThat(cluster.getRootGroup().getNodes().get(1).getDistributionKey(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(1).getConfigId(), is("bar/storage/1")); assertThat(cluster.getRootGroup().getNodes().get(2).getDistributionKey(), is(2)); assertThat(cluster.getRootGroup().getNodes().get(2).getConfigId(), is("bar/storage/2")); assertThat(cluster.getRootGroup().getNodes().get(3).getDistributionKey(), is(3)); assertThat(cluster.getRootGroup().getNodes().get(3).getConfigId(), is("bar/storage/3")); } @Test public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='3'>4</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24' groups='3'/>" + " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" + " </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers(); assertEquals(1, clusterControllers.getContainers().size()); assertEquals("bar-controllers", clusterControllers.getName()); assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(0).getHostName()); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, 
cluster.redundancy().effectiveReadyCopies()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } @Test(expected = IllegalArgumentException.class) public void testRequiringMoreNodesThanAreAvailable() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3' required='true'/>" + " </content>" + "</services>"; int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.createModel(services, false); } @Test(expected = IllegalArgumentException.class) @Test public void testExclusiveDedicatedClusterControllers() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <content version='1.0' id='foo'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2' exclusive='true'/>" + " </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); tester.dedicatedClusterControllerCluster(true); VespaModel model = tester.createModel(services); assertEquals(5, model.hostSystem().getHosts().size()); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } @Test public void testExclusiveNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<container version='1.0' id='container'>" + " <nodes count='2' 
exclusive='true'/>" + " </container>" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='3' exclusive='true'/>" + " </content>" + "</services>"; int numberOfHosts = 5; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); model.hostSystem().getHosts().forEach(host -> assertTrue(host.spec().membership().get().cluster().isExclusive())); } @Test public void testUsingNodesCountAttributesAndGettingJustOneNode() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <admin version='3.0'>" + " <nodes count='3'/>" + " </admin>" + " <content version='1.0' id='bar'>" + " <redundancy reply-after='8'>12</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='24'/>" + " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" + " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" + " </content>" + "</services>"; int numberOfHosts = 1; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, false); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); ContentCluster cluster = model.getContentClusters().get("bar"); assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); assertEquals(1, cluster.redundancy().effectiveReadyCopies()); assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size()); assertFalse(cluster.getRootGroup().getPartitions().isPresent()); assertEquals(1, cluster.getRootGroup().getNodes().size()); assertEquals(0, cluster.getRootGroup().getSubgroups().size()); assertThat(cluster.getRootGroup().getNodes().size(), is(1)); 
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0)); assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } @Test public void testRequestingSpecificNodeResources() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='4.0'>" + " <logservers>" + " <nodes count='1' dedicated='true'>" + " <resources vcpu='0.1' memory='0.2Gb' disk='300Gb' disk-speed='slow'/>" + " </nodes>" + " </logservers>" + " <slobroks>" + " <nodes count='2' dedicated='true'>" + " <resources vcpu='0.1' memory='0.3Gb' disk='1Gb' bandwidth='500Mbps'/>" + " </nodes>" + " </slobroks>" + " </admin>" + " <container version='1.0' id='container'>" + " <nodes count='4'>" + " <resources vcpu='12' memory='10Gb' disk='30Gb'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers>" + " <nodes count='2' dedicated='true'>" + " <resources vcpu='0.8' memory='3Gb' disk='2Gb'/>" + " </nodes>" + " </controllers>" + " <nodes count='5'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <controllers>" + " <nodes count='3' dedicated='true'>" + " <resources vcpu='0.7' memory='2Gb' disk='2.5Gb'/>" + " </nodes>" + " </controllers>" + " <nodes count='6'>" + " <resources vcpu='10' memory='64Gb' disk='200Gb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 23; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(0.1, 0.2, 300, 0.3, NodeResources.DiskSpeed.slow), 1); tester.addHosts(new NodeResources(0.1, 0.3, 1, 0.5), 2); tester.addHosts(new NodeResources(12, 10, 30, 0.3), 4); tester.addHosts(new NodeResources(0.8, 3, 2, 0.3), 2); tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5); tester.addHosts(new 
NodeResources(0.7, 2, 2.5, 0.3), 3); tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6); VespaModel model = tester.createModel(services, true, 0); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } @Test public void testRequestingRangesMin() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container'>" + " <nodes count='[4, 6]'>" + " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='[6, 20]' groups='[3,4]'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 10; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true); assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size()); } @Test public void testRequestingRangesMax() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container'>" + " <nodes count='[4, 6]'>" + " <resources vcpu='[11.5, 13.5]' memory='[10Gb, 100Gb]' disk='[30Gb, 1Tb]'/>" + " </nodes>" + " </container>" + " <content version='1.0' id='foo'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='[6, 20]' groups='[3,4]'>" + " <resources vcpu='8' memory='200Gb' disk='1Pb'/>" + " </nodes>" + " </content>" + "</services>"; int totalHosts = 26; VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6); tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20); VespaModel model = tester.createModel(services, true, true); assertEquals(totalHosts, 
model.getRoot().hostSystem().getHosts().size()); } @Test public void testContainerOnly() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals(3, model.getContainerClusters().get("container").getContainers().size()); assertNotNull(model.getAdmin().getLogserver()); assertEquals(3, model.getAdmin().getSlobroks().size()); } @Test public void testJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvmargs='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } @Test public void testJvmOptions() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvm-options='xyz' count='3'/>" + "</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(services, true); assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size()); assertEquals("xyz", model.getContainerClusters().get("container").getContainers().get(0).getAssignedJvmOptions()); } @Test public void testJvmOptionsOverridesJvmArgs() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<container version='1.0'>" + " <search/>" + " <nodes jvm-options='xyz' jvmargs='abc' count='3'/>" + 
"</container>"; int numberOfHosts = 3; VespaModelTester tester = new VespaModelTester(); tester.addHosts(numberOfHosts); try { tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("You have specified both jvm-options='xyz' and deprecated jvmargs='abc'. Merge jvmargs into jvm-options.", e.getMessage()); } } @Test public void testUsingHostaliasWithProvisioner() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + "<admin version='2.0'>" + " <adminserver hostalias='node1'/>\n"+ "</admin>\n" + "<container id='mydisc' version='1.0'>" + " <handler id='myHandler'>" + " <component id='injected' />" + " </handler>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + "</container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); } @Test public void testThatStandaloneSyntaxWorksOnHostedVespa() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + " <http>" + " <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" + " </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } @Test public void testThatStandaloneSyntaxOnHostedVespaRequiresDefaultPort() { try { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<container id='foo' version='1.0'>" + " <http>" + " <server id='server1' port='8095' />" + " </http>" + "</container>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(1); 
VespaModel model = tester.createModel(services, true); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Illegal port 8095 in http server 'server1': Port must be set to " + getDefaults().vespaWebServicePort(), e.getMessage()); } } @Test public void testThatStandaloneSyntaxWorksOnHostedManuallyDeployed() { String services = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='foo' version='1.0'>" + " <nodes>" + " <node hostalias='node1'/>" + " <node hostalias='node2'/>" + " </nodes>" + " </container>" + " <content id='bar' version='1.0'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <redundancy>2</redundancy>" + " <nodes>" + " <group>" + " <node distribution-key='0' hostalias='node3'/>" + " <node distribution-key='1' hostalias='node4'/>" + " </group>" + " </nodes>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(true); tester.addHosts(4); VespaModel model = tester.createModel(new Zone(Environment.dev, RegionName.from("us-central-1")), services, true); assertEquals("We get 1 node per cluster and no admin node", 2, model.getHosts().size()); assertEquals(1, model.getContainerClusters().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } /** Deploying an application with "nodes count" standalone should give a single-node deployment */ @Test public void testThatHostedSyntaxWorksOnStandalone() { String xmlWithNodes = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='container1'>" + " <search/>" + " <nodes count='1'/>" + " </container>" + " <content version='1.0'>" + " <redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes 
count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(3); VespaModel model = tester.createModel(xmlWithNodes, true); assertEquals("Nodes in container cluster", 1, model.getContainerClusters().get("container1").getContainers().size()); assertEquals("Nodes in content cluster (downscaled)", 1, model.getContentClusters().get("content").getRootGroup().getNodes().size()); model.getConfig(new StorStatusConfig.Builder(), "default"); } @Test public void testNoNodeTagMeansTwoNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(3); VespaModel model = tester.createModel(services, true); assertEquals(3, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } @Test public void testNoNodeTagMeansTwoNodesNoContent() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(2); VespaModel model = tester.createModel(services, true); assertEquals(2, model.getRoot().hostSystem().getHosts().size()); assertEquals(2, model.getAdmin().getSlobroks().size()); assertEquals(2, model.getContainerClusters().get("foo").getContainers().size()); } @Test public void testNoNodeTagMeans1NodeNonHosted() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + 
"<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size()); } @Test public void testSingleNodeNonHosted() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services>" + " <container id='foo' version='1.0'>" + " <search/>" + " <document-api/>" + " <nodes><node hostalias='foo'/></nodes>"+ " </container>" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+ " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.setHosted(false); tester.addHosts(1); VespaModel model = tester.createModel(services, true); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); assertEquals(1, model.getAdmin().getSlobroks().size()); assertEquals(1, model.getContainerClusters().get("foo").getContainers().size()); assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes()); } /** Recreate the combination used in some factory tests */ @Test public void testMultitenantButNotHosted() { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" 
+ " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " </services>"; VespaModel model = createNonProvisionedMultitenantModel(services); assertEquals(1, model.getRoot().hostSystem().getHosts().size()); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(2, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testModelWithReferencedIndexingCluster() { String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>"; VespaModel model = 
createNonProvisionedMultitenantModel(services); assertThat(model.getRoot().hostSystem().getHosts().size(), is(1)); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(1, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testSharedNodesNotHosted() { String hosts = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<hosts>\n" + " <host name=\"vespa-1\">\n" + " <alias>vespa-1</alias>\n" + " </host>\n" + " <host name=\"vespa-2\">\n" + " <alias>vespa-2</alias>\n" + " </host>\n" + " <host name=\"vespa-3\">\n" + " <alias>vespa-3</alias>\n" + " </host>\n" + "</hosts>"; String services = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services version=\"1.0\">\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"vespa-1\"/>\n" + " <configservers>\n" + " <configserver hostalias=\"vespa-1\"/>\n" + " </configservers>\n" + " </admin>\n" + "\n" + " <container id=\"container\" version=\"1.0\">\n" + " <document-processing/>\n" + " <document-api/>\n" + " <search/>\n" + " <nodes jvm-options=\"-Xms512m -Xmx512m\">\n" + " <node hostalias=\"vespa-1\"/>\n" + " <node hostalias=\"vespa-2\"/>\n" + " <node hostalias=\"vespa-3\"/>\n" + " </nodes>\n" + " </container>\n" + "\n" + " <content id=\"storage\" version=\"1.0\">\n" + " <search>\n" + " <visibility-delay>1.0</visibility-delay>\n" + " </search>\n" + " <redundancy>2</redundancy>\n" + " <documents>\n" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document-processing cluster=\"container\"/>\n" + " </documents>\n" + " <nodes>\n" + " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" + " <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" + " <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" + " </nodes>\n" + " </content>\n" + "\n" + "</services>"; VespaModel model = createNonProvisionedModel(false, hosts, services); assertEquals(3, 
model.getRoot().hostSystem().getHosts().size()); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(3, content.getRootGroup().getNodes().size()); } @Test public void testMultitenantButNotHostedSharedContentNode() { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + " </tuning>" + " <documents>" + " <document mode='store-only' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " <content id='search' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " </group>" + " <documents>" + " <document type='type1'/>" + " </documents>" + " </content>" + " </services>"; VespaModel model = createNonProvisionedMultitenantModel(services); assertThat(model.getRoot().hostSystem().getHosts().size(), is(1)); ContentCluster content = model.getContentClusters().get("storage"); assertEquals(2, content.getRootGroup().getNodes().size()); ContainerCluster controller = content.getClusterControllers(); assertEquals(1, controller.getContainers().size()); } @Test public void testStatefulProperty() { String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='qrs'>" + " <nodes count='1'/>" + " </container>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='3'/>" + " </container>" + " <content version='1.0' id='content'>" + " 
<redundancy>2</redundancy>" + " <documents>" + " <document type='type1' mode='index'/>" + " </documents>" + " <nodes count='2'/>" + " </content>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(6); VespaModel model = tester.createModel(servicesXml, true); Map<String, Boolean> tests = Map.of("qrs", false, "zk", true, "content", true); Map<String, List<HostResource>> hostsByCluster = model.hostSystem().getHosts().stream() .collect(Collectors.groupingBy(h -> h.spec().membership().get().cluster().id().value())); tests.forEach((clusterId, stateful) -> { List<HostResource> hosts = hostsByCluster.getOrDefault(clusterId, List.of()); assertFalse("Hosts are provisioned for '" + clusterId + "'", hosts.isEmpty()); assertEquals("Hosts in cluster '" + clusterId + "' are " + (stateful ? "" : "not ") + "stateful", stateful, hosts.stream().allMatch(h -> h.spec().membership().get().cluster().isStateful())); }); } @Test public void containerWithZooKeeperSuboptimalNodeCountDuringRetirement() { String servicesXml = "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='4'/>" + " </container>" + "</services>"; VespaModelTester tester = new VespaModelTester(); tester.addHosts(4); VespaModel model = tester.createModel(servicesXml, true, "node-1-3-10-01"); ApplicationContainerCluster cluster = model.getContainerClusters().get("zk"); assertEquals(1, cluster.getContainers().stream().filter(Container::isRetired).count()); assertEquals(3, cluster.getContainers().stream().filter(c -> !c.isRetired()).count()); } @Test public void containerWithZooKeeperJoiningServers() { Function<Integer, String> servicesXml = (nodeCount) -> { return "<?xml version='1.0' encoding='utf-8' ?>" + "<services>" + " <container version='1.0' id='zk'>" + " <zookeeper/>" + " <nodes count='" + nodeCount + "'/>" + " </container>" + "</services>"; }; VespaModelTester tester = new VespaModelTester(); 
tester.addHosts(5); VespaModel model = tester.createModel(servicesXml.apply(3), true); { ApplicationContainerCluster cluster = model.getContainerClusters().get("zk"); ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder(); cluster.getContainers().forEach(c -> c.getConfig(config)); cluster.getConfig(config); assertTrue("Initial servers are not joining", config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining)); } { VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(5), true, false, 0, Optional.of(model)); ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk"); ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder(); cluster.getContainers().forEach(c -> c.getConfig(config)); cluster.getConfig(config); assertEquals("New nodes are joining", Map.of(0, false, 1, false, 2, false, 3, true, 4, true), config.build().server().stream().collect(Collectors.toMap(ZookeeperServerConfig.Server::id, ZookeeperServerConfig.Server::joining))); } } private VespaModel createNonProvisionedMultitenantModel(String services) { return createNonProvisionedModel(true, null, services); } private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) { VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1")); ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg; DeployState deployState = new DeployState.Builder().applicationPackage(appPkg). properties((new TestProperties()).setMultitenant(multitenant)). 
build(); return modelCreatorWithMockPkg.create(false, deployState); } private int physicalMemoryPercentage(ContainerCluster cluster) { QrStartConfig.Builder b = new QrStartConfig.Builder(); cluster.getConfig(b); return b.build().jvm().heapSizeAsPercentageOfPhysicalMemory(); } private long protonMemorySize(ContentCluster cluster) { ProtonConfig.Builder b = new ProtonConfig.Builder(); cluster.getSearch().getIndexed().getSearchNode(0).getConfig(b); return b.build().hwinfo().memory().size(); } @Test public void require_that_proton_config_is_tuned_based_on_node_resources() { String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " <content version='1.0' id='test'>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='2'>", " <resources vcpu='1' memory='3Gb' disk='9Gb' bandwidth='5Gbps' disk-speed='slow'/>", " </nodes>", " </content>", "</services>"); VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(1, 3, 9, 5, NodeResources.DiskSpeed.slow), 2); VespaModel model = tester.createModel(services, true, 0); ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch(); assertEquals(2, cluster.getSearchNodes().size()); assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001); assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001); } private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); List<SearchNode> searchNodes = cluster.getSearchNodes(); assertTrue(searchNodeIdx < searchNodes.size()); searchNodes.get(searchNodeIdx).getConfig(builder); return new ProtonConfig(builder); } @Test public void require_that_config_override_and_explicit_proton_tuning_and_resource_limits_have_precedence_over_default_node_resource_tuning() { String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>", "<services>", " 
<content version='1.0' id='test'>", " <config name='vespa.config.search.core.proton'>", " <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>", " </config>", " <documents>", " <document type='type1' mode='index'/>", " </documents>", " <nodes count='1'>", " <resources vcpu='1' memory='128Gb' disk='100Gb'/>", " </nodes>", " <engine>", " <proton>", " <resource-limits>", " <memory>0.92</memory>", " </resource-limits>", " <tuning>", " <searchnode>", " <flushstrategy>", " <native>", " <total>", " <maxmemorygain>1000</maxmemorygain>", " </total>", " </native>", " </flushstrategy>", " </searchnode>", " </tuning>", " </proton>", " </engine>", " </content>", "</services>"); VespaModelTester tester = new VespaModelTester(); tester.addHosts(new NodeResources(1, 3, 9, 1), 1); tester.addHosts(new NodeResources(1, 128, 100, 0.3), 1); VespaModel model = tester.createModel(services, true, 0); ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch(); ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId()); assertEquals(2000, cfg.flush().memory().maxtlssize()); assertEquals(1000, cfg.flush().memory().maxmemory()); assertEquals((long) (128 - reservedMemoryGb) * GB / 8, cfg.flush().memory().each().maxmemory()); assertEquals(0.92, cfg.writefilter().memorylimit(), 0.0001); } private static ProtonConfig getProtonConfig(VespaModel model, String configId) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); model.getConfig(builder, configId); return new ProtonConfig(builder); } private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) { int numberOfHosts = 2; VespaModelTester tester = new VespaModelTester(); tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(Zone.defaultZone(), services, true); assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts)); Admin admin = 
model.getAdmin(); Logserver logserver = admin.getLogserver(); HostResource hostResource = logserver.getHostResource(); assertNotNull(hostResource.getService("logserver")); String containerServiceType = ContainerServiceType.LOGSERVER_CONTAINER.serviceName; assertNotNull(hostResource.getService(containerServiceType)); String configId = admin.getLogserver().getHostResource().getService(containerServiceType).getConfigId(); ApplicationMetadataConfig.Builder builder = new ApplicationMetadataConfig.Builder(); model.getConfig(builder, configId); ApplicationMetadataConfig cfg = new ApplicationMetadataConfig(builder); assertEquals(1, cfg.generation()); LogdConfig.Builder logdConfigBuilder = new LogdConfig.Builder(); model.getConfig(logdConfigBuilder, configId); LogdConfig logdConfig = new LogdConfig(logdConfigBuilder); assertTrue(logdConfig.logserver().use()); } private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Id combinedId, ClusterSpec.Type type, VespaModel model) { assertEquals("Nodes in cluster " + id + " with type " + type + (combinedId != null ? ", combinedId " + combinedId : ""), nodeCount, model.hostSystem().getHosts().stream() .map(h -> h.spec().membership().get().cluster()) .filter(spec -> spec.id().equals(id) && spec.type().equals(type) && spec.combinedId().equals(Optional.ofNullable(combinedId))) .count()); } private static void assertProvisioned(int nodeCount, ClusterSpec.Id id, ClusterSpec.Type type, VespaModel model) { assertProvisioned(nodeCount, id, null, type, model); } }
Can we use the tenant_iam_role feature flag for this as well, instead of hardcoding the PublicCd system check?
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) { if (deployState.zone().system() == SystemName.PublicCd) { BindingPattern bindingPattern = SystemBindingPattern.fromHttpPath("/validate-secret-store"); Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, null, null)); handler.addServerBindings(bindingPattern); cluster.addComponent(handler); } }
if (deployState.zone().system() == SystemName.PublicCd) {
private void addParameterStoreValidationHandler(ApplicationContainerCluster cluster, DeployState deployState) { if (deployState.featureFlags().tenantIamRole()) { BindingPattern bindingPattern = SystemBindingPattern.fromHttpPath("/validate-secret-store"); Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel("com.yahoo.jdisc.cloud.aws.AwsParameterStoreValidationHandler", null, null, null)); handler.addServerBindings(bindingPattern); cluster.addComponent(handler); } }
class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html"); private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE"; private static final String CONTAINER_TAG = "container"; private static final String DEPRECATED_CONTAINER_TAG = "jdisc"; private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables"; private static final int MIN_ZOOKEEPER_NODE_COUNT = 1; private static final int MAX_ZOOKEEPER_NODE_COUNT = 7; public enum Networking { disable, enable } private ApplicationPackage app; private final boolean standaloneBuilder; private final Networking networking; private final boolean rpcServerEnabled; private final boolean httpServerEnabled; protected DeployLogger log; public static final List<ConfigModelId> configModelIds = ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG)); private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName(); private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName(); public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) { super(ContainerModel.class); this.standaloneBuilder = standaloneBuilder; this.networking = networking; this.rpcServerEnabled = !standaloneBuilder; this.httpServerEnabled = networking == Networking.enable; } @Override public List<ConfigModelId> handlesElements() { return configModelIds; } @Override public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) { log = modelContext.getDeployLogger(); app = modelContext.getApplicationPackage(); checkVersion(spec); checkTagName(spec, log); ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext); addClusterContent(cluster, spec, modelContext); cluster.setMessageBusEnabled(rpcServerEnabled); 
cluster.setRpcServerEnabled(rpcServerEnabled); cluster.setHttpServerEnabled(httpServerEnabled); model.setCluster(cluster); } private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) { return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() { @Override protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) { return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(), modelContext.getProducerId(), deployState); } }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec); } private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) { DeployState deployState = context.getDeployState(); DocumentFactoryBuilder.buildDocumentFactories(cluster, spec); addConfiguredComponents(deployState, cluster, spec); addSecretStore(cluster, spec); addRestApis(deployState, spec, cluster); addServlets(deployState, spec, cluster); addModelEvaluation(spec, cluster, context); addProcessing(deployState, spec, cluster); addSearch(deployState, spec, cluster); addDocproc(deployState, spec, cluster); addDocumentApi(spec, cluster); cluster.addDefaultHandlersExceptStatus(); addStatusHandlers(cluster, context.getDeployState().isHosted()); addUserHandlers(deployState, cluster, spec); addHttp(deployState, spec, cluster, context); addAccessLogs(deployState, cluster, spec); addRoutingAliases(cluster, spec, deployState.zone().environment()); addNodes(cluster, spec, context); addClientProviders(deployState, spec, cluster); addServerProviders(deployState, spec, cluster); addAthensCopperArgos(cluster, context); addZooKeeper(cluster, spec); addParameterStoreValidationHandler(cluster, deployState); } private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) { if (!hasZooKeeper(spec)) return; Element nodesElement = XML.getChild(spec, "nodes"); boolean 
isCombined = nodesElement != null && nodesElement.hasAttribute("of"); if (isCombined) { throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper"); } long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count(); if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) { throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " + MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT + ", have " + nonRetiredNodes + " non-retired"); } cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade"); cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents); } public static void addReconfigurableZooKeeperServerComponents(Container container) { container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", container)); container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.Reconfigurer", container)); container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl", container)); } private static SimpleComponent zookeeperComponent(String idSpec, Container container) { String configId = container.getConfigId(); return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", configId)); } private void addSecretStore(ApplicationContainerCluster cluster, Element spec) { Element secretStoreElement = XML.getChild(spec, "secret-store"); if (secretStoreElement != null) { SecretStore secretStore = new SecretStore(); for (Element group : XML.getChildren(secretStoreElement, "group")) { secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment")); } cluster.setSecretStore(secretStore); } } private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) { if ( ! 
context.getDeployState().isHosted()) return; app.getDeployment().map(DeploymentSpec::fromXml) .ifPresent(deploymentSpec -> { addIdentityProvider(cluster, context.getDeployState().getProperties().configServerSpecs(), context.getDeployState().getProperties().loadBalancerName(), context.getDeployState().getProperties().ztsUrl(), context.getDeployState().getProperties().athenzDnsSuffix(), context.getDeployState().zone(), deploymentSpec); addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec); }); } private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) { cluster.getContainers().forEach(container -> { setRotations(container, endpoints, cluster.getName()); container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec))); }); } private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) { Optional<DeploymentInstanceSpec> instance = spec.instance(app.getApplicationId().instance()); if (instance.isEmpty()) return false; return instance.get().zones().stream() .anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) && declaredZone.active()); } private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) { var rotationsProperty = endpoints.stream() .filter(endpoint -> endpoint.clusterId().equals(containerClusterName)) .flatMap(endpoint -> endpoint.names().stream()) .collect(Collectors.toUnmodifiableSet()); container.setProp("rotations", String.join(",", rotationsProperty)); } private void addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) { if (environment != Environment.prod) return; Element aliases = XML.getChild(spec, "aliases"); for (Element alias : XML.getChildren(aliases, "service-alias")) { cluster.serviceAliases().add(XML.getValue(alias)); } for (Element alias 
: XML.getChildren(aliases, "endpoint-alias")) { cluster.endpointAliases().add(XML.getValue(alias)); } } private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { for (Element components : XML.getChildren(spec, "components")) { addIncludes(components); addConfiguredComponents(deployState, cluster, components, "component"); } addConfiguredComponents(deployState, cluster, spec, "component"); } protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) { if (isHostedVespa) { String name = "status.html"; Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING)); cluster.addComponent( new FileStatusHandlerComponent( name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE), SystemBindingPattern.fromHttpPath("/" + name))); } else { cluster.addVipHandler(); } } private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element clientSpec: XML.getChildren(spec, "client")) { cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientSpec)); } } private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { addConfiguredComponents(deployState, cluster, spec, "server"); } protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { List<Element> accessLogElements = getAccessLogElements(spec); for (Element accessLog : accessLogElements) { AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent); } if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault()) cluster.addDefaultSearchAccessLog(); if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent)) { cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "qrs")); } 
else { cluster.addComponent(new ConnectionLogComponent(cluster, VoidConnectionLog.class, "qrs")); } } private List<Element> getAccessLogElements(Element spec) { return XML.getChildren(spec, "accesslog"); } protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) { Element httpElement = XML.getChild(spec, "http"); if (httpElement != null) { cluster.setHttp(buildHttp(deployState, cluster, httpElement)); } if (isHostedTenantApplication(context)) { addHostedImplicitHttpIfNotPresent(cluster); addHostedImplicitAccessControlIfNotPresent(deployState, cluster); addDefaultConnectorHostedFilterBinding(cluster); addAdditionalHostedConnector(deployState, cluster, context); } } private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) { cluster.getHttp().getAccessControl() .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp())); ; } private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) { JettyHttpServer server = cluster.getHttp().getHttpServer().get(); String serverName = server.getComponentId().getName(); HostedSslConnectorFactory connectorFactory; if (deployState.endpointCertificateSecrets().isPresent()) { boolean authorizeClient = deployState.zone().system().isPublic(); if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) { throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https: } EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get(); boolean enforceHandshakeClientAuth = context.properties().featureFlags().useAccessControlTlsHandshakeClientAuth() && cluster.getHttp().getAccessControl() .map(accessControl -> accessControl.clientAuthentication) .map(clientAuth -> clientAuth.equals(AccessControl.ClientAuthentication.need)) .orElse(false); connectorFactory = 
authorizeClient ? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get()) : HostedSslConnectorFactory.withProvidedCertificate(serverName, endpointCertificateSecrets, enforceHandshakeClientAuth); } else { connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName); } cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory)); server.addConnector(connectorFactory); } private static boolean isHostedTenantApplication(ConfigModelContext context) { var deployState = context.getDeployState(); boolean isTesterApplication = deployState.getProperties().applicationId().instance().isTester(); return deployState.isHosted() && context.getApplicationType() == ApplicationType.DEFAULT && !isTesterApplication; } private static void addHostedImplicitHttpIfNotPresent(ApplicationContainerCluster cluster) { if(cluster.getHttp() == null) { cluster.setHttp(new Http(new FilterChains(cluster))); } JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null); if (httpServer == null) { httpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"), cluster, cluster.isHostedVespa()); cluster.getHttp().setHttpServer(httpServer); } int defaultPort = Defaults.getDefaults().vespaWebServicePort(); boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort); if (!defaultConnectorPresent) { httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build()); } } private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) { Http http = cluster.getHttp(); if (http.getAccessControl().isPresent()) return; AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null); if (tenantDomain == null) return; new 
AccessControl.Builder(tenantDomain.value()) .setHandlers(cluster) .readEnabled(false) .writeEnabled(false) .clientAuthentication(AccessControl.ClientAuthentication.need) .build() .configureHttpFilterChains(http); } private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) { Http http = new HttpBuilder().build(deployState, cluster, httpElement); if (networking == Networking.disable) http.removeAllServers(); return http; } private void addRestApis(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element restApiElem : XML.getChildren(spec, "rest-api")) { cluster.addRestApi( new RestApiBuilder().build(deployState, cluster, restApiElem)); } } private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element servletElem : XML.getChildren(spec, "servlet")) cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem)); } private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) { ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec); if (containerDocumentApi == null) return; cluster.setDocumentApi(containerDocumentApi); } private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec); if (containerDocproc == null) return; cluster.setDocproc(containerDocproc); ContainerDocproc.Options docprocOptions = containerDocproc.options; cluster.setMbusParams(new ApplicationContainerCluster.MbusParams( docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory)); } private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { Element searchElement = XML.getChild(spec, "search"); if (searchElement == null) return; addIncludes(searchElement); cluster.setSearch(buildSearch(deployState, 
cluster, searchElement)); addSearchHandler(cluster, searchElement); addGUIHandler(cluster); validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement); } private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) { Element modelEvaluationElement = XML.getChild(spec, "model-evaluation"); if (modelEvaluationElement == null) return; RankProfileList profiles = context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty; cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles)); } private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { Element processingElement = XML.getChild(spec, "processing"); if (processingElement == null) return; addIncludes(processingElement); cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement), serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new)); validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement); } private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) { SearchChains searchChains = new DomSearchChainsBuilder(null, false) .build(deployState, containerCluster, producerSpec); ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options()); applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch); containerSearch.setQueryProfiles(deployState.getQueryProfiles()); containerSearch.setSemanticRules(deployState.getSemanticRules()); return containerSearch; } private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) { 
PageTemplates.validate(applicationPackage); containerSearch.setPageTemplates(PageTemplates.create(applicationPackage)); } private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { for (Element component: XML.getChildren(spec, "handler")) { cluster.addComponent( new DomHandlerBuilder(cluster).build(deployState, cluster, component)); } } private void checkVersion(Element spec) { String version = spec.getAttribute("version"); if ( ! Version.fromString(version).equals(new Version(1))) { throw new RuntimeException("Expected container version to be 1.0, but got " + version); } } private void checkTagName(Element spec, DeployLogger logger) { if (spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) { logger.log(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead."); } } private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) { if (standaloneBuilder) addStandaloneNode(cluster); else addNodesFromXml(cluster, spec, context); } private void addStandaloneNode(ApplicationContainerCluster cluster) { ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa()); cluster.addContainers(Collections.singleton(container)); } static boolean incompatibleGCOptions(String jvmargs) { Pattern gcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC"); Pattern cmsArgs = Pattern.compile("-XX:[-+]*CMS"); return (gcAlgorithm.matcher(jvmargs).find() ||cmsArgs.matcher(jvmargs).find()); } private static String buildJvmGCOptions(DeployState deployState, String jvmGCOPtions) { String options = (jvmGCOPtions != null) ? jvmGCOPtions : deployState.getProperties().jvmGCOptions(); return (options == null ||options.isEmpty()) ? (deployState.isHosted() ? 
ContainerCluster.CMS : ContainerCluster.G1GC) : options; }

    /**
     * Reads the JVM options for a cluster from the legacy attributes on the nodes element:
     * either 'jvm-options' or the deprecated 'jvmargs', never both.
     *
     * @throws IllegalArgumentException if both attributes are present
     */
    private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) {
        String jvmOptions;
        if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
            jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
            if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) {
                // Both the new and the deprecated attribute given: ambiguous, so refuse
                String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
                throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" +
                                                   " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options.");
            }
        } else {
            jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
            if (incompatibleGCOptions(jvmOptions)) {
                // GC flags hidden inside 'jvmargs' would conflict with the cluster GC settings: warn and force G1
                deployLogger.log(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
                cluster.setJvmGCOptions(ContainerCluster.G1GC);
            }
        }
        return jvmOptions;
    }

    /** Returns the given attribute's value, or null if the attribute is not set on the element. */
    private static String extractAttribute(Element element, String attrName) {
        return element.hasAttribute(attrName) ?
               element.getAttribute(attrName) : null;
    }

    /** Applies JVM settings given as attributes directly on the legacy nodes tag (no jvm child element). */
    void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                                      Element nodesElement, ConfigModelContext context) {
        applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger()));

        // getJvmOptions may already have forced G1; only read the attribute when nothing is set yet
        if (cluster.getJvmGCOptions().isEmpty()) {
            String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
            cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions));
        }

        applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
    }

    /** Applies JVM settings from the jvm child element of the nodes tag. */
    void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
                       Element jvmElement, ConfigModelContext context) {
        applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS));
        applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
        String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS);
        cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions));
    }

    /**
     * Add nodes to cluster according to the given containerElement.
     *
     * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set
     * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed
     * simultaneously for all active config models.
     */
    private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) {
        Element nodesElement = XML.getChild(containerElement, "nodes");
        if (nodesElement == null) {
            // No nodes element given: fall back to implicit allocation
            cluster.addContainers(allocateWithoutNodesTag(cluster, context));
        } else {
            List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context);

            Element jvmElement = XML.getChild(nodesElement, "jvm");
            if (jvmElement == null) {
                extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context);
            } else {
                extractJvmTag(nodes, cluster, jvmElement, context);
            }
            applyRoutingAliasProperties(nodes, cluster);
            applyDefaultPreload(nodes, nodesElement);
            String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT));
            if (!environmentVars.isEmpty()) {
                cluster.setEnvironmentVars(environmentVars);
            }
            if (useCpuSocketAffinity(nodesElement))
                AbstractService.distributeCpuSocketAffinity(nodes);

            cluster.addContainers(nodes);
        }
    }

    /** Flattens the environment-variables children into a space-separated "NAME=value " string. */
    private static String getEnvironmentVariables(Element environmentVariables) {
        StringBuilder sb = new StringBuilder();
        if (environmentVariables != null) {
            for (Element var: XML.getChildren(environmentVariables)) {
                sb.append(var.getNodeName()).append('=').append(var.getTextContent()).append(' ');
            }
        }
        return sb.toString();
    }

    /** Dispatches to the node-creation strategy selected by the attributes on the nodes element. */
    private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement,
                                                   Element nodesElement, ConfigModelContext context) {
        if (nodesElement.hasAttribute("type"))
            return createNodesFromNodeType(cluster, nodesElement, context);
        else if (nodesElement.hasAttribute("of"))
            return createNodesFromContentServiceReference(cluster, nodesElement, context);
        else if (nodesElement.hasAttribute("count"))
            return createNodesFromNodeCount(cluster, containerElement, nodesElement, context);
        else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed())
            // Manually deployed hosted clusters also use count-based provisioning
            return createNodesFromNodeCount(cluster, containerElement,
                                            nodesElement, context);
        else
            return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
    }

    /** Propagates the cluster's service/endpoint aliases to each container as node properties. */
    private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) {
        if (!cluster.serviceAliases().isEmpty()) {
            result.forEach(container -> {
                container.setProp("servicealiases", cluster.serviceAliases().stream().collect(Collectors.joining(",")));
            });
        }
        if (!cluster.endpointAliases().isEmpty()) {
            result.forEach(container -> {
                container.setProp("endpointaliases", cluster.endpointAliases().stream().collect(Collectors.joining(",")));
            });
        }
    }

    /**
     * Parses and applies the allocated-memory attribute, e.g. "60%".
     *
     * @throws IllegalArgumentException if the value is not an integer percentage ending with '%'
     */
    private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
        if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
        memoryPercentage = memoryPercentage.trim();

        if ( ! memoryPercentage.endsWith("%"))
            throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                               " must be an integer percentage ending by the '%' sign");
        memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();

        try {
            cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
                                               " must be an integer percentage ending by the '%' sign");
        }
    }

    /** Allocate a container cluster without a nodes tag */
    private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) {
        DeployState deployState = context.getDeployState();
        HostSystem hostSystem = cluster.hostSystem();
        if (deployState.isHosted()) {
            ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
                                                          ClusterSpec.Id.from(cluster.getName()))
                                                 .vespaVersion(deployState.getWantedNodeVespaVersion())
                                                 .dockerImageRepository(deployState.getWantedDockerImageRepo())
                                                 .build();
            // Two nodes for redundancy in production; a single node suffices elsewhere
            int nodeCount = deployState.zone().environment().isProduction() ?
2 : 1;
            Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()),
                                              false,
                                              !deployState.getProperties().isBootstrap());
            var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log);
            return createNodesFromHosts(log, hosts, cluster);
        }
        // Not hosted: a single container on the default single-node host
        return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context);
    }

    /** Creates a one-node cluster ("container.0") on the given host. */
    private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) {
        ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa());
        node.setHostResource(host);
        node.initService(context.getDeployLogger());
        return List.of(node);
    }

    /** Provisions nodes according to the 'count' attribute on the nodes element. */
    private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) {
        NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
        Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(),
                                                                                  ClusterSpec.Type.container,
                                                                                  ClusterSpec.Id.from(cluster.getName()),
                                                                                  log,
                                                                                  hasZooKeeper(containerElement));
        return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
    }

    /** Provisions nodes of the node type named by the 'type' attribute on the nodes element. */
    private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
        NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
        ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
                                                      ClusterSpec.Id.from(cluster.getName()))
                                             .vespaVersion(context.getDeployState().getWantedNodeVespaVersion())
                                             .dockerImageRepository(context.getDeployState().getWantedDockerImageRepo())
                                             .build();
        Map<HostResource, ClusterMembership> hosts =
                cluster.getRoot().hostSystem().allocateHosts(clusterSpec, Capacity.fromRequiredNodeType(type), log);
        return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
    }

    /**
     * Places containers on the hosts of the content cluster referenced by the 'of' attribute
     * (a "combined" cluster — see addZooKeeper, which rejects ZooKeeper on such clusters).
     */
    private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
        NodesSpecification nodeSpecification;
        try {
            nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context);
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException(cluster + " contains an invalid reference", e);
        }
        String referenceId = nodesElement.getAttribute("of");
        cluster.setHostClusterId(referenceId);

        Map<HostResource, ClusterMembership> hosts =
                StorageGroup.provisionHosts(nodeSpecification,
                                            referenceId,
                                            cluster.getRoot().hostSystem(),
                                            context.getDeployLogger());
        return createNodesFromHosts(context.getDeployLogger(), hosts, cluster);
    }

    /** Wraps each allocated host in an ApplicationContainer named "container.&lt;membership index&gt;". */
    private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ApplicationContainerCluster cluster) {
        List<ApplicationContainer> nodes = new ArrayList<>();
        for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) {
            String id = "container." + entry.getValue().index();
            ApplicationContainer container = new ApplicationContainer(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa());
            container.setHostResource(entry.getKey());
            container.initService(deployLogger);
            nodes.add(container);
        }
        return nodes;
    }

    /** Creates one container per explicit node child element, indexed in document order. */
    private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) {
        List<ApplicationContainer> nodes = new ArrayList<>();
        int nodeIndex = 0;
        for (Element nodeElem: XML.getChildren(nodesElement, "node")) {
            nodes.add(new ContainerServiceBuilder("container." + nodeIndex, nodeIndex).build(deployState, cluster, nodeElem));
            nodeIndex++;
        }
        return nodes;
    }

    /** Returns the cpu-socket-affinity attribute value, defaulting to false when absent. */
    private static boolean useCpuSocketAffinity(Element nodesElement) {
        if (nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME))
            return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
        else
            return false;
    }

    /** Prepends the given JVM args to each container that has no options assigned already. */
    private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) {
        for (Container container: containers) {
            if (container.getAssignedJvmOptions().isEmpty())
                container.prependJvmOptions(jvmArgs);
        }
    }

    /** Applies the preload attribute of the nodes element, if present, to every container. */
    private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) {
        if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
        for (Container container: containers)
            container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME));
    }

    /** Adds the search handler (with its bindings and threadpool options) and the chain execution factory. */
    private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) {
        cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(),
                                                     "com.yahoo.search.searchchain.ExecutionFactory"));
        cluster.addComponent(
                new SearchHandler(
                        cluster,
                        serverBindings(searchElement, SearchHandler.DEFAULT_BINDING),
                        ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null)));
    }

    /** Adds the GUI handler on its system binding path. */
    private void addGUIHandler(ApplicationContainerCluster cluster) {
        Handler<?> guiHandler = new GUIHandler();
        guiHandler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH));
        cluster.addComponent(guiHandler);
    }

    /** Returns the binding children of the given element, or the given defaults if there are none. */
    private List<BindingPattern> serverBindings(Element searchElement, BindingPattern...
defaultBindings) {
        List<Element> bindings = XML.getChildren(searchElement, "binding");
        if (bindings.isEmpty())
            return List.of(defaultBindings);

        return toBindingList(bindings);
    }

    /** Converts binding elements to user binding patterns, skipping empty ones. */
    private List<BindingPattern> toBindingList(List<Element> bindingElements) {
        List<BindingPattern> result = new ArrayList<>();
        for (Element element: bindingElements) {
            String text = element.getTextContent().trim();
            if (!text.isEmpty())
                result.add(UserBindingPattern.fromPattern(text));
        }
        return result;
    }

    /** Builds the document API from the document-api element, or returns null if it is absent. */
    private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
        Element documentApiElement = XML.getChild(spec, "document-api");
        if (documentApiElement == null) return null;

        ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
        return new ContainerDocumentApi(cluster, documentApiOptions);
    }

    /** Builds document processing from the document-processing element, or returns null if it is absent. */
    private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
        Element docprocElement = XML.getChild(spec, "document-processing");
        if (docprocElement == null) return null;

        addIncludes(docprocElement);
        DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement);
        ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement);
        return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder);
    }

    /**
     * Expands any include children of the given element in place.
     *
     * @throws IllegalArgumentException if includes are used without an application package
     */
    private void addIncludes(Element parentElement) {
        List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
        if (includes.isEmpty()) {
            return;
        }
        if (app == null) {
            throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
        }
        for (Element include : includes) {
            addInclude(parentElement, include);
        }
    }

    /** Copies all elements of the included directory's files in as children of parentElement. */
    private void addInclude(Element parentElement, Element include) {
        String dirName = include.getAttribute(IncludeDirs.DIR);
        app.validateIncludeDir(dirName);

        List<Element> includedFiles = Xml.allElemsFromPath(app, dirName);
        for (Element includedFile : includedFiles) {
            List<Element> includedSubElements = XML.getChildren(includedFile);
            for (Element includedSubElement : includedSubElements) {
                // importNode is needed to move nodes between DOM documents
                Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true);
                parentElement.appendChild(copiedNode);
            }
        }
    }

    /** Adds a component to the cluster for each child of spec with the given element name. */
    private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster,
                                                Element spec, String componentName) {
        for (Element node : XML.getChildren(spec, componentName)) {
            cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
        }
    }

    /** As addConfiguredComponents, but runs the given validator on each element before adding it. */
    private static void validateAndAddConfiguredComponents(DeployState deployState,
                                                           ContainerCluster<? extends Container> cluster,
                                                           Element spec, String componentName,
                                                           Consumer<Element> elementValidator) {
        for (Element node : XML.getChildren(spec, componentName)) {
            elementValidator.accept(node);
            cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node));
        }
    }

    /**
     * Adds an Athenz identity provider component, and identity properties on each container,
     * when the deployment spec declares an Athenz domain.
     *
     * @throws RuntimeException if a domain is declared but no Athenz service is configured for this instance
     */
    private void addIdentityProvider(ApplicationContainerCluster cluster,
                                     List<ConfigServerSpec> configServerSpecs,
                                     HostName loadBalancerName,
                                     URI ztsUrl,
                                     String athenzDnsSuffix,
                                     Zone zone,
                                     DeploymentSpec spec) {
        spec.athenzDomain()
            .ifPresent(domain -> {
                // Instance-level service overrides the deployment-level service
                AthenzService service = spec.instance(app.getApplicationId().instance())
                                            .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                                            .or(() -> spec.athenzService())
                                            .orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" +
                                                                                    app.getApplicationId().instance() + "'"));
                String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
                IdentityProvider identityProvider = new IdentityProvider(domain,
                                                                         service,
                                                                         getLoadBalancerName(loadBalancerName, configServerSpecs),
                                                                         ztsUrl,
                                                                         zoneDnsSuffix,
                                                                         zone);
                cluster.addComponent(identityProvider);

                cluster.getContainers().forEach(container -> {
                    container.setProp("identity.domain", domain.value());
                    container.setProp("identity.service", service.value());
                });
            });
    }

    /** Returns the given load balancer name, falling back to the first config server's hostname. */
    private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
        return Optional.ofNullable(loadbalancerName)
                       .orElseGet(
                               () -> HostName.from(configServerSpecs.stream()
                                                                    .findFirst()
                                                                    .map(ConfigServerSpec::getHostName)
                                                                    .orElse("unknown")));
    }

    /** Returns whether the spec declares a zookeeper element. */
    private static boolean hasZooKeeper(Element spec) {
        return XML.getChild(spec, "zookeeper") != null;
    }

    /** Disallow renderers named "XmlRenderer" or "JsonRenderer" */
    private static void validateRendererElement(Element element) {
        String idAttr = element.getAttribute("id");

        if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) {
            throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
        }
    }

    /** Returns whether the element is a container tag (or the deprecated jdisc tag). */
    public static boolean isContainerTag(Element element) {
        return CONTAINER_TAG.equals(element.getTagName()) || DEPRECATED_CONTAINER_TAG.equals(element.getTagName());
    }

}
/**
 * Builds a ContainerModel from a container (or deprecated jdisc) element in services.xml.
 */
class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {

    // Status file served by the hosted status handler (see addStatusHandlers)
    static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");

    // Environment variable that overrides HOSTED_VESPA_STATUS_FILE
    private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";

    private static final String CONTAINER_TAG = "container";
    private static final String DEPRECATED_CONTAINER_TAG = "jdisc";
    private static final String ENVIRONMENT_VARIABLES_ELEMENT = "environment-variables";

    // ZooKeeper clusters must have an odd node count within these bounds (see addZooKeeper)
    private static final int MIN_ZOOKEEPER_NODE_COUNT = 1;
    private static final int MAX_ZOOKEEPER_NODE_COUNT = 7;

    public enum Networking { disable, enable }

    private ApplicationPackage app;               // set in doBuild; may stay null in standalone use
    private final boolean standaloneBuilder;
    private final Networking networking;
    private final boolean rpcServerEnabled;
    private final boolean httpServerEnabled;
    protected DeployLogger log;

    public static final List<ConfigModelId> configModelIds =
            ImmutableList.of(ConfigModelId.fromName(CONTAINER_TAG), ConfigModelId.fromName(DEPRECATED_CONTAINER_TAG));

    // Reserved renderer ids, rejected by validateRendererElement
    private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
    private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();

    public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
        super(ContainerModel.class);
        this.standaloneBuilder = standaloneBuilder;
        this.networking = networking;
        // Standalone builds get no RPC server; the HTTP server requires networking enabled
        this.rpcServerEnabled = !standaloneBuilder;
        this.httpServerEnabled = networking == Networking.enable;
    }

    @Override
    public List<ConfigModelId> handlesElements() { return configModelIds; }

    @Override
    public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
        log = modelContext.getDeployLogger();
        app = modelContext.getApplicationPackage();

        checkVersion(spec);
        checkTagName(spec, log);

        ApplicationContainerCluster cluster = createContainerCluster(spec, modelContext);
        addClusterContent(cluster, spec, modelContext);
        cluster.setMessageBusEnabled(rpcServerEnabled);
        cluster.setRpcServerEnabled(rpcServerEnabled);
        cluster.setHttpServerEnabled(httpServerEnabled);
        model.setCluster(cluster);
    }

    /** Creates the container cluster config producer for the given spec element. */
    private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
        return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
            @Override
            protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor,
                                                          Element producerSpec) {
                return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
                                                       modelContext.getProducerId(), deployState);
            }
        }.build(modelContext.getDeployState(), modelContext.getParentProducer(), spec);
    }

    /** Populates the cluster with everything configured in the spec element. */
    private void addClusterContent(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) {
        DeployState deployState = context.getDeployState();
        DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);

        addConfiguredComponents(deployState, cluster, spec);

        addSecretStore(cluster, spec);

        addRestApis(deployState, spec, cluster);
        addServlets(deployState, spec, cluster);
        addModelEvaluation(spec, cluster, context);

        addProcessing(deployState, spec, cluster);
        addSearch(deployState, spec, cluster);
        addDocproc(deployState, spec, cluster);
        addDocumentApi(spec, cluster);

        cluster.addDefaultHandlersExceptStatus();
        addStatusHandlers(cluster, context.getDeployState().isHosted());
        addUserHandlers(deployState, cluster, spec);

        addHttp(deployState, spec, cluster, context);

        addAccessLogs(deployState, cluster, spec);

        addRoutingAliases(cluster, spec, deployState.zone().environment());

        addNodes(cluster, spec, context);

        addClientProviders(deployState, spec, cluster);
        addServerProviders(deployState, spec, cluster);

        addAthensCopperArgos(cluster, context);

        addZooKeeper(cluster, spec);

        addParameterStoreValidationHandler(cluster, deployState);
    }

    /**
     * Adds ZooKeeper server and client components when the spec declares a zookeeper element.
     *
     * @throws IllegalArgumentException for combined clusters, or for an invalid node count
     */
    private void addZooKeeper(ApplicationContainerCluster cluster, Element spec) {
        if (!hasZooKeeper(spec)) return;

        Element nodesElement = XML.getChild(spec, "nodes");
        boolean isCombined = nodesElement != null && nodesElement.hasAttribute("of");
        if (isCombined) {
            throw new IllegalArgumentException("A combined cluster cannot run ZooKeeper");
        }
        // Enforce an odd ensemble size between the min and max, counting non-retired nodes only
        long nonRetiredNodes = cluster.getContainers().stream().filter(c -> !c.isRetired()).count();
        if (nonRetiredNodes < MIN_ZOOKEEPER_NODE_COUNT || nonRetiredNodes > MAX_ZOOKEEPER_NODE_COUNT || nonRetiredNodes % 2 == 0) {
            throw new IllegalArgumentException("Cluster with ZooKeeper needs an odd number of nodes, between " +
                                               MIN_ZOOKEEPER_NODE_COUNT + " and " + MAX_ZOOKEEPER_NODE_COUNT +
                                               ", have " + nonRetiredNodes + " non-retired");
        }
        cluster.addSimpleComponent("com.yahoo.vespa.curator.Curator", null, "zkfacade");
        cluster.getContainers().forEach(ContainerModelBuilder::addReconfigurableZooKeeperServerComponents);
    }

    /** Adds the reconfigurable ZooKeeper server components to the given container. */
    public static void addReconfigurableZooKeeperServerComponents(Container container) {
        container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", container));
        container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.Reconfigurer", container));
        container.addComponent(zookeeperComponent("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl", container));
    }

    /** Creates a zookeeper-server component scoped to the given container's config id. */
    private static SimpleComponent zookeeperComponent(String idSpec, Container container) {
        String configId = container.getConfigId();
        return new SimpleComponent(new ComponentModel(idSpec, null, "zookeeper-server", configId));
    }

    /** Configures the cluster's secret store from the secret-store element, if present. */
    private void addSecretStore(ApplicationContainerCluster cluster, Element spec) {
        Element secretStoreElement = XML.getChild(spec, "secret-store");
        if (secretStoreElement != null) {
            SecretStore secretStore = new SecretStore();
            for (Element group : XML.getChildren(secretStoreElement, "group")) {
                secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
            }
            cluster.setSecretStore(secretStore);
        }
    }

    /** Adds Athenz identity and rotation properties for hosted applications with a deployment spec. */
    private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) {
        if ( !
context.getDeployState().isHosted()) return;
        app.getDeployment().map(DeploymentSpec::fromXml)
           .ifPresent(deploymentSpec -> {
               addIdentityProvider(cluster,
                                   context.getDeployState().getProperties().configServerSpecs(),
                                   context.getDeployState().getProperties().loadBalancerName(),
                                   context.getDeployState().getProperties().ztsUrl(),
                                   context.getDeployState().getProperties().athenzDnsSuffix(),
                                   context.getDeployState().zone(),
                                   deploymentSpec);
               addRotationProperties(cluster, context.getDeployState().zone(),
                                     context.getDeployState().getEndpoints(), deploymentSpec);
           });
    }

    /** Sets the rotation-related node properties ('rotations', 'activeRotation') on each container. */
    private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
        cluster.getContainers().forEach(container -> {
            setRotations(container, endpoints, cluster.getName());
            container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
        });
    }

    /** Returns whether the deployment spec declares this zone as active for the current instance. */
    private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
        Optional<DeploymentInstanceSpec> instance = spec.instance(app.getApplicationId().instance());
        if (instance.isEmpty()) return false;
        return instance.get().zones().stream()
                       .anyMatch(declaredZone -> declaredZone.concerns(zone.environment(), Optional.of(zone.region())) &&
                                                 declaredZone.active());
    }

    /** Sets the 'rotations' property to the comma-joined endpoint names belonging to this cluster. */
    private void setRotations(Container container, Set<ContainerEndpoint> endpoints, String containerClusterName) {
        var rotationsProperty = endpoints.stream()
                                         .filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
                                         .flatMap(endpoint -> endpoint.names().stream())
                                         .collect(Collectors.toUnmodifiableSet());

        container.setProp("rotations", String.join(",", rotationsProperty));
    }

    /** Records service and endpoint aliases from the aliases element (production environment only). */
    private void addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) {
        if (environment != Environment.prod) return;

        Element aliases = XML.getChild(spec, "aliases");
        for (Element alias : XML.getChildren(aliases, "service-alias")) {
            cluster.serviceAliases().add(XML.getValue(alias));
        }
        for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
            cluster.endpointAliases().add(XML.getValue(alias));
        }
    }

    /** Adds components declared directly in the spec and inside any components groups (with includes). */
    private void addConfiguredComponents(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
        for (Element components : XML.getChildren(spec, "components")) {
            addIncludes(components);
            addConfiguredComponents(deployState, cluster, components, "component");
        }
        addConfiguredComponents(deployState, cluster, spec, "component");
    }

    /** Adds the file-backed status handler in hosted Vespa, otherwise the VIP handler. */
    protected void addStatusHandlers(ApplicationContainerCluster cluster, boolean isHostedVespa) {
        if (isHostedVespa) {
            String name = "status.html";
            // The status file location can be overridden through the environment
            Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_SETTING));
            cluster.addComponent(
                    new FileStatusHandlerComponent(
                            name + "-status-handler",
                            statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
                            SystemBindingPattern.fromHttpPath("/" + name)));
        } else {
            cluster.addVipHandler();
        }
    }

    /** Adds a component for each client element. */
    private void addClientProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        for (Element clientSpec: XML.getChildren(spec, "client")) {
            cluster.addComponent(new DomClientProviderBuilder(cluster).build(deployState, cluster, clientSpec));
        }
    }

    /** Adds a component for each server element. */
    private void addServerProviders(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
        addConfiguredComponents(deployState, cluster, spec, "server");
    }

    /** Configures access logging, falling back to the default search access log when none is configured. */
    protected void addAccessLogs(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
        List<Element> accessLogElements = getAccessLogElements(spec);

        for (Element accessLog : accessLogElements) {
            AccessLogBuilder.buildIfNotDisabled(deployState, cluster, accessLog).ifPresent(cluster::addComponent);
        }

        if (accessLogElements.isEmpty() && deployState.getAccessLoggingEnabledByDefault())
            cluster.addDefaultSearchAccessLog();

        // A connection log accompanies any access log component on the cluster
        if (cluster.getAllComponents().stream().anyMatch(component -> component instanceof AccessLogComponent)) {
            cluster.addComponent(new ConnectionLogComponent(cluster, FileConnectionLog.class, "qrs"));
        }
    }
private List<Element> getAccessLogElements(Element spec) { return XML.getChildren(spec, "accesslog"); } protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) { Element httpElement = XML.getChild(spec, "http"); if (httpElement != null) { cluster.setHttp(buildHttp(deployState, cluster, httpElement)); } if (isHostedTenantApplication(context)) { addHostedImplicitHttpIfNotPresent(cluster); addHostedImplicitAccessControlIfNotPresent(deployState, cluster); addDefaultConnectorHostedFilterBinding(cluster); addAdditionalHostedConnector(deployState, cluster, context); } } private void addDefaultConnectorHostedFilterBinding(ApplicationContainerCluster cluster) { cluster.getHttp().getAccessControl() .ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp())); ; } private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) { JettyHttpServer server = cluster.getHttp().getHttpServer().get(); String serverName = server.getComponentId().getName(); HostedSslConnectorFactory connectorFactory; if (deployState.endpointCertificateSecrets().isPresent()) { boolean authorizeClient = deployState.zone().system().isPublic(); if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) { throw new RuntimeException("Client certificate authority security/clients.pem is missing - see: https: } EndpointCertificateSecrets endpointCertificateSecrets = deployState.endpointCertificateSecrets().get(); boolean enforceHandshakeClientAuth = context.properties().featureFlags().useAccessControlTlsHandshakeClientAuth() && cluster.getHttp().getAccessControl() .map(accessControl -> accessControl.clientAuthentication) .map(clientAuth -> clientAuth.equals(AccessControl.ClientAuthentication.need)) .orElse(false); connectorFactory = authorizeClient ? 
HostedSslConnectorFactory.withProvidedCertificateAndTruststore(serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get()) : HostedSslConnectorFactory.withProvidedCertificate(serverName, endpointCertificateSecrets, enforceHandshakeClientAuth); } else { connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName); } cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory)); server.addConnector(connectorFactory); } private static boolean isHostedTenantApplication(ConfigModelContext context) { var deployState = context.getDeployState(); boolean isTesterApplication = deployState.getProperties().applicationId().instance().isTester(); return deployState.isHosted() && context.getApplicationType() == ApplicationType.DEFAULT && !isTesterApplication; } private static void addHostedImplicitHttpIfNotPresent(ApplicationContainerCluster cluster) { if(cluster.getHttp() == null) { cluster.setHttp(new Http(new FilterChains(cluster))); } JettyHttpServer httpServer = cluster.getHttp().getHttpServer().orElse(null); if (httpServer == null) { httpServer = new JettyHttpServer(new ComponentId("DefaultHttpServer"), cluster, cluster.isHostedVespa()); cluster.getHttp().setHttpServer(httpServer); } int defaultPort = Defaults.getDefaults().vespaWebServicePort(); boolean defaultConnectorPresent = httpServer.getConnectorFactories().stream().anyMatch(connector -> connector.getListenPort() == defaultPort); if (!defaultConnectorPresent) { httpServer.addConnector(new ConnectorFactory.Builder("SearchServer", defaultPort).build()); } } private void addHostedImplicitAccessControlIfNotPresent(DeployState deployState, ApplicationContainerCluster cluster) { Http http = cluster.getHttp(); if (http.getAccessControl().isPresent()) return; AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null); if (tenantDomain == null) return; new 
AccessControl.Builder(tenantDomain.value()) .setHandlers(cluster) .readEnabled(false) .writeEnabled(false) .clientAuthentication(AccessControl.ClientAuthentication.need) .build() .configureHttpFilterChains(http); } private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) { Http http = new HttpBuilder().build(deployState, cluster, httpElement); if (networking == Networking.disable) http.removeAllServers(); return http; } private void addRestApis(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element restApiElem : XML.getChildren(spec, "rest-api")) { cluster.addRestApi( new RestApiBuilder().build(deployState, cluster, restApiElem)); } } private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { for (Element servletElem : XML.getChildren(spec, "servlet")) cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem)); } private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) { ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec); if (containerDocumentApi == null) return; cluster.setDocumentApi(containerDocumentApi); } private void addDocproc(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { ContainerDocproc containerDocproc = buildDocproc(deployState, cluster, spec); if (containerDocproc == null) return; cluster.setDocproc(containerDocproc); ContainerDocproc.Options docprocOptions = containerDocproc.options; cluster.setMbusParams(new ApplicationContainerCluster.MbusParams( docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory)); } private void addSearch(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { Element searchElement = XML.getChild(spec, "search"); if (searchElement == null) return; addIncludes(searchElement); cluster.setSearch(buildSearch(deployState, 
cluster, searchElement)); addSearchHandler(cluster, searchElement); addGUIHandler(cluster); validateAndAddConfiguredComponents(deployState, cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement); } private void addModelEvaluation(Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) { Element modelEvaluationElement = XML.getChild(spec, "model-evaluation"); if (modelEvaluationElement == null) return; RankProfileList profiles = context.vespaModel() != null ? context.vespaModel().rankProfileList() : RankProfileList.empty; cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles)); } private void addProcessing(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { Element processingElement = XML.getChild(spec, "processing"); if (processingElement == null) return; addIncludes(processingElement); cluster.setProcessingChains(new DomProcessingBuilder(null).build(deployState, cluster, processingElement), serverBindings(processingElement, ProcessingChains.defaultBindings).toArray(BindingPattern[]::new)); validateAndAddConfiguredComponents(deployState, cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement); } private ContainerSearch buildSearch(DeployState deployState, ApplicationContainerCluster containerCluster, Element producerSpec) { SearchChains searchChains = new DomSearchChainsBuilder(null, false) .build(deployState, containerCluster, producerSpec); ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options()); applyApplicationPackageDirectoryConfigs(deployState.getApplicationPackage(), containerSearch); containerSearch.setQueryProfiles(deployState.getQueryProfiles()); containerSearch.setSemanticRules(deployState.getSemanticRules()); return containerSearch; } private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) { 
PageTemplates.validate(applicationPackage); containerSearch.setPageTemplates(PageTemplates.create(applicationPackage)); } private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { for (Element component: XML.getChildren(spec, "handler")) { cluster.addComponent( new DomHandlerBuilder(cluster).build(deployState, cluster, component)); } } private void checkVersion(Element spec) { String version = spec.getAttribute("version"); if ( ! Version.fromString(version).equals(new Version(1))) { throw new RuntimeException("Expected container version to be 1.0, but got " + version); } } private void checkTagName(Element spec, DeployLogger logger) { if (spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) { logger.log(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead."); } } private void addNodes(ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) { if (standaloneBuilder) addStandaloneNode(cluster); else addNodesFromXml(cluster, spec, context); } private void addStandaloneNode(ApplicationContainerCluster cluster) { ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa()); cluster.addContainers(Collections.singleton(container)); } static boolean incompatibleGCOptions(String jvmargs) { Pattern gcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC"); Pattern cmsArgs = Pattern.compile("-XX:[-+]*CMS"); return (gcAlgorithm.matcher(jvmargs).find() ||cmsArgs.matcher(jvmargs).find()); } private static String buildJvmGCOptions(DeployState deployState, String jvmGCOPtions) { String options = (jvmGCOPtions != null) ? jvmGCOPtions : deployState.getProperties().jvmGCOptions(); return (options == null ||options.isEmpty()) ? (deployState.isHosted() ? 
ContainerCluster.CMS : ContainerCluster.G1GC) : options; } private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) { String jvmOptions; if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS); if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) { String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" + " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options."); } } else { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); if (incompatibleGCOptions(jvmOptions)) { deployLogger.log(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'"); cluster.setJvmGCOptions(ContainerCluster.G1GC); } } return jvmOptions; } private static String extractAttribute(Element element, String attrName) { return element.hasAttribute(attrName) ? 
element.getAttribute(attrName) : null; } void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger())); if (cluster.getJvmGCOptions().isEmpty()) { String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS); cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions)); } applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)); } void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, Element jvmElement, ConfigModelContext context) { applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS)); applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)); String jvmGCOptions = extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS); cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions)); } /** * Add nodes to cluster according to the given containerElement. * * Note: DO NOT change allocation behaviour to allow version X and Y of the config-model to allocate a different set * of nodes. Such changes must be guarded by a common condition (e.g. feature flag) so the behaviour can be changed * simultaneously for all active config models. 
*/ private void addNodesFromXml(ApplicationContainerCluster cluster, Element containerElement, ConfigModelContext context) { Element nodesElement = XML.getChild(containerElement, "nodes"); if (nodesElement == null) { cluster.addContainers(allocateWithoutNodesTag(cluster, context)); } else { List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context); Element jvmElement = XML.getChild(nodesElement, "jvm"); if (jvmElement == null) { extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context); } else { extractJvmTag(nodes, cluster, jvmElement, context); } applyRoutingAliasProperties(nodes, cluster); applyDefaultPreload(nodes, nodesElement); String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT)); if (!environmentVars.isEmpty()) { cluster.setEnvironmentVars(environmentVars); } if (useCpuSocketAffinity(nodesElement)) AbstractService.distributeCpuSocketAffinity(nodes); cluster.addContainers(nodes); } } private static String getEnvironmentVariables(Element environmentVariables) { StringBuilder sb = new StringBuilder(); if (environmentVariables != null) { for (Element var: XML.getChildren(environmentVariables)) { sb.append(var.getNodeName()).append('=').append(var.getTextContent()).append(' '); } } return sb.toString(); } private List<ApplicationContainer> createNodes(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) { if (nodesElement.hasAttribute("type")) return createNodesFromNodeType(cluster, nodesElement, context); else if (nodesElement.hasAttribute("of")) return createNodesFromContentServiceReference(cluster, nodesElement, context); else if (nodesElement.hasAttribute("count")) return createNodesFromNodeCount(cluster, containerElement, nodesElement, context); else if (cluster.isHostedVespa() && cluster.getZone().environment().isManuallyDeployed()) return createNodesFromNodeCount(cluster, containerElement, 
nodesElement, context); else return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement); } private static void applyRoutingAliasProperties(List<ApplicationContainer> result, ApplicationContainerCluster cluster) { if (!cluster.serviceAliases().isEmpty()) { result.forEach(container -> { container.setProp("servicealiases", cluster.serviceAliases().stream().collect(Collectors.joining(","))); }); } if (!cluster.endpointAliases().isEmpty()) { result.forEach(container -> { container.setProp("endpointaliases", cluster.endpointAliases().stream().collect(Collectors.joining(","))); }); } } private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) { if (memoryPercentage == null || memoryPercentage.isEmpty()) return; memoryPercentage = memoryPercentage.trim(); if ( ! memoryPercentage.endsWith("%")) throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster + " must be an integer percentage ending by the '%' sign"); memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim(); try { cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage)); } catch (NumberFormatException e) { throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster + " must be an integer percentage ending by the '%' sign"); } } /** Allocate a container cluster without a nodes tag */ private List<ApplicationContainer> allocateWithoutNodesTag(ApplicationContainerCluster cluster, ConfigModelContext context) { DeployState deployState = context.getDeployState(); HostSystem hostSystem = cluster.hostSystem(); if (deployState.isHosted()) { ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName())) .vespaVersion(deployState.getWantedNodeVespaVersion()) .dockerImageRepository(deployState.getWantedDockerImageRepo()) .build(); int nodeCount = deployState.zone().environment().isProduction() ? 
2 : 1; Capacity capacity = Capacity.from(new ClusterResources(nodeCount, 1, NodeResources.unspecified()), false, !deployState.getProperties().isBootstrap()); var hosts = hostSystem.allocateHosts(clusterSpec, capacity, log); return createNodesFromHosts(log, hosts, cluster); } else { return singleHostContainerCluster(cluster, hostSystem.getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC), context); } } private List<ApplicationContainer> singleHostContainerCluster(ApplicationContainerCluster cluster, HostResource host, ConfigModelContext context) { ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa()); node.setHostResource(host); node.initService(context.getDeployLogger()); return List.of(node); } private List<ApplicationContainer> createNodesFromNodeCount(ApplicationContainerCluster cluster, Element containerElement, Element nodesElement, ConfigModelContext context) { NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement), context); Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().hostSystem(), ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName()), log, hasZooKeeper(containerElement)); return createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> createNodesFromNodeType(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { NodeType type = NodeType.valueOf(nodesElement.getAttribute("type")); ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from(cluster.getName())) .vespaVersion(context.getDeployState().getWantedNodeVespaVersion()) .dockerImageRepository(context.getDeployState().getWantedDockerImageRepo()) .build(); Map<HostResource, ClusterMembership> hosts = cluster.getRoot().hostSystem().allocateHosts(clusterSpec, Capacity.fromRequiredNodeType(type), log); return 
createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> createNodesFromContentServiceReference(ApplicationContainerCluster cluster, Element nodesElement, ConfigModelContext context) { NodesSpecification nodeSpecification; try { nodeSpecification = NodesSpecification.from(new ModelElement(nodesElement), context); } catch (IllegalArgumentException e) { throw new IllegalArgumentException(cluster + " contains an invalid reference", e); } String referenceId = nodesElement.getAttribute("of"); cluster.setHostClusterId(referenceId); Map<HostResource, ClusterMembership> hosts = StorageGroup.provisionHosts(nodeSpecification, referenceId, cluster.getRoot().hostSystem(), context.getDeployLogger()); return createNodesFromHosts(context.getDeployLogger(), hosts, cluster); } private List<ApplicationContainer> createNodesFromHosts(DeployLogger deployLogger, Map<HostResource, ClusterMembership> hosts, ApplicationContainerCluster cluster) { List<ApplicationContainer> nodes = new ArrayList<>(); for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) { String id = "container." + entry.getValue().index(); ApplicationContainer container = new ApplicationContainer(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa()); container.setHostResource(entry.getKey()); container.initService(deployLogger); nodes.add(container); } return nodes; } private List<ApplicationContainer> createNodesFromNodeList(DeployState deployState, ApplicationContainerCluster cluster, Element nodesElement) { List<ApplicationContainer> nodes = new ArrayList<>(); int nodeIndex = 0; for (Element nodeElem: XML.getChildren(nodesElement, "node")) { nodes.add(new ContainerServiceBuilder("container." 
+ nodeIndex, nodeIndex).build(deployState, cluster, nodeElem)); nodeIndex++; } return nodes; } private static boolean useCpuSocketAffinity(Element nodesElement) { if (nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)) return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)); else return false; } private static void applyNodesTagJvmArgs(List<ApplicationContainer> containers, String jvmArgs) { for (Container container: containers) { if (container.getAssignedJvmOptions().isEmpty()) container.prependJvmOptions(jvmArgs); } } private static void applyDefaultPreload(List<ApplicationContainer> containers, Element nodesElement) { if (! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return; for (Container container: containers) container.setPreLoad(nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)); } private void addSearchHandler(ApplicationContainerCluster cluster, Element searchElement) { cluster.addComponent(new ProcessingHandler<>(cluster.getSearch().getChains(), "com.yahoo.search.searchchain.ExecutionFactory")); cluster.addComponent( new SearchHandler( cluster, serverBindings(searchElement, SearchHandler.DEFAULT_BINDING), ContainerThreadpool.UserOptions.fromXml(searchElement).orElse(null))); } private void addGUIHandler(ApplicationContainerCluster cluster) { Handler<?> guiHandler = new GUIHandler(); guiHandler.addServerBindings(SystemBindingPattern.fromHttpPath(GUIHandler.BINDING_PATH)); cluster.addComponent(guiHandler); } private List<BindingPattern> serverBindings(Element searchElement, BindingPattern... 
defaultBindings) { List<Element> bindings = XML.getChildren(searchElement, "binding"); if (bindings.isEmpty()) return List.of(defaultBindings); return toBindingList(bindings); } private List<BindingPattern> toBindingList(List<Element> bindingElements) { List<BindingPattern> result = new ArrayList<>(); for (Element element: bindingElements) { String text = element.getTextContent().trim(); if (!text.isEmpty()) result.add(UserBindingPattern.fromPattern(text)); } return result; } private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) { Element documentApiElement = XML.getChild(spec, "document-api"); if (documentApiElement == null) return null; ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement); return new ContainerDocumentApi(cluster, documentApiOptions); } private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { Element docprocElement = XML.getChild(spec, "document-processing"); if (docprocElement == null) return null; addIncludes(docprocElement); DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(deployState, cluster, docprocElement); ContainerDocproc.Options docprocOptions = DocprocOptionsBuilder.build(docprocElement); return new ContainerDocproc(cluster, chains, docprocOptions, !standaloneBuilder); } private void addIncludes(Element parentElement) { List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE); if (includes.isEmpty()) { return; } if (app == null) { throw new IllegalArgumentException("Element <include> given in XML config, but no application package given."); } for (Element include : includes) { addInclude(parentElement, include); } } private void addInclude(Element parentElement, Element include) { String dirName = include.getAttribute(IncludeDirs.DIR); app.validateIncludeDir(dirName); List<Element> includedFiles = Xml.allElemsFromPath(app, dirName); for (Element 
includedFile : includedFiles) { List<Element> includedSubElements = XML.getChildren(includedFile); for (Element includedSubElement : includedSubElements) { Node copiedNode = parentElement.getOwnerDocument().importNode(includedSubElement, true); parentElement.appendChild(copiedNode); } } } private static void addConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster, Element spec, String componentName) { for (Element node : XML.getChildren(spec, componentName)) { cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node)); } } private static void validateAndAddConfiguredComponents(DeployState deployState, ContainerCluster<? extends Container> cluster, Element spec, String componentName, Consumer<Element> elementValidator) { for (Element node : XML.getChildren(spec, componentName)) { elementValidator.accept(node); cluster.addComponent(new DomComponentBuilder().build(deployState, cluster, node)); } } private void addIdentityProvider(ApplicationContainerCluster cluster, List<ConfigServerSpec> configServerSpecs, HostName loadBalancerName, URI ztsUrl, String athenzDnsSuffix, Zone zone, DeploymentSpec spec) { spec.athenzDomain() .ifPresent(domain -> { AthenzService service = spec.instance(app.getApplicationId().instance()) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> spec.athenzService()) .orElseThrow(() -> new RuntimeException("Missing Athenz service configuration in instance '" + app.getApplicationId().instance() + "'")); String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." 
+ athenzDnsSuffix; IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs), ztsUrl, zoneDnsSuffix, zone); cluster.addComponent(identityProvider); cluster.getContainers().forEach(container -> { container.setProp("identity.domain", domain.value()); container.setProp("identity.service", service.value()); }); }); } private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) { return Optional.ofNullable(loadbalancerName) .orElseGet( () -> HostName.from(configServerSpecs.stream() .findFirst() .map(ConfigServerSpec::getHostName) .orElse("unknown") )); } private static boolean hasZooKeeper(Element spec) { return XML.getChild(spec, "zookeeper") != null; } /** Disallow renderers named "XmlRenderer" or "JsonRenderer" */ private static void validateRendererElement(Element element) { String idAttr = element.getAttribute("id"); if (idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId)) { throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr)); } } public static boolean isContainerTag(Element element) { return CONTAINER_TAG.equals(element.getTagName()) || DEPRECATED_CONTAINER_TAG.equals(element.getTagName()); } }
is hostname really hostname, or does it include protocol? port? If not include "https://". If port is included we need to replace the port.
private URI getUri(Application application) { var hostname = application.getModel().getHosts() .stream() .filter(hostInfo -> hostInfo.getServices() .stream() .filter(service -> CONTAINER.serviceName.equals(service.getServiceType())) .count() > 0) .map(HostInfo::getHostname) .findFirst().orElseThrow(); return URI.create(hostname + AWS_PARAMETER_VALIDATION_HANDLER_POSTFIX); }
return URI.create(hostname + AWS_PARAMETER_VALIDATION_HANDLER_POSTFIX);
private URI getUri(Application application) { var hostname = application.getModel().getHosts() .stream() .filter(hostInfo -> hostInfo.getServices() .stream() .filter(service -> CONTAINER.serviceName.equals(service.getServiceType())) .count() > 0) .map(HostInfo::getHostname) .findFirst().orElseThrow(); return URI.create(PROTOCOL + hostname + AWS_PARAMETER_VALIDATION_HANDLER_POSTFIX); }
class SecretStoreValidator { private static final String AWS_PARAMETER_VALIDATION_HANDLER_POSTFIX = ":4080/validate-secret-store"; private final SecretStore secretStore; private final CloseableHttpClient httpClient = VespaHttpClientBuilder.create().build(); public SecretStoreValidator(SecretStore secretStore) { this.secretStore = secretStore; } public HttpResponse validateSecretStore(Application application, TenantSecretStore tenantSecretStore, String tenantSecretName) { var slime = toSlime(tenantSecretStore, tenantSecretName); var uri = getUri(application); return postRequest(uri, slime); } private Slime toSlime(TenantSecretStore tenantSecretStore, String tenantSecretName) { var slime = new Slime(); var cursor = slime.setObject(); cursor.setString("externalId", secretStore.getSecret(tenantSecretName)); cursor.setString("awsId", tenantSecretStore.getAwsId()); cursor.setString("name", tenantSecretStore.getName()); cursor.setString("role", tenantSecretStore.getRole()); return slime; } private HttpResponse postRequest(URI uri, Slime slime) { var postRequest = new HttpPost(uri); var data = uncheck(() -> SlimeUtils.toJsonBytes(slime)); var entity = new ByteArrayEntity(data); postRequest.setEntity(entity); try (CloseableHttpResponse response = httpClient.execute(postRequest)){ return new ProxyResponse(response); } catch (IOException e) { return HttpErrorResponse.internalServerError( String.format("Failed to post request to %s: %s", uri, Exceptions.toMessageString(e)) ); } } }
class SecretStoreValidator { private static final String PROTOCOL = "http: private static final String AWS_PARAMETER_VALIDATION_HANDLER_POSTFIX = ":4080/validate-secret-store"; private final SecretStore secretStore; private final CloseableHttpClient httpClient = VespaHttpClientBuilder.create().build(); public SecretStoreValidator(SecretStore secretStore) { this.secretStore = secretStore; } public HttpResponse validateSecretStore(Application application, TenantSecretStore tenantSecretStore, String tenantSecretName) { var slime = toSlime(tenantSecretStore, tenantSecretName); var uri = getUri(application); return postRequest(uri, slime); } private Slime toSlime(TenantSecretStore tenantSecretStore, String tenantSecretName) { var slime = new Slime(); var cursor = slime.setObject(); cursor.setString("externalId", secretStore.getSecret(tenantSecretName)); cursor.setString("awsId", tenantSecretStore.getAwsId()); cursor.setString("name", tenantSecretStore.getName()); cursor.setString("role", tenantSecretStore.getRole()); return slime; } private HttpResponse postRequest(URI uri, Slime slime) { var postRequest = new HttpPost(uri); var data = uncheck(() -> SlimeUtils.toJsonBytes(slime)); var entity = new ByteArrayEntity(data); postRequest.setEntity(entity); try { return new ProxyResponse(httpClient.execute(postRequest)); } catch (IOException e) { return HttpErrorResponse.internalServerError( String.format("Failed to post request to %s: %s", uri, Exceptions.toMessageString(e)) ); } } }
Do we want to return a structured response?
private HttpResponse postRequest(URI uri, Slime slime) { var postRequest = new HttpPost(uri); var data = uncheck(() -> SlimeUtils.toJsonBytes(slime)); var entity = new ByteArrayEntity(data); postRequest.setEntity(entity); try (CloseableHttpResponse response = httpClient.execute(postRequest)){ return new ProxyResponse(response); } catch (IOException e) { return HttpErrorResponse.internalServerError( String.format("Failed to post request to %s: %s", uri, Exceptions.toMessageString(e)) ); } }
);
private HttpResponse postRequest(URI uri, Slime slime) { var postRequest = new HttpPost(uri); var data = uncheck(() -> SlimeUtils.toJsonBytes(slime)); var entity = new ByteArrayEntity(data); postRequest.setEntity(entity); try { return new ProxyResponse(httpClient.execute(postRequest)); } catch (IOException e) { return HttpErrorResponse.internalServerError( String.format("Failed to post request to %s: %s", uri, Exceptions.toMessageString(e)) ); } }
class SecretStoreValidator { private static final String AWS_PARAMETER_VALIDATION_HANDLER_POSTFIX = ":4080/validate-secret-store"; private final SecretStore secretStore; private final CloseableHttpClient httpClient = VespaHttpClientBuilder.create().build(); public SecretStoreValidator(SecretStore secretStore) { this.secretStore = secretStore; } public HttpResponse validateSecretStore(Application application, TenantSecretStore tenantSecretStore, String tenantSecretName) { var slime = toSlime(tenantSecretStore, tenantSecretName); var uri = getUri(application); return postRequest(uri, slime); } private Slime toSlime(TenantSecretStore tenantSecretStore, String tenantSecretName) { var slime = new Slime(); var cursor = slime.setObject(); cursor.setString("externalId", secretStore.getSecret(tenantSecretName)); cursor.setString("awsId", tenantSecretStore.getAwsId()); cursor.setString("name", tenantSecretStore.getName()); cursor.setString("role", tenantSecretStore.getRole()); return slime; } private URI getUri(Application application) { var hostname = application.getModel().getHosts() .stream() .filter(hostInfo -> hostInfo.getServices() .stream() .filter(service -> CONTAINER.serviceName.equals(service.getServiceType())) .count() > 0) .map(HostInfo::getHostname) .findFirst().orElseThrow(); return URI.create(hostname + AWS_PARAMETER_VALIDATION_HANDLER_POSTFIX); } }
class SecretStoreValidator { private static final String PROTOCOL = "http: private static final String AWS_PARAMETER_VALIDATION_HANDLER_POSTFIX = ":4080/validate-secret-store"; private final SecretStore secretStore; private final CloseableHttpClient httpClient = VespaHttpClientBuilder.create().build(); public SecretStoreValidator(SecretStore secretStore) { this.secretStore = secretStore; } public HttpResponse validateSecretStore(Application application, TenantSecretStore tenantSecretStore, String tenantSecretName) { var slime = toSlime(tenantSecretStore, tenantSecretName); var uri = getUri(application); return postRequest(uri, slime); } private Slime toSlime(TenantSecretStore tenantSecretStore, String tenantSecretName) { var slime = new Slime(); var cursor = slime.setObject(); cursor.setString("externalId", secretStore.getSecret(tenantSecretName)); cursor.setString("awsId", tenantSecretStore.getAwsId()); cursor.setString("name", tenantSecretStore.getName()); cursor.setString("role", tenantSecretStore.getRole()); return slime; } private URI getUri(Application application) { var hostname = application.getModel().getHosts() .stream() .filter(hostInfo -> hostInfo.getServices() .stream() .filter(service -> CONTAINER.serviceName.equals(service.getServiceType())) .count() > 0) .map(HostInfo::getHostname) .findFirst().orElseThrow(); return URI.create(PROTOCOL + hostname + AWS_PARAMETER_VALIDATION_HANDLER_POSTFIX); } }
Consider reusing the constants from `ContainerCluster` etc. instead of hard-coding the paths.
/**
 * Produces the rule-based filter config that blocks mutating feed operations
 * (PUT/POST/DELETE) against the global-endpoint host names, answering 404.
 * All other traffic falls through to the default ALLOW rule.
 */
public void getConfig(RuleBasedFilterConfig.Builder builder) {
    // Every name of every global endpoint is a host to block feed on.
    Set<String> blockedHostNames = endpoints.stream()
            .flatMap(endpoint -> endpoint.names().stream())
            .collect(Collectors.toSet());
    var blockRule = new RuleBasedFilterConfig.Rule.Builder();
    blockRule.hostNames(blockedHostNames);
    blockRule.pathExpressions("/reserved-for-internal-use/feedapi");
    blockRule.pathExpressions("/document/v1/{*}");
    blockRule.methods(List.of(PUT, POST, DELETE));
    blockRule.action(BLOCK);
    blockRule.name("block-feed-global-endpoints");
    blockRule.blockResponseMessage("Feed to global endpoints are not allowed");
    blockRule.blockResponseCode(404);
    builder.rule(blockRule);
    builder.dryrun(dryRun);
    builder.defaultRule.action(ALLOW);
}
.pathExpressions("/reserved-for-internal-use/feedapi")
/**
 * Produces the rule-based filter config that blocks mutating feed operations
 * (PUT/POST/DELETE) on the feed and document/v1 paths for the global-endpoint
 * host names, answering 404; everything else hits the default ALLOW rule.
 */
public void getConfig(RuleBasedFilterConfig.Builder builder) {
    // Every name of every global endpoint is a host to block feed on.
    Set<String> hostNames = endpoints.stream()
            .flatMap(e -> e.names().stream())
            .collect(Collectors.toSet());
    RuleBasedFilterConfig.Rule.Builder rule = new RuleBasedFilterConfig.Rule.Builder()
            .hostNames(hostNames)
            // Paths come from the shared constants so they track the real handler bindings.
            .pathExpressions(ContainerCluster.RESERVED_URI_PREFIX + "/{*}")
            .pathExpressions(ContainerDocumentApi.DOCUMENT_V1_PREFIX + "/{*}")
            .methods(List.of(PUT, POST, DELETE))
            .action(BLOCK)
            .name("block-feed-global-endpoints")
            .blockResponseMessage("Feed to global endpoints are not allowed")
            .blockResponseCode(404);
    builder.rule(rule);
    // dryRun logs what would be blocked without actually blocking.
    builder.dryrun(dryRun);
    builder.defaultRule.action(ALLOW);
}
/**
 * Security filter component that blocks feed operations via global endpoints.
 * Wraps the generic RuleBasedRequestFilter from the jdisc-security-filters bundle.
 */
class BlockFeedGlobalEndpointsFilter extends Filter implements RuleBasedFilterConfig.Producer {

    private final Set<ContainerEndpoint> endpoints;
    private final boolean dryRun;

    public BlockFeedGlobalEndpointsFilter(Set<ContainerEndpoint> endpoints, boolean dryRun) {
        super(createFilterComponentModel());
        this.endpoints = Set.copyOf(endpoints); // defensive, immutable snapshot
        this.dryRun = dryRun;
    }

    // NOTE: the stray @Override that was here did not compile — a private static
    // method cannot override anything. Removed.
    /** Component model instantiating RuleBasedRequestFilter from the jdisc-security-filters bundle. */
    private static ChainedComponentModel createFilterComponentModel() {
        return new ChainedComponentModel(
                new BundleInstantiationSpecification(
                        new ComponentSpecification("com.yahoo.jdisc.http.filter.security.rule.RuleBasedRequestFilter"),
                        null,
                        new ComponentSpecification("jdisc-security-filters")),
                Dependencies.emptyDependencies());
    }
}
/**
 * Security filter component that blocks feed operations via global endpoints.
 * Wraps the generic RuleBasedRequestFilter from the jdisc-security-filters bundle.
 */
class BlockFeedGlobalEndpointsFilter extends Filter implements RuleBasedFilterConfig.Producer {

    private final Set<ContainerEndpoint> endpoints;
    private final boolean dryRun;

    public BlockFeedGlobalEndpointsFilter(Set<ContainerEndpoint> endpoints, boolean dryRun) {
        super(createFilterComponentModel());
        this.endpoints = Set.copyOf(endpoints); // defensive, immutable snapshot
        this.dryRun = dryRun;
    }

    // NOTE: the stray @Override that was here did not compile — a private static
    // method cannot override anything. Removed.
    /** Component model instantiating RuleBasedRequestFilter from the jdisc-security-filters bundle. */
    private static ChainedComponentModel createFilterComponentModel() {
        return new ChainedComponentModel(
                new BundleInstantiationSpecification(
                        new ComponentSpecification("com.yahoo.jdisc.http.filter.security.rule.RuleBasedRequestFilter"),
                        null,
                        new ComponentSpecification("jdisc-security-filters")),
                Dependencies.emptyDependencies());
    }
}
Perhaps a `java.util.concurrent.locks.Condition` would be preferable over `Thread.sleep()`?
/**
 * Blocks the calling thread until pendingBytes() drops to or below the given
 * threshold, polling once per millisecond.
 *
 * @throws InterruptedException if interrupted while sleeping
 */
private void stallWhilePendingAbove(long pending) throws InterruptedException {
    for (;;) {
        if (pendingBytes() <= pending) return;
        Thread.sleep(1);
    }
}
Thread.sleep(1);
/**
 * Blocks the calling thread until pendingBytes() falls to or below the given
 * threshold, polling once per millisecond.
 * NOTE(review): busy-polling; a java.util.concurrent.locks.Condition signalled
 * from the completion path would avoid the 1 ms wake-ups — confirm with owner.
 *
 * @throws InterruptedException if interrupted while sleeping
 */
private void stallWhilePendingAbove(long pending) throws InterruptedException {
    while (pendingBytes() > pending) {
        Thread.sleep(1);
    }
}
/**
 * Completion handler that books {@code written} bytes into the enclosing
 * {@code sent} counter on construction, and into {@code acked} exactly once
 * when the write completes or fails.
 */
class TrackCompletition implements CompletionHandler {
    private final long written;
    // Guards against the transport invoking completed()/failed() more than once;
    // without it the same bytes would be added to acked multiple times.
    // Fully qualified to avoid requiring a new import.
    private final java.util.concurrent.atomic.AtomicBoolean replied =
            new java.util.concurrent.atomic.AtomicBoolean(false);

    TrackCompletition(long written) {
        this.written = written;
        sent.addAndGet(written);
    }

    @Override
    public void completed() {
        if (!replied.getAndSet(true)) acked.addAndGet(written);
    }

    @Override
    public void failed(Throwable t) {
        // Failed writes are counted as acked too, so the pending counter still drains.
        if (!replied.getAndSet(true)) acked.addAndGet(written);
    }
}
/**
 * Completion handler that books {@code written} bytes into the enclosing
 * {@code sent} counter on construction, and into {@code acked} exactly once
 * when the write completes or fails — duplicate callbacks are ignored.
 */
class TrackCompletition implements CompletionHandler {

    private final long written;
    private final AtomicBoolean finished = new AtomicBoolean(false);

    TrackCompletition(long written) {
        this.written = written;
        sent.addAndGet(written);
    }

    // Credits the written bytes exactly once, no matter how many callbacks arrive.
    private void ackOnce() {
        if (finished.compareAndSet(false, true)) {
            acked.addAndGet(written);
        }
    }

    @Override
    public void completed() {
        ackOnce();
    }

    @Override
    public void failed(Throwable t) {
        ackOnce();
    }
}