comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Looks like the last event will get their agent reset to "system" when fromState == toState here. Is this intentional? | public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver) {
this.zkClient = new CuratorDatabaseClient(flavors, curator, clock, zone);
this.curator = curator;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
for (Node.State state : Node.State.values())
zkClient.writeTo(state, zkClient.getNodes(state), Agent.system, Optional.empty());
} | zkClient.writeTo(state, zkClient.getNodes(state), Agent.system, Optional.empty()); | public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver) {
this.zkClient = new CuratorDatabaseClient(flavors, curator, clock, zone);
this.curator = curator;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
for (Node.State state : Node.State.values())
zkClient.writeTo(state, zkClient.getNodes(state), Agent.system, Optional.empty());
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient zkClient;
private final Curator curator;
private final Clock clock;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
/**
* Creates a node repository form a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver());
}
/**
* Creates a node repository form a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return zkClient.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(Node.State ... inState) {
return zkClient.getNodes(inState).stream().collect(Collectors.toList());
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return zkClient.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/**
* Finds and returns all nodes that are children of the given parent node
*
* @param hostname Parent hostname
* @return List of child nodes
*/
public List<Node> getChildNodes(String hostname) {
return zkClient.getNodes().stream()
.filter(node -> node.parentHostname()
.map(parentHostname -> parentHostname.equals(hostname))
.orElse(false))
.collect(Collectors.toList());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return zkClient.getNodes(id, inState); }
public List<Node> getInactive() { return zkClient.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return zkClient.getNodes(Node.State.failed); }
/**
* Returns a set of nodes that should be trusted by the given node.
*/
private Set<Node> getTrustedNodes(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedNodes.addAll(getConfigNodes());
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
break;
case proxy:
case host:
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return Collections.unmodifiableSet(trustedNodes);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
List<NodeAcl> nodeAcls = new ArrayList<>();
NodeList candidates = new NodeList(getNodes());
if (children) {
List<Node> childNodes = candidates.childNodes(node).asList();
childNodes.forEach(childNode -> nodeAcls.add(new NodeAcl(childNode, getTrustedNodes(childNode, candidates))));
} else {
nodeAcls.add(new NodeAcl(node, getTrustedNodes(node, candidates)));
}
return Collections.unmodifiableList(nodeAcls);
}
/** Get config node by hostname */
public Optional<Node> getConfigNode(String hostname) {
return getConfigNodes().stream()
.filter(n -> hostname.equals(n.hostname()))
.findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
return zkClient.getDefaultFlavorForApplication(applicationId);
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), hostname, parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
for (Node node : nodes) {
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return zkClient.addNodes(nodes);
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes) {
for (Node node : nodes)
if (node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
try (Mutex lock = lockUnallocated()) {
return zkClient.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
}
}
public Node setReady(String hostname) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady)).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return zkClient.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return zkClient.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
zkClient.writeTo(Node.State.inactive,
zkClient.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return zkClient.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes) {
return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state */
public Node setDirty(Node node) {
return zkClient.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
* Set a node dirty, which is in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(String hostname) {
Node nodeToDirty = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state"));
if (nodeToDirty.status().hardwareFailure().isPresent())
throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
return setDirty(nodeToDirty);
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, Agent agent) {
return move(hostname, Node.State.parked, agent, Optional.empty());
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.empty());
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent) {
return move(hostname, Node.State.active, agent, Optional.empty());
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = getChildNodes(hostname).stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, toState, agent, reason));
return moved;
}
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
return move(node, toState, agent, reason);
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
throw new IllegalArgumentException("Could not move " + node + " to active:" +
"It has the same cluster and index as an existing node");
}
}
return zkClient.writeTo(toState, node, agent, reason);
}
}
/**
* Removes a node. A node must be in the failed or parked state before it can be removed.
*
* @return true if the node was removed, false if it was not found in one of these states
*/
public boolean remove(String hostname) {
Optional<Node> nodeToRemove = getNode(hostname, Node.State.failed, Node.State.parked);
if ( ! nodeToRemove.isPresent()) return false;
try (Mutex lock = lock(nodeToRemove.get())) {
return zkClient.removeNode(nodeToRemove.get().state(), hostname);
}
}
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return zkClient.writeTo(node.state(), node,
Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) {
if (nodes.isEmpty()) return Collections.emptyList();
Node.State state = nodes.get(0).state();
for (Node node : nodes) {
if ( node.state() != state)
throw new IllegalArgumentException("Multiple states: " + node.state() + " and " + state);
}
return zkClient.writeTo(state, nodes, Agent.system, Optional.empty());
}
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : zkClient.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
public List<Node> getConfigNodes() {
return Arrays.stream(curator.connectionSpec().split(","))
.map(hostPort -> hostPort.split(":")[0])
.map(host -> createNode(host, host, Optional.empty(),
flavors.getFlavorOrThrow("v-4-8-100"),
NodeType.config))
.collect(Collectors.toList());
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return zkClient.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return zkClient.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return zkClient.lockInactive(); }
/** Acquires the appropriate lock for this node */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient zkClient;
private final Curator curator;
private final Clock clock;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
/**
* Creates a node repository form a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver());
}
/**
* Creates a node repository form a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return zkClient.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(Node.State ... inState) {
return zkClient.getNodes(inState).stream().collect(Collectors.toList());
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return zkClient.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/**
* Finds and returns all nodes that are children of the given parent node
*
* @param hostname Parent hostname
* @return List of child nodes
*/
public List<Node> getChildNodes(String hostname) {
return zkClient.getNodes().stream()
.filter(node -> node.parentHostname()
.map(parentHostname -> parentHostname.equals(hostname))
.orElse(false))
.collect(Collectors.toList());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return zkClient.getNodes(id, inState); }
public List<Node> getInactive() { return zkClient.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return zkClient.getNodes(Node.State.failed); }
/**
* Returns a set of nodes that should be trusted by the given node.
*/
private Set<Node> getTrustedNodes(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedNodes.addAll(getConfigNodes());
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
break;
case proxy:
case host:
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return Collections.unmodifiableSet(trustedNodes);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
List<NodeAcl> nodeAcls = new ArrayList<>();
NodeList candidates = new NodeList(getNodes());
if (children) {
List<Node> childNodes = candidates.childNodes(node).asList();
childNodes.forEach(childNode -> nodeAcls.add(new NodeAcl(childNode, getTrustedNodes(childNode, candidates))));
} else {
nodeAcls.add(new NodeAcl(node, getTrustedNodes(node, candidates)));
}
return Collections.unmodifiableList(nodeAcls);
}
/** Get config node by hostname */
public Optional<Node> getConfigNode(String hostname) {
return getConfigNodes().stream()
.filter(n -> hostname.equals(n.hostname()))
.findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
return zkClient.getDefaultFlavorForApplication(applicationId);
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), hostname, parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
for (Node node : nodes) {
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return zkClient.addNodes(nodes);
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes) {
for (Node node : nodes)
if (node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
try (Mutex lock = lockUnallocated()) {
return zkClient.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
}
}
public Node setReady(String hostname) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady)).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return zkClient.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return zkClient.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
zkClient.writeTo(Node.State.inactive,
zkClient.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return zkClient.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes) {
return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state */
public Node setDirty(Node node) {
return zkClient.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
* Set a node dirty, which is in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(String hostname) {
Node nodeToDirty = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state"));
if (nodeToDirty.status().hardwareFailure().isPresent())
throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
return setDirty(nodeToDirty);
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, Agent agent) {
return move(hostname, Node.State.parked, agent, Optional.empty());
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.empty());
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent) {
return move(hostname, Node.State.active, agent, Optional.empty());
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = getChildNodes(hostname).stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, toState, agent, reason));
return moved;
}
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
return move(node, toState, agent, reason);
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
throw new IllegalArgumentException("Could not move " + node + " to active:" +
"It has the same cluster and index as an existing node");
}
}
return zkClient.writeTo(toState, node, agent, reason);
}
}
/**
* Removes a node. A node must be in the failed or parked state before it can be removed.
*
* @return true if the node was removed, false if it was not found in one of these states
*/
public boolean remove(String hostname) {
Optional<Node> nodeToRemove = getNode(hostname, Node.State.failed, Node.State.parked);
if ( ! nodeToRemove.isPresent()) return false;
try (Mutex lock = lock(nodeToRemove.get())) {
return zkClient.removeNode(nodeToRemove.get().state(), hostname);
}
}
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return zkClient.writeTo(node.state(), node,
Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) {
if (nodes.isEmpty()) return Collections.emptyList();
Node.State state = nodes.get(0).state();
for (Node node : nodes) {
if ( node.state() != state)
throw new IllegalArgumentException("Multiple states: " + node.state() + " and " + state);
}
return zkClient.writeTo(state, nodes, Agent.system, Optional.empty());
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param filter the filter determining the set of nodes where the operation will be performed
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
    // Partition the matching nodes by the lock that guards them: unallocated nodes are guarded
    // by the single repository-wide lock, allocated nodes by their owning application's lock.
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : zkClient.getNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }
    // NOTE(review): nodes are read and partitioned before any lock is taken, so a node may have
    // changed (or changed owner) by the time the action runs under the lock — confirm this race
    // is acceptable to callers.
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes)
            resultingNodes.add(action.apply(node));
    }
    // Take each application lock only while acting on that application's nodes
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue())
                resultingNodes.add(action.apply(node));
        }
    }
    return resultingNodes;
}
/**
 * Returns node objects for the config servers this repository talks to,
 * derived from the curator connection spec (one "host:port" entry per server).
 */
public List<Node> getConfigNodes() {
    List<Node> configNodes = new ArrayList<>();
    for (String hostPort : curator.connectionSpec().split(",")) {
        String host = hostPort.split(":")[0];
        configNodes.add(createNode(host, host, Optional.empty(),
                                   flavors.getFlavorOrThrow("v-4-8-100"),
                                   NodeType.config));
    }
    return configNodes;
}
/** Returns the time keeper of this system */
public Clock clock() {
    return clock;
}
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) {
    return zkClient.lock(application);
}
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) {
    return zkClient.lock(application, timeout);
}
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() {
    return zkClient.lockInactive();
}
/**
 * Acquires the appropriate lock for this node: the owning application's lock if the node
 * is allocated, otherwise the repository-wide unallocated-nodes lock.
 */
private Mutex lock(Node node) {
    if (node.allocation().isPresent())
        return lock(node.allocation().get().owner());
    return lockUnallocated();
}
} |
Not sure how you reached that conclusion? The agent parameter here is used to record the agent in the state transition history entry, but if there is no state transition it is not used for anything: ``` public History recordStateTransition(Node.State from, Node.State to, Agent agent, Instant at) { if (from == to) return this; ... ``` | public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver) {
this.zkClient = new CuratorDatabaseClient(flavors, curator, clock, zone);
this.curator = curator;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
for (Node.State state : Node.State.values())
zkClient.writeTo(state, zkClient.getNodes(state), Agent.system, Optional.empty());
} | zkClient.writeTo(state, zkClient.getNodes(state), Agent.system, Optional.empty()); | public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver) {
this.zkClient = new CuratorDatabaseClient(flavors, curator, clock, zone);
this.curator = curator;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
for (Node.State state : Node.State.values())
zkClient.writeTo(state, zkClient.getNodes(state), Agent.system, Optional.empty());
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient zkClient;
private final Curator curator;
private final Clock clock;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
/**
* Creates a node repository form a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver());
}
/**
* Creates a node repository form a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return zkClient.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(Node.State ... inState) {
return zkClient.getNodes(inState).stream().collect(Collectors.toList());
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return zkClient.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/**
* Finds and returns all nodes that are children of the given parent node
*
* @param hostname Parent hostname
* @return List of child nodes
*/
public List<Node> getChildNodes(String hostname) {
return zkClient.getNodes().stream()
.filter(node -> node.parentHostname()
.map(parentHostname -> parentHostname.equals(hostname))
.orElse(false))
.collect(Collectors.toList());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return zkClient.getNodes(id, inState); }
public List<Node> getInactive() { return zkClient.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return zkClient.getNodes(Node.State.failed); }
/**
* Returns a set of nodes that should be trusted by the given node.
*/
private Set<Node> getTrustedNodes(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedNodes.addAll(getConfigNodes());
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
break;
case proxy:
case host:
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return Collections.unmodifiableSet(trustedNodes);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
List<NodeAcl> nodeAcls = new ArrayList<>();
NodeList candidates = new NodeList(getNodes());
if (children) {
List<Node> childNodes = candidates.childNodes(node).asList();
childNodes.forEach(childNode -> nodeAcls.add(new NodeAcl(childNode, getTrustedNodes(childNode, candidates))));
} else {
nodeAcls.add(new NodeAcl(node, getTrustedNodes(node, candidates)));
}
return Collections.unmodifiableList(nodeAcls);
}
/** Get config node by hostname */
public Optional<Node> getConfigNode(String hostname) {
return getConfigNodes().stream()
.filter(n -> hostname.equals(n.hostname()))
.findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
return zkClient.getDefaultFlavorForApplication(applicationId);
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), hostname, parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
for (Node node : nodes) {
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return zkClient.addNodes(nodes);
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes) {
for (Node node : nodes)
if (node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
try (Mutex lock = lockUnallocated()) {
return zkClient.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
}
}
public Node setReady(String hostname) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady)).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return zkClient.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return zkClient.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
zkClient.writeTo(Node.State.inactive,
zkClient.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return zkClient.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes) {
return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state */
public Node setDirty(Node node) {
return zkClient.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
* Set a node dirty, which is in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(String hostname) {
Node nodeToDirty = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state"));
if (nodeToDirty.status().hardwareFailure().isPresent())
throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
return setDirty(nodeToDirty);
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, Agent agent) {
return move(hostname, Node.State.parked, agent, Optional.empty());
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.empty());
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent) {
return move(hostname, Node.State.active, agent, Optional.empty());
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = getChildNodes(hostname).stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, toState, agent, reason));
return moved;
}
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
return move(node, toState, agent, reason);
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
throw new IllegalArgumentException("Could not move " + node + " to active:" +
"It has the same cluster and index as an existing node");
}
}
return zkClient.writeTo(toState, node, agent, reason);
}
}
/**
* Removes a node. A node must be in the failed or parked state before it can be removed.
*
* @return true if the node was removed, false if it was not found in one of these states
*/
public boolean remove(String hostname) {
Optional<Node> nodeToRemove = getNode(hostname, Node.State.failed, Node.State.parked);
if ( ! nodeToRemove.isPresent()) return false;
try (Mutex lock = lock(nodeToRemove.get())) {
return zkClient.removeNode(nodeToRemove.get().state(), hostname);
}
}
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return zkClient.writeTo(node.state(), node,
Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) {
if (nodes.isEmpty()) return Collections.emptyList();
Node.State state = nodes.get(0).state();
for (Node node : nodes) {
if ( node.state() != state)
throw new IllegalArgumentException("Multiple states: " + node.state() + " and " + state);
}
return zkClient.writeTo(state, nodes, Agent.system, Optional.empty());
}
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : zkClient.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
public List<Node> getConfigNodes() {
return Arrays.stream(curator.connectionSpec().split(","))
.map(hostPort -> hostPort.split(":")[0])
.map(host -> createNode(host, host, Optional.empty(),
flavors.getFlavorOrThrow("v-4-8-100"),
NodeType.config))
.collect(Collectors.toList());
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return zkClient.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return zkClient.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return zkClient.lockInactive(); }
/** Acquires the appropriate lock for this node */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient zkClient;
private final Curator curator;
private final Clock clock;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
/**
* Creates a node repository form a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver());
}
/**
* Creates a node repository form a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return zkClient.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(Node.State ... inState) {
return zkClient.getNodes(inState).stream().collect(Collectors.toList());
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return zkClient.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/**
* Finds and returns all nodes that are children of the given parent node
*
* @param hostname Parent hostname
* @return List of child nodes
*/
public List<Node> getChildNodes(String hostname) {
return zkClient.getNodes().stream()
.filter(node -> node.parentHostname()
.map(parentHostname -> parentHostname.equals(hostname))
.orElse(false))
.collect(Collectors.toList());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return zkClient.getNodes(id, inState); }
public List<Node> getInactive() { return zkClient.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return zkClient.getNodes(Node.State.failed); }
/**
* Returns a set of nodes that should be trusted by the given node.
*/
private Set<Node> getTrustedNodes(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedNodes.addAll(getConfigNodes());
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
break;
case proxy:
case host:
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return Collections.unmodifiableSet(trustedNodes);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
List<NodeAcl> nodeAcls = new ArrayList<>();
NodeList candidates = new NodeList(getNodes());
if (children) {
List<Node> childNodes = candidates.childNodes(node).asList();
childNodes.forEach(childNode -> nodeAcls.add(new NodeAcl(childNode, getTrustedNodes(childNode, candidates))));
} else {
nodeAcls.add(new NodeAcl(node, getTrustedNodes(node, candidates)));
}
return Collections.unmodifiableList(nodeAcls);
}
/** Get config node by hostname */
public Optional<Node> getConfigNode(String hostname) {
return getConfigNodes().stream()
.filter(n -> hostname.equals(n.hostname()))
.findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
return zkClient.getDefaultFlavorForApplication(applicationId);
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), hostname, parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
for (Node node : nodes) {
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return zkClient.addNodes(nodes);
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes) {
for (Node node : nodes)
if (node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
try (Mutex lock = lockUnallocated()) {
return zkClient.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
}
}
public Node setReady(String hostname) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady)).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return zkClient.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return zkClient.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
zkClient.writeTo(Node.State.inactive,
zkClient.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return zkClient.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes) {
return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state */
public Node setDirty(Node node) {
return zkClient.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
* Set a node dirty, which is in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(String hostname) {
Node nodeToDirty = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state"));
if (nodeToDirty.status().hardwareFailure().isPresent())
throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
return setDirty(nodeToDirty);
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, Agent agent) {
return move(hostname, Node.State.parked, agent, Optional.empty());
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.empty());
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent) {
return move(hostname, Node.State.active, agent, Optional.empty());
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = getChildNodes(hostname).stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, toState, agent, reason));
return moved;
}
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
return move(node, toState, agent, reason);
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
throw new IllegalArgumentException("Could not move " + node + " to active:" +
"It has the same cluster and index as an existing node");
}
}
return zkClient.writeTo(toState, node, agent, reason);
}
}
/**
 * Removes a node. A node must be in the failed or parked state before it can be removed.
 *
 * @return true if the node was removed, false if it was not found in one of these states
 */
public boolean remove(String hostname) {
    return getNode(hostname, Node.State.failed, Node.State.parked)
            .map(node -> {
                try (Mutex lock = lock(node)) {
                    return zkClient.removeNode(node.state(), hostname);
                }
            })
            .orElse(false);
}
/**
 * Increases the restart generation of the active nodes matching the filter.
 * Only active nodes can be restarted, since a restart is carried out by the allocated services.
 *
 * @return the nodes in their new state
 */
public List<Node> restart(NodeFilter filter) {
    return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 * Unlike restart, reboot applies to nodes in any state.
 *
 * @return the nodes in their new state
 */
public List<Node> reboot(NodeFilter filter) {
    return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository; callers requiring atomicity must hold the
 * appropriate lock themselves.
 *
 * @return the written node for convenience
 */
public Node write(Node node) { return zkClient.writeTo(node.state(), node,
                                                       Agent.system, Optional.empty()); }
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository.
 *
 * @return the written nodes for convenience
 * @throws IllegalArgumentException if the given nodes are not all in the same state
 */
public List<Node> write(List<Node> nodes) {
    if (nodes.isEmpty()) return Collections.emptyList();
    Node.State expectedState = nodes.get(0).state();
    nodes.stream()
         .filter(node -> node.state() != expectedState)
         .findFirst()
         .ifPresent(node -> {
             throw new IllegalArgumentException("Multiple states: " + node.state() + " and " + expectedState);
         });
    return zkClient.writeTo(expectedState, nodes, Agent.system, Optional.empty());
}
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param filter the filter determining the set of nodes where the operation will be performed
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
    // Partition the matching nodes by the lock they require: unallocated nodes share one lock,
    // allocated nodes are locked per owning application
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : zkClient.getNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }
    // Perform the operation while holding the appropriate lock for each partition
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes)
            resultingNodes.add(action.apply(node));
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue())
                resultingNodes.add(action.apply(node));
        }
    }
    return resultingNodes;
}
/**
 * Returns one synthesized config server node per host in the curator connection spec.
 * The hosts are derived by stripping the port from each "host:port" entry.
 */
public List<Node> getConfigNodes() {
    List<Node> configNodes = new ArrayList<>();
    for (String hostPort : curator.connectionSpec().split(",")) {
        String host = hostPort.split(":")[0];
        configNodes.add(createNode(host, host, Optional.empty(),
                                   flavors.getFlavorOrThrow("v-4-8-100"),
                                   NodeType.config));
    }
    return configNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Creates a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return zkClient.lock(application); }
/** Creates a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return zkClient.lock(application, timeout); }
/** Creates a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return zkClient.lockInactive(); }
/** Acquires the appropriate lock for this node: the owning application's lock if allocated, otherwise the unallocated lock */
private Mutex lock(Node node) {
    return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
} |
This is a bit misleading. The NodeAdmin is not in a completely 'wantFrozen' state, but it may also not be entirely in the !wantFrozen state. How about just saying that the NodeAdmin has not yet converged to frozen (unfrozen)? | private void convergeState(State wantedState) {
boolean wantFrozen = wantedState != RESUMED;
if (!nodeAdmin.setFrozen(wantFrozen)) {
throw new RuntimeException("Wanted NodeAdmin to be " + (wantFrozen ? "frozen" : "unfrozen") +
" but instead is " + (!wantFrozen ? "frozen" : "unfrozen"));
}
if (wantedState == RESUMED) {
synchronized (monitor) {
currentState = RESUMED;
}
return;
}
if (currentState == RESUMED) {
if (! orchestrator.suspend(dockerHostHostName)) {
nodeAdmin.setFrozen(false);
throw new RuntimeException("Failed to get permission to suspend node-admin, resuming.");
}
synchronized (monitor) {
currentState = SUSPENDED_NODE_ADMIN;
}
if (wantedState == currentState) return;
}
List<String> nodesInActiveState;
try {
nodesInActiveState = getNodesInActiveState();
} catch (IOException e) {
throw new RuntimeException("Failed to get nodes from node repo:" + e.getMessage());
}
if (nodesInActiveState.size() > 0) {
orchestrator.suspend(dockerHostHostName, nodesInActiveState).ifPresent(orchestratorResponse -> {
nodeAdmin.setFrozen(false);
throw new RuntimeException("Failed to get permission to suspend, resuming. Reason: " + orchestratorResponse);
});
nodeAdmin.stopNodeAgentServices(nodesInActiveState);
}
synchronized (monitor) {
currentState = SUSPENDED;
}
} | throw new RuntimeException("Wanted NodeAdmin to be " + (wantFrozen ? "frozen" : "unfrozen") + | private void convergeState(State wantedState) {
boolean wantFrozen = wantedState != RESUMED;
if (!nodeAdmin.setFrozen(wantFrozen)) {
throw new RuntimeException("NodeAdmin has not yet converged to " + (wantFrozen ? "frozen" : "unfrozen"));
}
if (wantedState == RESUMED) {
synchronized (monitor) {
currentState = RESUMED;
}
return;
}
if (currentState == RESUMED) {
if (! orchestrator.suspend(dockerHostHostName)) {
nodeAdmin.setFrozen(false);
throw new RuntimeException("Failed to get permission to suspend node-admin, resuming.");
}
synchronized (monitor) {
currentState = SUSPENDED_NODE_ADMIN;
}
if (wantedState == currentState) return;
}
List<String> nodesInActiveState;
try {
nodesInActiveState = getNodesInActiveState();
} catch (IOException e) {
throw new RuntimeException("Failed to get nodes from node repo:" + e.getMessage());
}
if (nodesInActiveState.size() > 0) {
orchestrator.suspend(dockerHostHostName, nodesInActiveState).ifPresent(orchestratorResponse -> {
nodeAdmin.setFrozen(false);
throw new RuntimeException("Failed to get permission to suspend, resuming. Reason: " + orchestratorResponse);
});
nodeAdmin.stopNodeAgentServices(nodesInActiveState);
}
synchronized (monitor) {
currentState = SUSPENDED;
}
} | class NodeAdminStateUpdater extends AbstractComponent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private State currentState = RESUMED;
private State wantedState = RESUMED;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
private final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdminStateUpdater.class);
private Thread loopThread;
private final NodeRepository nodeRepository;
private final NodeAdmin nodeAdmin;
private final Clock clock;
private final Orchestrator orchestrator;
private final String dockerHostHostName;
private long delaysBetweenEachTickMillis = 30_000;
private Instant lastTick;
/**
 * Creates an updater which converges the state of the node admin and its node agents
 * towards the wanted state, coordinating suspension with the orchestrator.
 *
 * @param nodeRepository source of the containers this host should run
 * @param nodeAdmin the node admin to freeze/unfreeze and refresh
 * @param clock time source used for tick scheduling (injectable for testing)
 * @param orchestrator grants or denies permission to suspend this host and its nodes
 * @param dockerHostHostName hostname of the docker host this updater manages
 */
public NodeAdminStateUpdater(
        final NodeRepository nodeRepository,
        final NodeAdmin nodeAdmin,
        Clock clock,
        Orchestrator orchestrator,
        String dockerHostHostName) {
    this.nodeRepository = nodeRepository;
    this.nodeAdmin = nodeAdmin;
    this.clock = clock;
    this.orchestrator = orchestrator;
    this.dockerHostHostName = dockerHostHostName;
    this.lastTick = clock.instant();
}
public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED}
/** Returns a snapshot of this updater's state for debugging, taken under the monitor lock. */
public Map<String, Object> getDebugPage() {
    Map<String, Object> debug = new LinkedHashMap<>();
    synchronized (monitor) {
        debug.put("dockerHostHostName", dockerHostHostName);
        debug.put("NodeAdmin", nodeAdmin.debugInfo());
        debug.put("Wanted State: ", wantedState);
        debug.put("Current State: ", currentState);
    }
    return debug;
}
/**
 * Sets the wanted state and reports whether it has already been reached.
 * Convergence happens asynchronously in the tick thread, so callers are expected
 * to poll this method until it returns true.
 *
 * @return true if the current state already equals the wanted state
 */
public boolean setResumeStateAndCheckIfResumed(State wantedState) {
    synchronized (monitor) {
        if (this.wantedState != wantedState) {
            this.wantedState = wantedState;
            signalWorkToBeDone();
        }
        return currentState == wantedState;
    }
}
/** Wakes the tick thread so it processes pending work immediately instead of waiting out its interval. */
void signalWorkToBeDone() {
    synchronized (monitor) {
        if (! workToDoNow) {
            workToDoNow = true;
            monitor.notifyAll();
        }
    }
}
/**
 * One iteration of the convergence loop: waits until the tick interval has elapsed or
 * {@link #signalWorkToBeDone()} wakes it early, then converges towards the wanted state
 * if it differs from the current state.
 */
void tick() {
    State wantedState = null;
    synchronized (monitor) {
        // Wait until the remainder of the tick interval has passed, unless woken early
        while (! workToDoNow) {
            long remainder = delaysBetweenEachTickMillis - Duration.between(lastTick, clock.instant()).toMillis();
            if (remainder > 0) {
                try {
                    monitor.wait(remainder);
                } catch (InterruptedException e) {
                    logger.error("Interrupted, but ignoring this: NodeAdminStateUpdater");
                }
            } else break;
        }
        lastTick = clock.instant();
        workToDoNow = false;
        // Capture the wanted state under the lock; convergence itself runs outside it
        if (currentState != this.wantedState) {
            wantedState = this.wantedState;
        }
    }
    if (wantedState != null) {
        try {
            convergeState(wantedState);
        } catch (Exception e) {
            logger.error("Failed to converge NodeAdminStateUpdater", e);
        }
    }
}
/**
* This method attempts to converge NodeAgent's and NodeAdmin's frozen state with their orchestrator
* state. When trying to suspend node-admin, this method will first attempt to freeze all NodeAgents and
* NodeAdmin, then asking orchestrator for permission to suspend node-admin app, and finally asking orchestrator
* for permission to suspend all active nodes on this host, if either of the request is denied,
* this method will unfreeze NodeAgents and NodeAdmin.
*/
/**
 * Fetches the set of containers this host should run from the node repository and
 * hands it to the node admin. Skipped entirely unless the updater is RESUMED.
 * All failures are logged and swallowed so the scheduled task keeps running.
 */
private void fetchContainersToRunFromNodeRepository() {
    synchronized (monitor) {
        if (currentState != RESUMED) {
            logger.info("Frozen, skipping fetching info from node repository");
            return;
        }
        final List<ContainerNodeSpec> containersToRun;
        try {
            containersToRun = nodeRepository.getContainersToRun();
        } catch (Throwable t) {
            logger.warning("Failed fetching container info from node repository", t);
            return;
        }
        if (containersToRun == null) {
            logger.warning("Got null from node repository");
            return;
        }
        try {
            nodeAdmin.refreshContainersToRun(containersToRun);
        } catch (Throwable t) {
            logger.warning("Failed updating node admin: ", t);
        }
    }
}
/**
 * Returns the hostnames of this host's containers that are in the active state,
 * according to the node repository.
 *
 * @throws IOException if the node repository cannot be reached
 */
private List<String> getNodesInActiveState() throws IOException {
    return nodeRepository.getContainersToRun()
            .stream()
            .filter(nodespec -> nodespec.nodeState == Node.State.active)
            .map(nodespec -> nodespec.hostname)
            .collect(Collectors.toList());
}
/**
 * Starts the convergence tick thread and the scheduled fetching of containers to run.
 * May only be called once.
 *
 * @param stateConvergeInterval delay between convergence ticks, in milliseconds
 * @param fetchContainersInterval delay between node repository fetches, in milliseconds
 * @throws RuntimeException if called more than once
 */
public void start(long stateConvergeInterval, long fetchContainersInterval) {
    delaysBetweenEachTickMillis = stateConvergeInterval;
    if (loopThread != null) {
        throw new RuntimeException("Can not restart NodeAdminStateUpdater");
    }
    loopThread = new Thread(() -> {
        while (! terminated.get()) tick();
    });
    loopThread.setName("tick-NodeAdminStateUpdater");
    loopThread.start();
    scheduler.scheduleWithFixedDelay(
            this::fetchContainersToRunFromNodeRepository,
            0,
            fetchContainersInterval,
            MILLISECONDS);
}
/**
 * Shuts down this updater: stops the tick thread and the container-fetch scheduler,
 * then shuts down the node admin. May only be called once.
 *
 * @throws RuntimeException if called more than once, or if the scheduler fails to stop in time
 */
@Override
public void deconstruct() {
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    // Wake the tick thread so it can observe 'terminated' and exit promptly
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop NodeAdminStateUpdater tick thread");
        }
        scheduler.shutdown();
        if (! scheduler.awaitTermination(30, TimeUnit.SECONDS)) {
            throw new RuntimeException("Could not stop NodeAdminStateUpdater fetch containers scheduler.");
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can observe it
        Thread.currentThread().interrupt();
        logger.error("Interrupted; Could not stop NodeAdminStateUpdater thread", e);
    }
    nodeAdmin.shutdown();
}
} | class NodeAdminStateUpdater extends AbstractComponent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private State currentState = RESUMED;
private State wantedState = RESUMED;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
private final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdminStateUpdater.class);
private Thread loopThread;
private final NodeRepository nodeRepository;
private final NodeAdmin nodeAdmin;
private final Clock clock;
private final Orchestrator orchestrator;
private final String dockerHostHostName;
private long delaysBetweenEachTickMillis = 30_000;
private Instant lastTick;
public NodeAdminStateUpdater(
final NodeRepository nodeRepository,
final NodeAdmin nodeAdmin,
Clock clock,
Orchestrator orchestrator,
String dockerHostHostName) {
this.nodeRepository = nodeRepository;
this.nodeAdmin = nodeAdmin;
this.clock = clock;
this.orchestrator = orchestrator;
this.dockerHostHostName = dockerHostHostName;
this.lastTick = clock.instant();
}
public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED}
public Map<String, Object> getDebugPage() {
Map<String, Object> debug = new LinkedHashMap<>();
synchronized (monitor) {
debug.put("dockerHostHostName", dockerHostHostName);
debug.put("NodeAdmin", nodeAdmin.debugInfo());
debug.put("Wanted State: ", wantedState);
debug.put("Current State: ", currentState);
}
return debug;
}
public boolean setResumeStateAndCheckIfResumed(State wantedState) {
synchronized (monitor) {
if (this.wantedState != wantedState) {
this.wantedState = wantedState;
signalWorkToBeDone();
}
return currentState == wantedState;
}
}
void signalWorkToBeDone() {
synchronized (monitor) {
if (! workToDoNow) {
workToDoNow = true;
monitor.notifyAll();
}
}
}
void tick() {
State wantedState = null;
synchronized (monitor) {
while (! workToDoNow) {
long remainder = delaysBetweenEachTickMillis - Duration.between(lastTick, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: NodeAdminStateUpdater");
}
} else break;
}
lastTick = clock.instant();
workToDoNow = false;
if (currentState != this.wantedState) {
wantedState = this.wantedState;
}
}
if (wantedState != null) {
try {
convergeState(wantedState);
} catch (Exception e) {
logger.error("Failed to converge NodeAdminStateUpdater", e);
}
}
}
/**
* This method attempts to converge NodeAgent's and NodeAdmin's frozen state with their orchestrator
* state. When trying to suspend node-admin, this method will first attempt to freeze all NodeAgents and
* NodeAdmin, then asking orchestrator for permission to suspend node-admin app, and finally asking orchestrator
* for permission to suspend all active nodes on this host, if either of the request is denied,
* this method will unfreeze NodeAgents and NodeAdmin.
*/
private void fetchContainersToRunFromNodeRepository() {
synchronized (monitor) {
if (currentState != RESUMED) {
logger.info("Frozen, skipping fetching info from node repository");
return;
}
final List<ContainerNodeSpec> containersToRun;
try {
containersToRun = nodeRepository.getContainersToRun();
} catch (Throwable t) {
logger.warning("Failed fetching container info from node repository", t);
return;
}
if (containersToRun == null) {
logger.warning("Got null from node repository");
return;
}
try {
nodeAdmin.refreshContainersToRun(containersToRun);
} catch (Throwable t) {
logger.warning("Failed updating node admin: ", t);
}
}
}
private List<String> getNodesInActiveState() throws IOException {
return nodeRepository.getContainersToRun()
.stream()
.filter(nodespec -> nodespec.nodeState == Node.State.active)
.map(nodespec -> nodespec.hostname)
.collect(Collectors.toList());
}
public void start(long stateConvergeInterval, long fetchContainersInterval) {
delaysBetweenEachTickMillis = stateConvergeInterval;
if (loopThread != null) {
throw new RuntimeException("Can not restart NodeAdminStateUpdater");
}
loopThread = new Thread(() -> {
while (! terminated.get()) tick();
});
loopThread.setName("tick-NodeAdminStateUpdater");
loopThread.start();
scheduler.scheduleWithFixedDelay(
this::fetchContainersToRunFromNodeRepository,
0,
fetchContainersInterval,
MILLISECONDS);
}
@Override
public void deconstruct() {
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop NodeAdminStateUpdater tick thread");
}
scheduler.shutdown();
if (! scheduler.awaitTermination(30, TimeUnit.SECONDS)) {
throw new RuntimeException("Could not stop NodeAdminStateUpdater fetch containers scheduler.");
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop NodeAdminStateUpdater thread");
}
nodeAdmin.shutdown();
}
} |
Fixed. | private void convergeState(State wantedState) {
boolean wantFrozen = wantedState != RESUMED;
if (!nodeAdmin.setFrozen(wantFrozen)) {
throw new RuntimeException("Wanted NodeAdmin to be " + (wantFrozen ? "frozen" : "unfrozen") +
" but instead is " + (!wantFrozen ? "frozen" : "unfrozen"));
}
if (wantedState == RESUMED) {
synchronized (monitor) {
currentState = RESUMED;
}
return;
}
if (currentState == RESUMED) {
if (! orchestrator.suspend(dockerHostHostName)) {
nodeAdmin.setFrozen(false);
throw new RuntimeException("Failed to get permission to suspend node-admin, resuming.");
}
synchronized (monitor) {
currentState = SUSPENDED_NODE_ADMIN;
}
if (wantedState == currentState) return;
}
List<String> nodesInActiveState;
try {
nodesInActiveState = getNodesInActiveState();
} catch (IOException e) {
throw new RuntimeException("Failed to get nodes from node repo:" + e.getMessage());
}
if (nodesInActiveState.size() > 0) {
orchestrator.suspend(dockerHostHostName, nodesInActiveState).ifPresent(orchestratorResponse -> {
nodeAdmin.setFrozen(false);
throw new RuntimeException("Failed to get permission to suspend, resuming. Reason: " + orchestratorResponse);
});
nodeAdmin.stopNodeAgentServices(nodesInActiveState);
}
synchronized (monitor) {
currentState = SUSPENDED;
}
} | throw new RuntimeException("Wanted NodeAdmin to be " + (wantFrozen ? "frozen" : "unfrozen") + | private void convergeState(State wantedState) {
boolean wantFrozen = wantedState != RESUMED;
if (!nodeAdmin.setFrozen(wantFrozen)) {
throw new RuntimeException("NodeAdmin has not yet converged to " + (wantFrozen ? "frozen" : "unfrozen"));
}
if (wantedState == RESUMED) {
synchronized (monitor) {
currentState = RESUMED;
}
return;
}
if (currentState == RESUMED) {
if (! orchestrator.suspend(dockerHostHostName)) {
nodeAdmin.setFrozen(false);
throw new RuntimeException("Failed to get permission to suspend node-admin, resuming.");
}
synchronized (monitor) {
currentState = SUSPENDED_NODE_ADMIN;
}
if (wantedState == currentState) return;
}
List<String> nodesInActiveState;
try {
nodesInActiveState = getNodesInActiveState();
} catch (IOException e) {
throw new RuntimeException("Failed to get nodes from node repo:" + e.getMessage());
}
if (nodesInActiveState.size() > 0) {
orchestrator.suspend(dockerHostHostName, nodesInActiveState).ifPresent(orchestratorResponse -> {
nodeAdmin.setFrozen(false);
throw new RuntimeException("Failed to get permission to suspend, resuming. Reason: " + orchestratorResponse);
});
nodeAdmin.stopNodeAgentServices(nodesInActiveState);
}
synchronized (monitor) {
currentState = SUSPENDED;
}
} | class NodeAdminStateUpdater extends AbstractComponent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private State currentState = RESUMED;
private State wantedState = RESUMED;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
private final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdminStateUpdater.class);
private Thread loopThread;
private final NodeRepository nodeRepository;
private final NodeAdmin nodeAdmin;
private final Clock clock;
private final Orchestrator orchestrator;
private final String dockerHostHostName;
private long delaysBetweenEachTickMillis = 30_000;
private Instant lastTick;
public NodeAdminStateUpdater(
final NodeRepository nodeRepository,
final NodeAdmin nodeAdmin,
Clock clock,
Orchestrator orchestrator,
String dockerHostHostName) {
this.nodeRepository = nodeRepository;
this.nodeAdmin = nodeAdmin;
this.clock = clock;
this.orchestrator = orchestrator;
this.dockerHostHostName = dockerHostHostName;
this.lastTick = clock.instant();
}
public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED}
public Map<String, Object> getDebugPage() {
Map<String, Object> debug = new LinkedHashMap<>();
synchronized (monitor) {
debug.put("dockerHostHostName", dockerHostHostName);
debug.put("NodeAdmin", nodeAdmin.debugInfo());
debug.put("Wanted State: ", wantedState);
debug.put("Current State: ", currentState);
}
return debug;
}
public boolean setResumeStateAndCheckIfResumed(State wantedState) {
synchronized (monitor) {
if (this.wantedState != wantedState) {
this.wantedState = wantedState;
signalWorkToBeDone();
}
return currentState == wantedState;
}
}
void signalWorkToBeDone() {
synchronized (monitor) {
if (! workToDoNow) {
workToDoNow = true;
monitor.notifyAll();
}
}
}
void tick() {
State wantedState = null;
synchronized (monitor) {
while (! workToDoNow) {
long remainder = delaysBetweenEachTickMillis - Duration.between(lastTick, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: NodeAdminStateUpdater");
}
} else break;
}
lastTick = clock.instant();
workToDoNow = false;
if (currentState != this.wantedState) {
wantedState = this.wantedState;
}
}
if (wantedState != null) {
try {
convergeState(wantedState);
} catch (Exception e) {
logger.error("Failed to converge NodeAdminStateUpdater", e);
}
}
}
/**
* This method is attempts to converge NodeAgent's and NodeAdmin's frozen state with their orchestrator
* state. When trying to suspend node-admin, this method will first attempt to freeze all NodeAgents and
* NodeAdmin, then asking orchestrator for permission to suspend node-admin app, and finally asking orchestrator
* for permission to suspend all active nodes on this host, if either of the request is denied,
* this method will unfreeze NodeAgents and NodeAdmin.
*/
private void fetchContainersToRunFromNodeRepository() {
synchronized (monitor) {
if (currentState != RESUMED) {
logger.info("Frozen, skipping fetching info from node repository");
return;
}
final List<ContainerNodeSpec> containersToRun;
try {
containersToRun = nodeRepository.getContainersToRun();
} catch (Throwable t) {
logger.warning("Failed fetching container info from node repository", t);
return;
}
if (containersToRun == null) {
logger.warning("Got null from node repository");
return;
}
try {
nodeAdmin.refreshContainersToRun(containersToRun);
} catch (Throwable t) {
logger.warning("Failed updating node admin: ", t);
}
}
}
private List<String> getNodesInActiveState() throws IOException {
return nodeRepository.getContainersToRun()
.stream()
.filter(nodespec -> nodespec.nodeState == Node.State.active)
.map(nodespec -> nodespec.hostname)
.collect(Collectors.toList());
}
public void start(long stateConvergeInterval, long fetchContainersInterval) {
delaysBetweenEachTickMillis = stateConvergeInterval;
if (loopThread != null) {
throw new RuntimeException("Can not restart NodeAdminStateUpdater");
}
loopThread = new Thread(() -> {
while (! terminated.get()) tick();
});
loopThread.setName("tick-NodeAdminStateUpdater");
loopThread.start();
scheduler.scheduleWithFixedDelay(
this::fetchContainersToRunFromNodeRepository,
0,
fetchContainersInterval,
MILLISECONDS);
}
@Override
public void deconstruct() {
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop NodeAdminStateUpdater tick thread");
}
scheduler.shutdown();
if (! scheduler.awaitTermination(30, TimeUnit.SECONDS)) {
throw new RuntimeException("Could not stop NodeAdminStateUpdater fetch containers scheduler.");
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop NodeAdminStateUpdater thread");
}
nodeAdmin.shutdown();
}
} | class NodeAdminStateUpdater extends AbstractComponent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private State currentState = RESUMED;
private State wantedState = RESUMED;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
private final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdminStateUpdater.class);
private Thread loopThread;
private final NodeRepository nodeRepository;
private final NodeAdmin nodeAdmin;
private final Clock clock;
private final Orchestrator orchestrator;
private final String dockerHostHostName;
private long delaysBetweenEachTickMillis = 30_000;
private Instant lastTick;
public NodeAdminStateUpdater(
final NodeRepository nodeRepository,
final NodeAdmin nodeAdmin,
Clock clock,
Orchestrator orchestrator,
String dockerHostHostName) {
this.nodeRepository = nodeRepository;
this.nodeAdmin = nodeAdmin;
this.clock = clock;
this.orchestrator = orchestrator;
this.dockerHostHostName = dockerHostHostName;
this.lastTick = clock.instant();
}
public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED}
public Map<String, Object> getDebugPage() {
Map<String, Object> debug = new LinkedHashMap<>();
synchronized (monitor) {
debug.put("dockerHostHostName", dockerHostHostName);
debug.put("NodeAdmin", nodeAdmin.debugInfo());
debug.put("Wanted State: ", wantedState);
debug.put("Current State: ", currentState);
}
return debug;
}
public boolean setResumeStateAndCheckIfResumed(State wantedState) {
synchronized (monitor) {
if (this.wantedState != wantedState) {
this.wantedState = wantedState;
signalWorkToBeDone();
}
return currentState == wantedState;
}
}
void signalWorkToBeDone() {
synchronized (monitor) {
if (! workToDoNow) {
workToDoNow = true;
monitor.notifyAll();
}
}
}
void tick() {
State wantedState = null;
synchronized (monitor) {
while (! workToDoNow) {
long remainder = delaysBetweenEachTickMillis - Duration.between(lastTick, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: NodeAdminStateUpdater");
}
} else break;
}
lastTick = clock.instant();
workToDoNow = false;
if (currentState != this.wantedState) {
wantedState = this.wantedState;
}
}
if (wantedState != null) {
try {
convergeState(wantedState);
} catch (Exception e) {
logger.error("Failed to converge NodeAdminStateUpdater", e);
}
}
}
/**
* This method attempts to converge NodeAgent's and NodeAdmin's frozen state with their orchestrator
* state. When trying to suspend node-admin, this method will first attempt to freeze all NodeAgents and
* NodeAdmin, then asking orchestrator for permission to suspend node-admin app, and finally asking orchestrator
* for permission to suspend all active nodes on this host, if either of the request is denied,
* this method will unfreeze NodeAgents and NodeAdmin.
*/
private void fetchContainersToRunFromNodeRepository() {
synchronized (monitor) {
if (currentState != RESUMED) {
logger.info("Frozen, skipping fetching info from node repository");
return;
}
final List<ContainerNodeSpec> containersToRun;
try {
containersToRun = nodeRepository.getContainersToRun();
} catch (Throwable t) {
logger.warning("Failed fetching container info from node repository", t);
return;
}
if (containersToRun == null) {
logger.warning("Got null from node repository");
return;
}
try {
nodeAdmin.refreshContainersToRun(containersToRun);
} catch (Throwable t) {
logger.warning("Failed updating node admin: ", t);
}
}
}
private List<String> getNodesInActiveState() throws IOException {
return nodeRepository.getContainersToRun()
.stream()
.filter(nodespec -> nodespec.nodeState == Node.State.active)
.map(nodespec -> nodespec.hostname)
.collect(Collectors.toList());
}
public void start(long stateConvergeInterval, long fetchContainersInterval) {
delaysBetweenEachTickMillis = stateConvergeInterval;
if (loopThread != null) {
throw new RuntimeException("Can not restart NodeAdminStateUpdater");
}
loopThread = new Thread(() -> {
while (! terminated.get()) tick();
});
loopThread.setName("tick-NodeAdminStateUpdater");
loopThread.start();
scheduler.scheduleWithFixedDelay(
this::fetchContainersToRunFromNodeRepository,
0,
fetchContainersInterval,
MILLISECONDS);
}
@Override
public void deconstruct() {
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop NodeAdminStateUpdater tick thread");
}
scheduler.shutdown();
if (! scheduler.awaitTermination(30, TimeUnit.SECONDS)) {
throw new RuntimeException("Could not stop NodeAdminStateUpdater fetch containers scheduler.");
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop NodeAdminStateUpdater thread");
}
nodeAdmin.shutdown();
}
} |
`toFullString()` | public String toString() {
return String.join(" ", type.toString(), id.toString(),
groupId.map(Group::toString).orElse(""),
vespaVersion.orElse(Version.emptyVersion).toString());
} | vespaVersion.orElse(Version.emptyVersion).toString()); | public String toString() {
return String.join(" ", type.toString(), id.toString(),
groupId.map(Group::toString).orElse(""),
vespaVersion.orElse(Version.emptyVersion).toString());
} | class ClusterSpec {
private final Type type;
private final Id id;
/** The group id of these hosts, or empty if this represents a request for hosts */
private final Optional<Group> groupId;
private final Optional<Version> vespaVersion;
/**
 * Creates a cluster specification. Private: use the static factory methods
 * {@code request}, {@code requestVersion} or {@code from}.
 */
private ClusterSpec(Type type, Id id, Optional<Group> groupId, Optional<Version> vespaVersion) {
    this.type = type;
    this.id = id;
    this.groupId = groupId;
    this.vespaVersion = vespaVersion;
}
/** Returns the cluster type */
public Type type() { return type; }
/** Returns the cluster id */
public Id id() { return id; }
/** Returns the wanted Vespa version of this cluster, or empty if not specified */
public Optional<Version> vespaVersion() { return vespaVersion; }
/** Returns the docker image implied by the wanted Vespa version, or empty if no version is wanted */
public Optional<String> dockerImage() {
    return vespaVersion.map(DockerImage.defaultImage::withTag).map(DockerImage::toString);
}
/** Returns the group within the cluster this specifies, or empty to specify the whole cluster */
public Optional<Group> group() { return groupId; }
/** Returns a copy of this with the group set to the given value */
public ClusterSpec changeGroup(Optional<Group> newGroup) { return new ClusterSpec(type, id, newGroup, vespaVersion); }
/**
 * Create a specification <b>requesting</b> a cluster with these attributes.
 *
 * @deprecated use {@link #requestVersion} instead
 */
@Deprecated
public static ClusterSpec request(Type type, Id id, Optional<String> dockerImage) {
    return requestVersion(type, id, dockerImage.map(DockerImage::new).map(DockerImage::tagAsVersion));
}
/** Create a specification <b>requesting</b> a cluster with these attributes */
public static ClusterSpec requestVersion(Type type, Id id, Optional<Version> vespaVersion) {
    return new ClusterSpec(type, id, Optional.empty(), vespaVersion);
}
/** Create a specification <b>specifying</b> an existing cluster group having these attributes */
public static ClusterSpec from(Type type, Id id, Group groupId, Optional<Version> vespaVersion) {
    return new ClusterSpec(type, id, Optional.of(groupId), vespaVersion);
}
@Override
@Override
// vespaVersion is intentionally excluded; equal objects still produce equal hashes
// since equals() also requires type, id and groupId to match, so the contract holds
public int hashCode() { return type.hashCode() + 17 * id.hashCode() + 31 * groupId.hashCode(); }
@Override
public boolean equals(Object o) {
    if (o == this) return true;
    if ( ! (o instanceof ClusterSpec)) return false;
    // Equal iff all four components match
    ClusterSpec other = (ClusterSpec)o;
    return this.type.equals(other.type)
        && this.id.equals(other.id)
        && this.groupId.equals(other.groupId)
        && this.vespaVersion.equals(other.vespaVersion);
}
/** Returns whether this is equal, disregarding the group value and wanted Vespa version */
public boolean equalsIgnoringGroupAndVespaVersion(Object o) {
    if (o == this) return true;
    if ( ! (o instanceof ClusterSpec)) return false;
    // Only type and id participate in this comparison
    ClusterSpec other = (ClusterSpec)o;
    return this.type.equals(other.type) && this.id.equals(other.id);
}
/** A cluster type */
public enum Type {
admin,
container,
content;
public static Type from(String typeName) {
switch (typeName) {
case "admin" : return admin;
case "container" : return container;
case "content" : return content;
default: throw new IllegalArgumentException("Illegal cluster type '" + typeName + "'");
}
}
}
public static final class Id {
private final String id;
public Id(String id) {
Objects.requireNonNull(id, "Id cannot be null");
this.id = id;
}
public static Id from(String id) {
return new Id(id);
}
public String value() { return id; }
@Override
public String toString() { return "cluster '" + id + "'"; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return ((Id)o).id.equals(this.id);
}
@Override
public int hashCode() {
return id.hashCode();
}
}
/** Identifier of a group within a cluster */
@SuppressWarnings("deprecation")
public static final class Group {
private final int index;
private Group(int index) {
this.index = index;
}
public static Group from(int index) { return new Group(index); }
public int index() { return index; }
@Override
public String toString() { return "group " + index; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return ((Group)o).index == this.index;
}
@Override
public int hashCode() { return index; }
}
} | class ClusterSpec {
    /** The service type this cluster runs (admin, container or content). */
    private final Type type;
    /** Application-unique id of this cluster. */
    private final Id id;
    /** The group id of these hosts, or empty if this represents a request for hosts */
    private final Optional<Group> groupId;
    /** The Vespa version wanted for the hosts of this cluster, or empty if unspecified. */
    private final Optional<Version> vespaVersion;
    /** Private: instances are created through the static factory methods below. */
    private ClusterSpec(Type type, Id id, Optional<Group> groupId, Optional<Version> vespaVersion) {
        this.type = type;
        this.id = id;
        this.groupId = groupId;
        this.vespaVersion = vespaVersion;
    }
    /** Returns the cluster type */
    public Type type() { return type; }
    /** Returns the cluster id */
    public Id id() { return id; }
    /** Returns the Vespa version wanted for this cluster, or empty if unspecified */
    public Optional<Version> vespaVersion() { return vespaVersion; }
    /** Returns the docker image implied by the wanted Vespa version (default image tagged with it), if any */
    public Optional<String> dockerImage() {
        return vespaVersion.map(DockerImage.defaultImage::withTag).map(DockerImage::toString);
    }
    /** Returns the group within the cluster this specifies, or empty to specify the whole cluster */
    public Optional<Group> group() { return groupId; }
    /** Returns a copy of this with the group replaced by the given value */
    public ClusterSpec changeGroup(Optional<Group> newGroup) { return new ClusterSpec(type, id, newGroup, vespaVersion); }
    /**
     * Create a specification <b>requesting</b> a cluster with these attributes
     * @deprecated use {@link #requestVersion} and pass the wanted version directly
     */
    @Deprecated
    public static ClusterSpec request(Type type, Id id, Optional<String> dockerImage) {
        return requestVersion(type, id, dockerImage.map(DockerImage::new).map(DockerImage::tagAsVersion));
    }
    /** Create a specification <b>requesting</b> a cluster with these attributes */
    public static ClusterSpec requestVersion(Type type, Id id, Optional<Version> vespaVersion) {
        return new ClusterSpec(type, id, Optional.empty(), vespaVersion);
    }
    /** Create a specification <b>specifying</b> an existing cluster group having these attributes */
    public static ClusterSpec from(Type type, Id id, Group groupId, Optional<Version> vespaVersion) {
        return new ClusterSpec(type, id, Optional.of(groupId), vespaVersion);
    }
@Override
@Override
public int hashCode() { return type.hashCode() + 17 * id.hashCode() + 31 * groupId.hashCode(); }
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof ClusterSpec)) return false;
ClusterSpec other = (ClusterSpec)o;
if ( ! other.type.equals(this.type)) return false;
if ( ! other.id.equals(this.id)) return false;
if ( ! other.groupId.equals(this.groupId)) return false;
if ( ! other.vespaVersion.equals(this.vespaVersion)) return false;
return true;
}
/** Returns whether this is equal, disregarding the group value and wanted Vespa version */
public boolean equalsIgnoringGroupAndVespaVersion(Object o) {
if (o == this) return true;
if ( ! (o instanceof ClusterSpec)) return false;
ClusterSpec other = (ClusterSpec)o;
if ( ! other.type.equals(this.type)) return false;
if ( ! other.id.equals(this.id)) return false;
return true;
}
/** A cluster type */
public enum Type {
admin,
container,
content;
public static Type from(String typeName) {
switch (typeName) {
case "admin" : return admin;
case "container" : return container;
case "content" : return content;
default: throw new IllegalArgumentException("Illegal cluster type '" + typeName + "'");
}
}
}
public static final class Id {
private final String id;
public Id(String id) {
Objects.requireNonNull(id, "Id cannot be null");
this.id = id;
}
public static Id from(String id) {
return new Id(id);
}
public String value() { return id; }
@Override
public String toString() { return "cluster '" + id + "'"; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return ((Id)o).id.equals(this.id);
}
@Override
public int hashCode() {
return id.hashCode();
}
}
/** Identifier of a group within a cluster */
@SuppressWarnings("deprecation")
public static final class Group {
private final int index;
private Group(int index) {
this.index = index;
}
public static Group from(int index) { return new Group(index); }
public int index() { return index; }
@Override
public String toString() { return "group " + index; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return ((Group)o).index == this.index;
}
@Override
public int hashCode() { return index; }
}
} |
`toFullString()` | private void toSlime(Allocation allocation, Cursor object) {
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.isRemovable());
allocation.membership().cluster().vespaVersion()
.ifPresent(version -> object.setString(wantedVespaVersionKey, version.toString()));
} | .ifPresent(version -> object.setString(wantedVespaVersionKey, version.toString())); | private void toSlime(Allocation allocation, Cursor object) {
object.setString(tenantIdKey, allocation.owner().tenant().value());
object.setString(applicationIdKey, allocation.owner().application().value());
object.setString(instanceIdKey, allocation.owner().instance().value());
object.setString(serviceIdKey, allocation.membership().stringValue());
object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
object.setBool(removableKey, allocation.isRemovable());
allocation.membership().cluster().vespaVersion()
.ifPresent(version -> object.setString(wantedVespaVersionKey, version.toString()));
} | class NodeSerializer {
    /** The configured node flavors */
    private final NodeFlavors flavors;
    // Field names of the serialized (JSON) node form.
    // NOTE(review): these keys are a persisted wire format — renaming any of them breaks
    // deserialization of already-stored nodes.
    private static final String hostnameKey = "hostname";
    private static final String ipAddressesKey = "ipAddresses";
    private static final String openStackIdKey = "openStackId";
    private static final String parentHostnameKey = "parentHostname";
    private static final String historyKey = "history";
    private static final String instanceKey = "instance";
    private static final String rebootGenerationKey = "rebootGeneration";
    private static final String currentRebootGenerationKey = "currentRebootGeneration";
    private static final String vespaVersionKey = "vespaVersion";
    private static final String hostedVersionKey = "hostedVersion";
    private static final String stateVersionKey = "stateVersion";
    private static final String failCountKey = "failCount";
    private static final String hardwareFailureKey = "hardwareFailure";
    private static final String nodeTypeKey = "type";
    private static final String wantToRetireKey = "wantToRetire";
    private static final String flavorKey = "flavor";
    // Allocation (the "instance" sub-object) fields
    private static final String tenantIdKey = "tenantId";
    private static final String applicationIdKey = "applicationId";
    private static final String instanceIdKey = "instanceId";
    private static final String serviceIdKey = "serviceId";
    private static final String restartGenerationKey = "restartGeneration";
    private static final String currentRestartGenerationKey = "currentRestartGeneration";
    private static final String removableKey = "removable";
    private static final String dockerImageKey = "dockerImage";
    private static final String wantedVespaVersionKey = "wantedVespaVersion";
    // History event fields ("type" here is an event field, distinct from the node-level nodeTypeKey)
    private static final String historyEventTypeKey = "type";
    private static final String atKey = "at";
    private static final String agentKey = "agent";
    public NodeSerializer(NodeFlavors flavors) {
        this.flavors = flavors;
    }
public byte[] toJson(Node node) {
try {
Slime slime = new Slime();
toSlime(node, slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
    /**
     * Serializes a node into the given object cursor.
     * Optional attributes (parent hostname, versions, docker image, hardware failure,
     * allocation) are omitted entirely when absent rather than written as null.
     */
    private void toSlime(Node node, Cursor object) {
        object.setString(hostnameKey, node.hostname());
        toSlime(node.ipAddresses(), object.setArray(ipAddressesKey));
        object.setString(openStackIdKey, node.openStackId());
        node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
        object.setString(flavorKey, node.flavor().name());
        object.setLong(rebootGenerationKey, node.status().reboot().wanted());
        object.setLong(currentRebootGenerationKey, node.status().reboot().current());
        node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
        node.status().hostedVersion().ifPresent(version -> object.setString(hostedVersionKey, version.toString()));
        node.status().stateVersion().ifPresent(version -> object.setString(stateVersionKey, version));
        node.status().dockerImage().ifPresent(image -> object.setString(dockerImageKey, image));
        object.setLong(failCountKey, node.status().failCount());
        node.status().hardwareFailure().ifPresent(failure -> object.setString(hardwareFailureKey, toString(failure)));
        object.setBool(wantToRetireKey, node.status().wantToRetire());
        node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
        toSlime(node.history(), object.setArray(historyKey));
        object.setString(nodeTypeKey, toString(node.type()));
    }
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events())
toSlime(event, array.addObject());
}
    /** Serializes one history event as its type name, epoch-millisecond timestamp and agent name. */
    private void toSlime(History.Event event, Cursor object) {
        object.setString(historyEventTypeKey, toString(event.type()));
        object.setLong(atKey, event.at().toEpochMilli());
        object.setString(agentKey, toString(event.agent()));
    }
private void toSlime(Set<String> ipAddresses, Cursor array) {
ipAddresses.forEach(array::addString);
}
    /**
     * Deserializes a node from its stored JSON form.
     * The state is supplied by the caller rather than read from the data — presumably it is
     * encoded in the storage path instead; confirm against the repository layout.
     */
    public Node fromJson(Node.State state, byte[] data) {
        return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get());
    }
    /** Reconstructs a node from its slime form; argument order must match the Node constructor. */
    private Node nodeFromSlime(Node.State state, Inspector object) {
        return new Node(object.field(openStackIdKey).asString(),
                        ipAddressesFromSlime(object),
                        object.field(hostnameKey).asString(),
                        parentHostnameFromSlime(object),
                        flavorFromSlime(object),
                        statusFromSlime(object),
                        state,
                        allocationFromSlime(object.field(instanceKey)),
                        historyFromSlime(object.field(historyKey)),
                        nodeTypeFromString(object.field(nodeTypeKey).asString()));
    }
    /**
     * Reconstructs the node status from its slime form.
     * optionalString is a helper defined elsewhere in this class.
     */
    private Status statusFromSlime(Inspector object) {
        return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
                          softwareVersionFromSlime(object.field(vespaVersionKey)),
                          softwareVersionFromSlime(object.field(hostedVersionKey)),
                          optionalString(object.field(stateVersionKey)),
                          optionalString(object.field(dockerImageKey)),
                          (int)object.field(failCountKey).asLong(),
                          hardwareFailureFromSlime(object.field(hardwareFailureKey)),
                          object.field(wantToRetireKey).asBool());
    }
    /** Looks up the serialized flavor name among the configured flavors; throws if it is unknown. */
    private Flavor flavorFromSlime(Inspector object) {
        return flavors.getFlavorOrThrow(object.field(flavorKey).asString());
    }
    /** Reconstructs the allocation, or returns empty when the "instance" sub-object is absent (unallocated node). */
    private Optional<Allocation> allocationFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(new Allocation(applicationIdFromSlime(object),
                                          clusterMembershipFromSlime(object),
                                          generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
                                          object.field(removableKey).asBool()));
    }
    /** Reconstructs the owning application id from its tenant, application and instance name fields. */
    private ApplicationId applicationIdFromSlime(Inspector object) {
        return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
                                  ApplicationName.from(object.field(applicationIdKey).asString()),
                                  InstanceName.from(object.field(instanceIdKey).asString()));
    }
private History historyFromSlime(Inspector array) {
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
History.Event event = eventFromSlime(item);
if (event != null)
events.add(event);
});
return new History(events);
}
    /**
     * Reconstructs one history event.
     * The null branch exists to skip event types that should be ignored; note that as written,
     * eventTypeFromString never actually returns null (it throws for unknown names), so the
     * branch is currently unreachable.
     */
    private History.Event eventFromSlime(Inspector object) {
        History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
        if (type == null) return null;
        Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
        Agent agent = eventAgentFromSlime(object.field(agentKey));
        return new History.Event(type, agent, at);
    }
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
Inspector current = object.field(currentField);
return new Generation(object.field(wantedField).asLong(), current.asLong());
}
    /**
     * Reconstructs the cluster membership.
     * Legacy fallback: if the allocation still carries the old dockerImage field, the wanted
     * Vespa version is derived from the image tag; otherwise it is read directly from
     * wantedVespaVersion.
     */
    private ClusterMembership clusterMembershipFromSlime(Inspector object) {
        Optional<Version> vespaVersion;
        if (object.field(dockerImageKey).valid()) {
            vespaVersion = optionalString(object.field(dockerImageKey))
                    .map(DockerImage::new)
                    .map(DockerImage::tagAsVersion);
        } else {
            vespaVersion = softwareVersionFromSlime(object.field(wantedVespaVersionKey));
        }
        return ClusterMembership.fromVersion(object.field(serviceIdKey).asString(), vespaVersion);
    }
private Optional<Version> softwareVersionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(Version.fromString(object.asString()));
}
private Optional<String> parentHostnameFromSlime(Inspector object) {
if (object.field(parentHostnameKey).valid())
return Optional.of(object.field(parentHostnameKey).asString());
else
return Optional.empty();
}
    /** Reconstructs the node's IP addresses as an immutable set, preserving serialized order. */
    private Set<String> ipAddressesFromSlime(Inspector object) {
        ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder();
        object.field(ipAddressesKey).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
        return ipAddresses.build();
    }
private Optional<Status.HardwareFailureType> hardwareFailureFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(hardwareFailureFromString(object.asString()));
}
    /**
     * Returns the event type with the given serialized name.
     * NOTE(review): the previous javadoc claimed unknown types are signalled by returning null
     * (and eventFromSlime null-checks accordingly), but this implementation throws instead —
     * confirm which contract is intended.
     *
     * @throws IllegalArgumentException if the name is not a known event type
     */
    private History.Event.Type eventTypeFromString(String eventTypeString) {
        switch (eventTypeString) {
            case "readied" : return History.Event.Type.readied;
            case "reserved" : return History.Event.Type.reserved;
            case "activated" : return History.Event.Type.activated;
            case "retired" : return History.Event.Type.retired;
            case "deactivated" : return History.Event.Type.deactivated;
            case "failed" : return History.Event.Type.failed;
            case "deallocated" : return History.Event.Type.deallocated;
            case "down" : return History.Event.Type.down;
            case "requested" : return History.Event.Type.requested;
            case "rebooted" : return History.Event.Type.rebooted;
        }
        throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
    }
    /**
     * Returns the serialized name of an event type.
     * These names are a persisted wire format and must stay stable; new enum constants must be
     * added here (and in eventTypeFromString) or serialization throws.
     */
    private String toString(History.Event.Type nodeEventType) {
        switch (nodeEventType) {
            case readied : return "readied";
            case reserved : return "reserved";
            case activated : return "activated";
            case retired : return "retired";
            case deactivated : return "deactivated";
            case failed : return "failed";
            case deallocated : return "deallocated";
            case down : return "down";
            case requested: return "requested";
            case rebooted: return "rebooted";
        }
        throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined");
    }
    /**
     * Deserializes the agent which caused an event.
     * A missing agent field maps to Agent.system — presumably for data written before the agent
     * was recorded; confirm against older serialized forms.
     */
    private Agent eventAgentFromSlime(Inspector eventAgentField) {
        if ( ! eventAgentField.valid()) return Agent.system;
        switch (eventAgentField.asString()) {
            case "application" : return Agent.application;
            case "system" : return Agent.system;
            case "operator" : return Agent.operator;
        }
        throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
    }
    /** Returns the serialized name of an agent; part of the persisted wire format. */
    private String toString(Agent agent) {
        switch (agent) {
            case application : return "application";
            case system : return "system";
            case operator : return "operator";
        }
        throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
    }
    /** Deserializes a node type; throws IllegalArgumentException for unknown names. */
    private NodeType nodeTypeFromString(String typeString) {
        switch (typeString) {
            case "tenant" : return NodeType.tenant;
            case "host" : return NodeType.host;
            case "proxy" : return NodeType.proxy;
            default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
        }
    }
    /** Returns the serialized name of a node type; part of the persisted wire format. */
    private String toString(NodeType type) {
        switch (type) {
            case tenant: return "tenant";
            case host: return "host";
            case proxy: return "proxy";
        }
        throw new IllegalArgumentException("Serialized form of '" + type + "' not defined");
    }
    /** Deserializes a hardware failure type; throws IllegalArgumentException for unknown names. */
    private Status.HardwareFailureType hardwareFailureFromString(String hardwareFailureString) {
        switch (hardwareFailureString) {
            case "memory_mcelog" : return Status.HardwareFailureType.memory_mcelog;
            case "disk_smart" : return Status.HardwareFailureType.disk_smart;
            case "disk_kernel" : return Status.HardwareFailureType.disk_kernel;
            case "unknown" : return Status.HardwareFailureType.unknown;
            default : throw new IllegalArgumentException("Unknown hardware failure '" + hardwareFailureString + "'");
        }
    }
private String toString(Status.HardwareFailureType type) {
switch (type) {
case memory_mcelog: return "memory_mcelog";
case disk_smart: return "disk_smart";
case disk_kernel: return "disk_kernel";
case unknown: return "unknown";
default : throw new IllegalArgumentException("Serialized form of '" + type + " not defined");
}
}
} | class NodeSerializer {
/** The configured node flavors */
private final NodeFlavors flavors;
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String openStackIdKey = "openStackId";
private static final String parentHostnameKey = "parentHostname";
private static final String historyKey = "history";
private static final String instanceKey = "instance";
private static final String rebootGenerationKey = "rebootGeneration";
private static final String currentRebootGenerationKey = "currentRebootGeneration";
private static final String vespaVersionKey = "vespaVersion";
private static final String hostedVersionKey = "hostedVersion";
private static final String stateVersionKey = "stateVersion";
private static final String failCountKey = "failCount";
private static final String hardwareFailureKey = "hardwareFailure";
private static final String nodeTypeKey = "type";
private static final String wantToRetireKey = "wantToRetire";
private static final String flavorKey = "flavor";
private static final String tenantIdKey = "tenantId";
private static final String applicationIdKey = "applicationId";
private static final String instanceIdKey = "instanceId";
private static final String serviceIdKey = "serviceId";
private static final String restartGenerationKey = "restartGeneration";
private static final String currentRestartGenerationKey = "currentRestartGeneration";
private static final String removableKey = "removable";
private static final String dockerImageKey = "dockerImage";
private static final String wantedVespaVersionKey = "wantedVespaVersion";
private static final String historyEventTypeKey = "type";
private static final String atKey = "at";
private static final String agentKey = "agent";
public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
public byte[] toJson(Node node) {
try {
Slime slime = new Slime();
toSlime(node, slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
catch (IOException e) {
throw new RuntimeException("Serialization of " + node + " to json failed", e);
}
}
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
toSlime(node.ipAddresses(), object.setArray(ipAddressesKey));
object.setString(openStackIdKey, node.openStackId());
node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
object.setString(flavorKey, node.flavor().name());
object.setLong(rebootGenerationKey, node.status().reboot().wanted());
object.setLong(currentRebootGenerationKey, node.status().reboot().current());
node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
node.status().hostedVersion().ifPresent(version -> object.setString(hostedVersionKey, version.toString()));
node.status().stateVersion().ifPresent(version -> object.setString(stateVersionKey, version));
node.status().dockerImage().ifPresent(image -> object.setString(dockerImageKey, image));
object.setLong(failCountKey, node.status().failCount());
node.status().hardwareFailure().ifPresent(failure -> object.setString(hardwareFailureKey, toString(failure)));
object.setBool(wantToRetireKey, node.status().wantToRetire());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history(), object.setArray(historyKey));
object.setString(nodeTypeKey, toString(node.type()));
}
private void toSlime(History history, Cursor array) {
for (History.Event event : history.events())
toSlime(event, array.addObject());
}
private void toSlime(History.Event event, Cursor object) {
object.setString(historyEventTypeKey, toString(event.type()));
object.setLong(atKey, event.at().toEpochMilli());
object.setString(agentKey, toString(event.agent()));
}
private void toSlime(Set<String> ipAddresses, Cursor array) {
ipAddresses.forEach(array::addString);
}
public Node fromJson(Node.State state, byte[] data) {
return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get());
}
private Node nodeFromSlime(Node.State state, Inspector object) {
return new Node(object.field(openStackIdKey).asString(),
ipAddressesFromSlime(object),
object.field(hostnameKey).asString(),
parentHostnameFromSlime(object),
flavorFromSlime(object),
statusFromSlime(object),
state,
allocationFromSlime(object.field(instanceKey)),
historyFromSlime(object.field(historyKey)),
nodeTypeFromString(object.field(nodeTypeKey).asString()));
}
private Status statusFromSlime(Inspector object) {
return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
softwareVersionFromSlime(object.field(vespaVersionKey)),
softwareVersionFromSlime(object.field(hostedVersionKey)),
optionalString(object.field(stateVersionKey)),
optionalString(object.field(dockerImageKey)),
(int)object.field(failCountKey).asLong(),
hardwareFailureFromSlime(object.field(hardwareFailureKey)),
object.field(wantToRetireKey).asBool());
}
private Flavor flavorFromSlime(Inspector object) {
return flavors.getFlavorOrThrow(object.field(flavorKey).asString());
}
private Optional<Allocation> allocationFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(new Allocation(applicationIdFromSlime(object),
clusterMembershipFromSlime(object),
generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
object.field(removableKey).asBool()));
}
private ApplicationId applicationIdFromSlime(Inspector object) {
return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
ApplicationName.from(object.field(applicationIdKey).asString()),
InstanceName.from(object.field(instanceIdKey).asString()));
}
private History historyFromSlime(Inspector array) {
List<History.Event> events = new ArrayList<>();
array.traverse((ArrayTraverser) (int i, Inspector item) -> {
History.Event event = eventFromSlime(item);
if (event != null)
events.add(event);
});
return new History(events);
}
private History.Event eventFromSlime(Inspector object) {
History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
if (type == null) return null;
Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
Agent agent = eventAgentFromSlime(object.field(agentKey));
return new History.Event(type, agent, at);
}
private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
Inspector current = object.field(currentField);
return new Generation(object.field(wantedField).asLong(), current.asLong());
}
private ClusterMembership clusterMembershipFromSlime(Inspector object) {
Optional<Version> vespaVersion;
if (object.field(dockerImageKey).valid()) {
vespaVersion = optionalString(object.field(dockerImageKey))
.map(DockerImage::new)
.map(DockerImage::tagAsVersion);
} else {
vespaVersion = softwareVersionFromSlime(object.field(wantedVespaVersionKey));
}
return ClusterMembership.fromVersion(object.field(serviceIdKey).asString(), vespaVersion);
}
private Optional<Version> softwareVersionFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(Version.fromString(object.asString()));
}
private Optional<String> parentHostnameFromSlime(Inspector object) {
if (object.field(parentHostnameKey).valid())
return Optional.of(object.field(parentHostnameKey).asString());
else
return Optional.empty();
}
private Set<String> ipAddressesFromSlime(Inspector object) {
ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder();
object.field(ipAddressesKey).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
return ipAddresses.build();
}
private Optional<Status.HardwareFailureType> hardwareFailureFromSlime(Inspector object) {
if ( ! object.valid()) return Optional.empty();
return Optional.of(hardwareFailureFromString(object.asString()));
}
/** Returns the event type, or null if this event type should be ignored */
private History.Event.Type eventTypeFromString(String eventTypeString) {
switch (eventTypeString) {
case "readied" : return History.Event.Type.readied;
case "reserved" : return History.Event.Type.reserved;
case "activated" : return History.Event.Type.activated;
case "retired" : return History.Event.Type.retired;
case "deactivated" : return History.Event.Type.deactivated;
case "failed" : return History.Event.Type.failed;
case "deallocated" : return History.Event.Type.deallocated;
case "down" : return History.Event.Type.down;
case "requested" : return History.Event.Type.requested;
case "rebooted" : return History.Event.Type.rebooted;
}
throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
}
private String toString(History.Event.Type nodeEventType) {
switch (nodeEventType) {
case readied : return "readied";
case reserved : return "reserved";
case activated : return "activated";
case retired : return "retired";
case deactivated : return "deactivated";
case failed : return "failed";
case deallocated : return "deallocated";
case down : return "down";
case requested: return "requested";
case rebooted: return "rebooted";
}
throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined");
}
private Agent eventAgentFromSlime(Inspector eventAgentField) {
if ( ! eventAgentField.valid()) return Agent.system;
switch (eventAgentField.asString()) {
case "application" : return Agent.application;
case "system" : return Agent.system;
case "operator" : return Agent.operator;
}
throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
}
private String toString(Agent agent) {
switch (agent) {
case application : return "application";
case system : return "system";
case operator : return "operator";
}
throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
}
private NodeType nodeTypeFromString(String typeString) {
switch (typeString) {
case "tenant" : return NodeType.tenant;
case "host" : return NodeType.host;
case "proxy" : return NodeType.proxy;
default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
}
}
private String toString(NodeType type) {
switch (type) {
case tenant: return "tenant";
case host: return "host";
case proxy: return "proxy";
}
throw new IllegalArgumentException("Serialized form of '" + type + "' not defined");
}
private Status.HardwareFailureType hardwareFailureFromString(String hardwareFailureString) {
switch (hardwareFailureString) {
case "memory_mcelog" : return Status.HardwareFailureType.memory_mcelog;
case "disk_smart" : return Status.HardwareFailureType.disk_smart;
case "disk_kernel" : return Status.HardwareFailureType.disk_kernel;
case "unknown" : return Status.HardwareFailureType.unknown;
default : throw new IllegalArgumentException("Unknown hardware failure '" + hardwareFailureString + "'");
}
}
private String toString(Status.HardwareFailureType type) {
switch (type) {
case memory_mcelog: return "memory_mcelog";
case disk_smart: return "disk_smart";
case disk_kernel: return "disk_kernel";
case unknown: return "unknown";
default : throw new IllegalArgumentException("Serialized form of '" + type + " not defined");
}
}
} |
It doesn't really matter here ... only when Version is used to look up in artifactory. | public String toString() {
return String.join(" ", type.toString(), id.toString(),
groupId.map(Group::toString).orElse(""),
vespaVersion.orElse(Version.emptyVersion).toString());
} | vespaVersion.orElse(Version.emptyVersion).toString()); | public String toString() {
return String.join(" ", type.toString(), id.toString(),
groupId.map(Group::toString).orElse(""),
vespaVersion.orElse(Version.emptyVersion).toString());
} | class ClusterSpec {
private final Type type;
private final Id id;
/** The group id of these hosts, or empty if this is represents a request for hosts */
private final Optional<Group> groupId;
private final Optional<Version> vespaVersion;
private ClusterSpec(Type type, Id id, Optional<Group> groupId, Optional<Version> vespaVersion) {
this.type = type;
this.id = id;
this.groupId = groupId;
this.vespaVersion = vespaVersion;
}
/** Returns the cluster type */
public Type type() { return type; }
/** Returns the cluster id */
public Id id() { return id; }
public Optional<Version> vespaVersion() { return vespaVersion; }
public Optional<String> dockerImage() {
return vespaVersion.map(DockerImage.defaultImage::withTag).map(DockerImage::toString);
}
/** Returns the group within the cluster this specifies, or empty to specify the whole cluster */
public Optional<Group> group() { return groupId; }
public ClusterSpec changeGroup(Optional<Group> newGroup) { return new ClusterSpec(type, id, newGroup, vespaVersion); }
/** Create a specification <b>requesting</b> a cluster with these attributes */
@Deprecated
public static ClusterSpec request(Type type, Id id, Optional<String> dockerImage) {
return requestVersion(type, id, dockerImage.map(DockerImage::new).map(DockerImage::tagAsVersion));
}
/** Create a specification <b>requesting</b> a cluster with these attributes */
public static ClusterSpec requestVersion(Type type, Id id, Optional<Version> vespaVersion) {
return new ClusterSpec(type, id, Optional.empty(), vespaVersion);
}
/** Create a specification <b>specifying</b> an existing cluster group having these attributes */
public static ClusterSpec from(Type type, Id id, Group groupId, Optional<Version> vespaVersion) {
return new ClusterSpec(type, id, Optional.of(groupId), vespaVersion);
}
@Override
@Override
public int hashCode() { return type.hashCode() + 17 * id.hashCode() + 31 * groupId.hashCode(); }
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof ClusterSpec)) return false;
ClusterSpec other = (ClusterSpec)o;
if ( ! other.type.equals(this.type)) return false;
if ( ! other.id.equals(this.id)) return false;
if ( ! other.groupId.equals(this.groupId)) return false;
if ( ! other.vespaVersion.equals(this.vespaVersion)) return false;
return true;
}
/** Returns whether this is equal, disregarding the group value and wanted Vespa version */
public boolean equalsIgnoringGroupAndVespaVersion(Object o) {
if (o == this) return true;
if ( ! (o instanceof ClusterSpec)) return false;
ClusterSpec other = (ClusterSpec)o;
if ( ! other.type.equals(this.type)) return false;
if ( ! other.id.equals(this.id)) return false;
return true;
}
/** A cluster type */
public enum Type {
admin,
container,
content;
public static Type from(String typeName) {
switch (typeName) {
case "admin" : return admin;
case "container" : return container;
case "content" : return content;
default: throw new IllegalArgumentException("Illegal cluster type '" + typeName + "'");
}
}
}
public static final class Id {
private final String id;
public Id(String id) {
Objects.requireNonNull(id, "Id cannot be null");
this.id = id;
}
public static Id from(String id) {
return new Id(id);
}
public String value() { return id; }
@Override
public String toString() { return "cluster '" + id + "'"; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return ((Id)o).id.equals(this.id);
}
@Override
public int hashCode() {
return id.hashCode();
}
}
/** Identifier of a group within a cluster */
@SuppressWarnings("deprecation")
public static final class Group {
private final int index;
private Group(int index) {
this.index = index;
}
public static Group from(int index) { return new Group(index); }
public int index() { return index; }
@Override
public String toString() { return "group " + index; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return ((Group)o).index == this.index;
}
@Override
public int hashCode() { return index; }
}
} | class ClusterSpec {
private final Type type;
private final Id id;
/** The group id of these hosts, or empty if this is represents a request for hosts */
private final Optional<Group> groupId;
private final Optional<Version> vespaVersion;
private ClusterSpec(Type type, Id id, Optional<Group> groupId, Optional<Version> vespaVersion) {
this.type = type;
this.id = id;
this.groupId = groupId;
this.vespaVersion = vespaVersion;
}
/** Returns the cluster type */
public Type type() { return type; }
/** Returns the cluster id */
public Id id() { return id; }
public Optional<Version> vespaVersion() { return vespaVersion; }
public Optional<String> dockerImage() {
return vespaVersion.map(DockerImage.defaultImage::withTag).map(DockerImage::toString);
}
/** Returns the group within the cluster this specifies, or empty to specify the whole cluster */
public Optional<Group> group() { return groupId; }
public ClusterSpec changeGroup(Optional<Group> newGroup) { return new ClusterSpec(type, id, newGroup, vespaVersion); }
/** Create a specification <b>requesting</b> a cluster with these attributes */
@Deprecated
public static ClusterSpec request(Type type, Id id, Optional<String> dockerImage) {
return requestVersion(type, id, dockerImage.map(DockerImage::new).map(DockerImage::tagAsVersion));
}
/** Create a specification <b>requesting</b> a cluster with these attributes */
public static ClusterSpec requestVersion(Type type, Id id, Optional<Version> vespaVersion) {
return new ClusterSpec(type, id, Optional.empty(), vespaVersion);
}
/** Create a specification <b>specifying</b> an existing cluster group having these attributes */
public static ClusterSpec from(Type type, Id id, Group groupId, Optional<Version> vespaVersion) {
return new ClusterSpec(type, id, Optional.of(groupId), vespaVersion);
}
@Override
@Override
public int hashCode() { return type.hashCode() + 17 * id.hashCode() + 31 * groupId.hashCode(); }
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof ClusterSpec)) return false;
ClusterSpec other = (ClusterSpec)o;
if ( ! other.type.equals(this.type)) return false;
if ( ! other.id.equals(this.id)) return false;
if ( ! other.groupId.equals(this.groupId)) return false;
if ( ! other.vespaVersion.equals(this.vespaVersion)) return false;
return true;
}
/** Returns whether this is equal, disregarding the group value and wanted Vespa version */
public boolean equalsIgnoringGroupAndVespaVersion(Object o) {
if (o == this) return true;
if ( ! (o instanceof ClusterSpec)) return false;
ClusterSpec other = (ClusterSpec)o;
if ( ! other.type.equals(this.type)) return false;
if ( ! other.id.equals(this.id)) return false;
return true;
}
/** A cluster type */
public enum Type {
admin,
container,
content;
public static Type from(String typeName) {
switch (typeName) {
case "admin" : return admin;
case "container" : return container;
case "content" : return content;
default: throw new IllegalArgumentException("Illegal cluster type '" + typeName + "'");
}
}
}
public static final class Id {
private final String id;
public Id(String id) {
Objects.requireNonNull(id, "Id cannot be null");
this.id = id;
}
public static Id from(String id) {
return new Id(id);
}
public String value() { return id; }
@Override
public String toString() { return "cluster '" + id + "'"; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return ((Id)o).id.equals(this.id);
}
@Override
public int hashCode() {
return id.hashCode();
}
}
/** Identifier of a group within a cluster */
@SuppressWarnings("deprecation")
public static final class Group {
private final int index;
private Group(int index) {
this.index = index;
}
public static Group from(int index) { return new Group(index); }
public int index() { return index; }
@Override
public String toString() { return "group " + index; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
return ((Group)o).index == this.index;
}
@Override
public int hashCode() { return index; }
}
} |
XXX? | public void test_application_maintenance() throws InterruptedException {
ManualClock clock = new ManualClock();
Curator curator = new MockCurator();
Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
this.nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone,
new MockNameResolver().mockAnyLookup());
this.fixture = new Fixture(zone, nodeRepository, nodeFlavors, curator);
createReadyNodes(15, nodeRepository, nodeFlavors);
createHostNodes(2, nodeRepository, nodeFlavors);
fixture.activate();
OperatorChangeApplicationMaintainer maintainer = new OperatorChangeApplicationMaintainer(fixture.deployer, nodeRepository, clock, Duration.ofMinutes(1));
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("No changes -> no redeployments", 0, fixture.deployer.redeployments);
nodeRepository.fail(nodeRepository.getNodes(fixture.app1).get(3).hostname(), Agent.system, "Failing to unit test");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("System change -> no redeployments", 0, fixture.deployer.redeployments);
clock.advance(Duration.ofSeconds(1));
nodeRepository.fail(nodeRepository.getNodes(fixture.app2).get(4).hostname(), Agent.operator, "Manual node failing");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("Operator change -> redeployment", 1, fixture.deployer.redeployments);
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("No further operator changes -> no (new) redeployments", 1, fixture.deployer.redeployments);
} | maintainer.maintain(); | public void test_application_maintenance() throws InterruptedException {
ManualClock clock = new ManualClock();
Curator curator = new MockCurator();
Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
this.nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone,
new MockNameResolver().mockAnyLookup());
this.fixture = new Fixture(zone, nodeRepository, nodeFlavors, curator);
createReadyNodes(15, nodeRepository, nodeFlavors);
createHostNodes(2, nodeRepository, nodeFlavors);
fixture.activate();
OperatorChangeApplicationMaintainer maintainer = new OperatorChangeApplicationMaintainer(fixture.deployer, nodeRepository, clock, Duration.ofMinutes(1));
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("No changes -> no redeployments", 0, fixture.deployer.redeployments);
nodeRepository.fail(nodeRepository.getNodes(fixture.app1).get(3).hostname(), Agent.system, "Failing to unit test");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("System change -> no redeployments", 0, fixture.deployer.redeployments);
clock.advance(Duration.ofSeconds(1));
nodeRepository.fail(nodeRepository.getNodes(fixture.app2).get(4).hostname(), Agent.operator, "Manual node failing");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("Operator change -> redeployment", 1, fixture.deployer.redeployments);
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("No further operator changes -> no (new) redeployments", 1, fixture.deployer.redeployments);
} | class OperatorChangeApplicationMaintainerTest {
private static final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default");
private NodeRepository nodeRepository;
private Fixture fixture;
@Test
private void createReadyNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) {
List<Node> nodes = new ArrayList<>(count);
for (int i = 0; i < count; i++)
nodes.add(nodeRepository.createNode("node" + i, "host" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant));
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
private void createHostNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) {
List<Node> nodes = new ArrayList<>(count);
for (int i = 0; i < count; i++)
nodes.add(nodeRepository.createNode("hostNode" + i, "realHost" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host));
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
private class Fixture {
final NodeRepository nodeRepository;
final NodeRepositoryProvisioner provisioner;
final Curator curator;
final ApplicationId app1 = ApplicationId.from(TenantName.from("foo1"), ApplicationName.from("bar"), InstanceName.from("fuz"));
final ApplicationId app2 = ApplicationId.from(TenantName.from("foo2"), ApplicationName.from("bar"), InstanceName.from("fuz"));
final ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Optional.empty());
final ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Optional.empty());
final int wantedNodesApp1 = 5;
final int wantedNodesApp2 = 7;
MockDeployer deployer;
Fixture(Zone zone, NodeRepository nodeRepository, NodeFlavors flavors, Curator curator) {
this.nodeRepository = nodeRepository;
this.curator = curator;
this.provisioner = new NodeRepositoryProvisioner(nodeRepository, flavors, zone);
}
void activate() {
activate(app1, clusterApp1, wantedNodesApp1, provisioner);
activate(app2, clusterApp2, wantedNodesApp2, provisioner);
assertEquals(wantedNodesApp1, nodeRepository.getNodes(app1, Node.State.active).size());
assertEquals(wantedNodesApp2, nodeRepository.getNodes(app2, Node.State.active).size());
Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1,
Capacity.fromNodeCount(wantedNodesApp1, Optional.of("default")), 1));
apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2,
Capacity.fromNodeCount(wantedNodesApp2, Optional.of("default")), 1));
this.deployer = new MockDeployer(provisioner, apps);
}
private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodeCount, NodeRepositoryProvisioner provisioner) {
List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, Capacity.fromNodeCount(nodeCount), 1, null);
NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
provisioner.activate(transaction, applicationId, hosts);
transaction.commit();
}
void remove(ApplicationId application) {
NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
provisioner.remove(transaction, application);
transaction.commit();
}
NodeList getNodes(Node.State ... states) {
return new NodeList(nodeRepository.getNodes(NodeType.tenant, states));
}
}
} | class OperatorChangeApplicationMaintainerTest {
private static final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default");
private NodeRepository nodeRepository;
private Fixture fixture;
@Test
private void createReadyNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) {
List<Node> nodes = new ArrayList<>(count);
for (int i = 0; i < count; i++)
nodes.add(nodeRepository.createNode("node" + i, "host" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant));
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
private void createHostNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) {
List<Node> nodes = new ArrayList<>(count);
for (int i = 0; i < count; i++)
nodes.add(nodeRepository.createNode("hostNode" + i, "realHost" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host));
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
private class Fixture {
final NodeRepository nodeRepository;
final NodeRepositoryProvisioner provisioner;
final Curator curator;
final ApplicationId app1 = ApplicationId.from(TenantName.from("foo1"), ApplicationName.from("bar"), InstanceName.from("fuz"));
final ApplicationId app2 = ApplicationId.from(TenantName.from("foo2"), ApplicationName.from("bar"), InstanceName.from("fuz"));
final ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Optional.empty());
final ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Optional.empty());
final int wantedNodesApp1 = 5;
final int wantedNodesApp2 = 7;
MockDeployer deployer;
Fixture(Zone zone, NodeRepository nodeRepository, NodeFlavors flavors, Curator curator) {
this.nodeRepository = nodeRepository;
this.curator = curator;
this.provisioner = new NodeRepositoryProvisioner(nodeRepository, flavors, zone);
}
void activate() {
activate(app1, clusterApp1, wantedNodesApp1, provisioner);
activate(app2, clusterApp2, wantedNodesApp2, provisioner);
assertEquals(wantedNodesApp1, nodeRepository.getNodes(app1, Node.State.active).size());
assertEquals(wantedNodesApp2, nodeRepository.getNodes(app2, Node.State.active).size());
Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1,
Capacity.fromNodeCount(wantedNodesApp1, Optional.of("default")), 1));
apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2,
Capacity.fromNodeCount(wantedNodesApp2, Optional.of("default")), 1));
this.deployer = new MockDeployer(provisioner, apps);
}
private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodeCount, NodeRepositoryProvisioner provisioner) {
List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, Capacity.fromNodeCount(nodeCount), 1, null);
NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
provisioner.activate(transaction, applicationId, hosts);
transaction.commit();
}
void remove(ApplicationId application) {
NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
provisioner.remove(transaction, application);
transaction.commit();
}
NodeList getNodes(Node.State ... states) {
return new NodeList(nodeRepository.getNodes(NodeType.tenant, states));
}
}
} |
Consider using com.yahoo.config.model.test.TestUtil.joinLines() to avoid explicit '\n's. | public void throws_exception_on_incompatible_use_of_fastaccess() throws ParseException {
SearchBuilder builder = new SearchBuilder(new RankProfileRegistry());
builder.importString(
"search test {\n" +
" document test { \n" +
" field int_attribute type int { \n" +
" indexing: attribute \n" +
" attribute: fast-access\n" +
" }\n" +
" field predicate_attribute type predicate {\n" +
" indexing: attribute \n" +
" attribute: fast-access\n" +
" }\n" +
" field tensor_attribute type tensor(x[]) {\n" +
" indexing: attribute \n" +
" attribute: fast-access\n" +
" }\n" +
" }\n" +
"}\n");
exceptionRule.expect(IllegalArgumentException.class);
exceptionRule.expectMessage(
"For search 'test': The following attributes have a type that is incompatible " +
"with fast-access: predicate_attribute, tensor_attribute. " +
"Predicate, tensor and reference attributes are incompatible with fast-access.");
builder.build();
} | "search test {\n" + | public void throws_exception_on_incompatible_use_of_fastaccess() throws ParseException {
SearchBuilder builder = new SearchBuilder(new RankProfileRegistry());
builder.importString(
TestUtil.joinLines(
"search parent {",
" document parent {",
" field int_field type int { indexing: attribute }",
" }",
"}"));
builder.importString(
TestUtil.joinLines(
"search test {",
" document test { ",
" field int_attribute type int { ",
" indexing: attribute ",
" attribute: fast-access",
" }",
" field predicate_attribute type predicate {",
" indexing: attribute ",
" attribute: fast-access",
" }",
" field tensor_attribute type tensor(x[]) {",
" indexing: attribute ",
" attribute: fast-access",
" }",
" field reference_attribute type reference<parent> {",
" indexing: attribute ",
" attribute: fast-access",
" }",
" }",
"}"));
exceptionRule.expect(IllegalArgumentException.class);
exceptionRule.expectMessage(
"For search 'test': The following attributes have a type that is incompatible " +
"with fast-access: predicate_attribute, tensor_attribute, reference_attribute. " +
"Predicate, tensor and reference attributes are incompatible with fast-access.");
builder.build();
} | class FastAccessValidatorTest {
@Rule
public final ExpectedException exceptionRule = ExpectedException.none();
@Test
} | class FastAccessValidatorTest {
@Rule
public final ExpectedException exceptionRule = ExpectedException.none();
@Test
} |
Consider testing with a reference field as well. I think it should be enough to call importString() with the parent sd. | public void throws_exception_on_incompatible_use_of_fastaccess() throws ParseException {
SearchBuilder builder = new SearchBuilder(new RankProfileRegistry());
builder.importString(
"search test {\n" +
" document test { \n" +
" field int_attribute type int { \n" +
" indexing: attribute \n" +
" attribute: fast-access\n" +
" }\n" +
" field predicate_attribute type predicate {\n" +
" indexing: attribute \n" +
" attribute: fast-access\n" +
" }\n" +
" field tensor_attribute type tensor(x[]) {\n" +
" indexing: attribute \n" +
" attribute: fast-access\n" +
" }\n" +
" }\n" +
"}\n");
exceptionRule.expect(IllegalArgumentException.class);
exceptionRule.expectMessage(
"For search 'test': The following attributes have a type that is incompatible " +
"with fast-access: predicate_attribute, tensor_attribute. " +
"Predicate, tensor and reference attributes are incompatible with fast-access.");
builder.build();
} | "with fast-access: predicate_attribute, tensor_attribute. " + | public void throws_exception_on_incompatible_use_of_fastaccess() throws ParseException {
SearchBuilder builder = new SearchBuilder(new RankProfileRegistry());
builder.importString(
TestUtil.joinLines(
"search parent {",
" document parent {",
" field int_field type int { indexing: attribute }",
" }",
"}"));
builder.importString(
TestUtil.joinLines(
"search test {",
" document test { ",
" field int_attribute type int { ",
" indexing: attribute ",
" attribute: fast-access",
" }",
" field predicate_attribute type predicate {",
" indexing: attribute ",
" attribute: fast-access",
" }",
" field tensor_attribute type tensor(x[]) {",
" indexing: attribute ",
" attribute: fast-access",
" }",
" field reference_attribute type reference<parent> {",
" indexing: attribute ",
" attribute: fast-access",
" }",
" }",
"}"));
exceptionRule.expect(IllegalArgumentException.class);
exceptionRule.expectMessage(
"For search 'test': The following attributes have a type that is incompatible " +
"with fast-access: predicate_attribute, tensor_attribute, reference_attribute. " +
"Predicate, tensor and reference attributes are incompatible with fast-access.");
builder.build();
} | class FastAccessValidatorTest {
@Rule
public final ExpectedException exceptionRule = ExpectedException.none();
@Test
} | class FastAccessValidatorTest {
@Rule
public final ExpectedException exceptionRule = ExpectedException.none();
@Test
} |
I think it's interesting to know whether the transition is triggered by an operator vs the system too. Something like: agent + " moved ..." | boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
} | .forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName())); | boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
Should we not *default* to prod? An element with region=foo environment=dev no longer matches region foo environment dev. | private boolean matches(Optional<Environment> elementEnvironment, RegionName elementRegion) {
if (elementEnvironment.isPresent() && ! environment.equals(elementEnvironment.get())) return false;
if ( ! elementRegion.isDefault() && ( ! region.equals(elementRegion) || ! environment.equals(Environment.prod))) return false;
return true;
} | if ( ! elementRegion.isDefault() && ( ! region.equals(elementRegion) || ! environment.equals(Environment.prod))) return false; | private boolean matches(Optional<Environment> elementEnvironment, RegionName elementRegion) {
if (elementEnvironment.isPresent() && ! environment.equals(elementEnvironment.get())) return false;
if ( ! elementRegion.isDefault() && ( ! region.equals(elementRegion) || ! environment.equals(Environment.prod))) return false;
return true;
} | class OverrideProcessor implements PreProcessor {
private static final Logger log = Logger.getLogger(OverrideProcessor.class.getName());
private final Environment environment;
private final RegionName region;
private static final String ATTR_ID = "id";
private static final String ATTR_ENV = "environment";
private static final String ATTR_REG = "region";
public OverrideProcessor(Environment environment, RegionName region) {
this.environment = environment;
this.region = region;
}
public Document process(Document input) throws TransformerException {
log.log(LogLevel.DEBUG, "Preprocessing overrides with " + environment + "." + region);
Document ret = Xml.copyDocument(input);
Element root = ret.getDocumentElement();
applyOverrides(root, Context.empty());
return ret;
}
private void applyOverrides(Element parent, Context context) {
context = getParentContext(parent, context);
Map<String, List<Element>> elementsByTagName = elementsByTagNameAndId(XML.getChildren(parent));
retainOverriddenElements(elementsByTagName);
for (Map.Entry<String, List<Element>> entry : elementsByTagName.entrySet()) {
pruneOverrides(parent, entry.getValue(), context);
}
for (Element child : XML.getChildren(parent)) {
applyOverrides(child, context);
child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
}
}
private Context getParentContext(Element parent, Context context) {
Optional<Environment> environment = context.environment;
RegionName region = context.region;
if ( ! environment.isPresent()) {
environment = getEnvironment(parent);
}
if (region.isDefault()) {
region = getRegion(parent);
}
return Context.create(environment, region);
}
/**
* Prune overrides from parent according to deploy override rules.
*
* @param parent Parent {@link Element} above children.
* @param children Children where one {@link Element} will remain as the overriding element
* @param context Current context with environment and region.
*/
private void pruneOverrides(Element parent, List<Element> children, Context context) {
checkConsistentInheritance(children, context);
pruneNonMatchingEnvironmentsAndRegions(parent, children);
retainMostSpecificEnvironmentAndRegion(parent, children, context);
}
/**
* Ensures that environment and region does not change from something non-default to something else.
*/
private void checkConsistentInheritance(List<Element> children, Context context) {
for (Element child : children) {
Optional<Environment> env = getEnvironment(child);
RegionName reg = getRegion(child);
if (env.isPresent() && context.environment.isPresent() && !env.equals(context.environment)) {
throw new IllegalArgumentException("Environment in child (" + env.get() + ") differs from that inherited from parent (" + context.environment + ") at " + child);
}
if (!reg.isDefault() && !context.region.isDefault() && !reg.equals(context.region)) {
throw new IllegalArgumentException("Region in child (" + reg + ") differs from that inherited from parent (" + context.region + ") at " + child);
}
}
}
/**
 * Removes, from both the DOM and the given list, every child whose declared
 * environment/region does not apply to the deployment being processed.
 *
 * @param parent   the DOM parent the children are detached from
 * @param children the candidate children; pruned in place
 */
private void pruneNonMatchingEnvironmentsAndRegions(Element parent, List<Element> children) {
    for (Iterator<Element> it = children.iterator(); it.hasNext(); ) {
        Element candidate = it.next();
        if (matches(getEnvironment(candidate), getRegion(candidate))) continue;
        parent.removeChild(candidate);
        it.remove();
    }
}
/**
 * Keeps only the element(s) with the highest override-match score, removing
 * the rest from the DOM. When no element scores at all, every child is kept.
 *
 * @param parent   the DOM parent the losing children are detached from
 * @param children the candidate children
 * @param context  the inherited environment/region used for scoring
 */
private void retainMostSpecificEnvironmentAndRegion(Element parent, List<Element> children, Context context) {
    List<Element> winners = new ArrayList<>();
    int topScore = 0;
    for (Element candidate : children) {
        topScore = updateBestMatches(winners, candidate, topScore, context);
    }
    if (topScore == 0) return; // no override applies: leave all children in place
    doElementSpecificProcessingOnOverride(winners);
    for (Element candidate : children) {
        if ( ! winners.contains(candidate)) {
            parent.removeChild(candidate);
        }
    }
}
/**
 * Scores the given child and records it among the best matches: a strictly
 * better score replaces the current set, an equal score joins it.
 *
 * @param bestMatches the current best-scoring elements; mutated in place
 * @param child       the element to score
 * @param bestMatch   the best score seen so far
 * @param context     the inherited environment/region used for scoring
 * @return the (possibly updated) best score seen so far
 */
private int updateBestMatches(List<Element> bestMatches, Element child, int bestMatch, Context context) {
    int score = getNumberOfOverrides(child, context);
    if (score < bestMatch) return bestMatch;
    if (score > bestMatch) bestMatches.clear();
    bestMatches.add(child);
    return score;
}
/**
 * Counts how many of the element's effective environment and region (its own
 * attribute when present, otherwise the inherited context value) match the
 * deployment target this processor was created for. Result is 0, 1 or 2.
 */
private int getNumberOfOverrides(Element child, Context context) {
    Optional<Environment> effectiveEnvironment = hasEnvironment(child) ? getEnvironment(child) : context.environment;
    RegionName effectiveRegion = hasRegion(child) ? getRegion(child) : context.region;
    int score = 0;
    if (effectiveEnvironment.isPresent() && environment.equals(effectiveEnvironment.get()))
        score++;
    if ( ! effectiveRegion.isDefault() && region.equals(effectiveRegion))
        score++;
    return score;
}
/** Called on each element which is selected by matching some override condition. */
private void doElementSpecificProcessingOnOverride(List<Element> elements) {
    for (Element element : elements) {
        // A selected, childless <nodes> element gets required="true" set on it.
        if ("nodes".equals(element.getTagName()) && element.getChildNodes().getLength() == 0) {
            element.setAttribute("required", "true");
        }
    }
}
/**
 * Retains only the groups in which at least one element carries an environment
 * or region override attribute; groups without any override are removed from the map.
 *
 * @param elementsByTagName element groups keyed by tag name (+id); pruned in place
 */
private void retainOverriddenElements(Map<String, List<Element>> elementsByTagName) {
    // Replaces the manual entry-iterator loop with the equivalent removeIf/noneMatch idiom.
    elementsByTagName.values().removeIf(
            elements -> elements.stream().noneMatch(element -> hasEnvironment(element) || hasRegion(element)));
}
/** Returns true if the element carries a deploy-namespace region override attribute. */
private boolean hasRegion(Element element) {
return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
}
/** Returns true if the element carries a deploy-namespace environment override attribute. */
private boolean hasEnvironment(Element element) {
return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
}
/** Returns the element's deploy-namespace environment attribute, or empty when absent. */
private Optional<Environment> getEnvironment(Element element) {
    String value = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
    return (value == null || value.isEmpty())
            ? Optional.empty()
            : Optional.of(Environment.from(value));
}
/** Returns the element's deploy-namespace region attribute, or the default region when absent. */
private RegionName getRegion(Element element) {
    String value = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
    return (value == null || value.isEmpty())
            ? RegionName.defaultName()
            : RegionName.from(value);
}
/**
 * Groups the given children by tag name plus (when present) their id attribute,
 * preserving document order both within and across groups (LinkedHashMap).
 *
 * @param children the child elements to group
 * @return groups of elements keyed by tag name (+id)
 */
private Map<String, List<Element>> elementsByTagNameAndId(List<Element> children) {
    Map<String, List<Element>> elementsByTagName = new LinkedHashMap<>();
    for (Element child : children) {
        String key = child.getTagName();
        if (child.hasAttribute(ATTR_ID)) {
            key += child.getAttribute(ATTR_ID);
        }
        // computeIfAbsent replaces the manual containsKey/put dance.
        elementsByTagName.computeIfAbsent(key, k -> new ArrayList<>()).add(child);
    }
    return elementsByTagName;
}
/** Returns the element's tag name followed by the names of all its attributes, space-separated. */
private static String getPrintableElement(Element element) {
    StringBuilder out = new StringBuilder(element.getTagName());
    NamedNodeMap attributes = element.getAttributes();
    for (int i = 0; i < attributes.getLength(); i++) {
        out.append(' ').append(attributes.item(i).getNodeName());
    }
    return out.toString();
}
/**
 * Returns a dump of the element: its tag name, each attribute as name=value,
 * then (on following tab-prefixed lines) the same dump for each child element.
 */
private static String getPrintableElementRecursive(Element element) {
    StringBuilder out = new StringBuilder(element.getTagName());
    NamedNodeMap attributes = element.getAttributes();
    for (int i = 0; i < attributes.getLength(); i++) {
        out.append(' ')
           .append(attributes.item(i).getNodeName())
           .append('=')
           .append(attributes.item(i).getNodeValue());
    }
    List<Element> children = XML.getChildren(element);
    if ( ! children.isEmpty()) {
        out.append('\n');
        for (Element child : children)
            out.append('\t').append(getPrintableElementRecursive(child));
    }
    return out.toString();
}
/**
 * Immutable holder for the environment and region that apply at a given
 * point in the XML tree while overrides are being resolved.
 */
private static final class Context {
// Environment inherited from enclosing elements; empty when none has been specified.
final Optional<Environment> environment;
// Region inherited from enclosing elements; the default region when none has been specified.
final RegionName region;
private Context(Optional<Environment> environment, RegionName region) {
this.environment = environment;
this.region = region;
}
/** Returns a context with no environment and the default region. */
static Context empty() {
return new Context(Optional.empty(), RegionName.defaultName());
}
/** Returns a context holding the given environment and region. */
public static Context create(Optional<Environment> environment, RegionName region) {
return new Context(environment, region);
}
}
} | class OverrideProcessor implements PreProcessor {
private static final Logger log = Logger.getLogger(OverrideProcessor.class.getName());
private final Environment environment;
private final RegionName region;
private static final String ATTR_ID = "id";
private static final String ATTR_ENV = "environment";
private static final String ATTR_REG = "region";
public OverrideProcessor(Environment environment, RegionName region) {
this.environment = environment;
this.region = region;
}
public Document process(Document input) throws TransformerException {
log.log(LogLevel.DEBUG, "Preprocessing overrides with " + environment + "." + region);
Document ret = Xml.copyDocument(input);
Element root = ret.getDocumentElement();
applyOverrides(root, Context.empty());
return ret;
}
private void applyOverrides(Element parent, Context context) {
context = getParentContext(parent, context);
Map<String, List<Element>> elementsByTagName = elementsByTagNameAndId(XML.getChildren(parent));
retainOverriddenElements(elementsByTagName);
for (Map.Entry<String, List<Element>> entry : elementsByTagName.entrySet()) {
pruneOverrides(parent, entry.getValue(), context);
}
for (Element child : XML.getChildren(parent)) {
applyOverrides(child, context);
child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
}
}
private Context getParentContext(Element parent, Context context) {
Optional<Environment> environment = context.environment;
RegionName region = context.region;
if ( ! environment.isPresent()) {
environment = getEnvironment(parent);
}
if (region.isDefault()) {
region = getRegion(parent);
}
return Context.create(environment, region);
}
/**
* Prune overrides from parent according to deploy override rules.
*
* @param parent Parent {@link Element} above children.
* @param children Children where one {@link Element} will remain as the overriding element
* @param context Current context with environment and region.
*/
private void pruneOverrides(Element parent, List<Element> children, Context context) {
checkConsistentInheritance(children, context);
pruneNonMatchingEnvironmentsAndRegions(parent, children);
retainMostSpecificEnvironmentAndRegion(parent, children, context);
}
/**
* Ensures that environment and region does not change from something non-default to something else.
*/
private void checkConsistentInheritance(List<Element> children, Context context) {
for (Element child : children) {
Optional<Environment> env = getEnvironment(child);
RegionName reg = getRegion(child);
if (env.isPresent() && context.environment.isPresent() && !env.equals(context.environment)) {
throw new IllegalArgumentException("Environment in child (" + env.get() + ") differs from that inherited from parent (" + context.environment + ") at " + child);
}
if (!reg.isDefault() && !context.region.isDefault() && !reg.equals(context.region)) {
throw new IllegalArgumentException("Region in child (" + reg + ") differs from that inherited from parent (" + context.region + ") at " + child);
}
}
}
/**
* Prune elements that are not matching our environment and region
*/
private void pruneNonMatchingEnvironmentsAndRegions(Element parent, List<Element> children) {
Iterator<Element> elemIt = children.iterator();
while (elemIt.hasNext()) {
Element child = elemIt.next();
if ( ! matches(getEnvironment(child), getRegion(child))) {
parent.removeChild(child);
elemIt.remove();
}
}
}
/**
* Find the most specific element and remove all others.
*/
private void retainMostSpecificEnvironmentAndRegion(Element parent, List<Element> children, Context context) {
List<Element> bestMatches = new ArrayList<>();
int bestMatch = 0;
for (Element child : children) {
bestMatch = updateBestMatches(bestMatches, child, bestMatch, context);
}
if (bestMatch > 0) {
doElementSpecificProcessingOnOverride(bestMatches);
for (Element child : children) {
if ( ! bestMatches.contains(child)) {
parent.removeChild(child);
}
}
}
}
private int updateBestMatches(List<Element> bestMatches, Element child, int bestMatch, Context context) {
int overrideCount = getNumberOfOverrides(child, context);
if (overrideCount >= bestMatch) {
if (overrideCount > bestMatch)
bestMatches.clear();
bestMatches.add(child);
return overrideCount;
} else {
return bestMatch;
}
}
private int getNumberOfOverrides(Element child, Context context) {
int currentMatch = 0;
Optional<Environment> elementEnvironment = hasEnvironment(child) ? getEnvironment(child) : context.environment;
RegionName elementRegion = hasRegion(child) ? getRegion(child) : context.region;
if (elementEnvironment.isPresent() && elementEnvironment.get().equals(environment))
currentMatch++;
if ( ! elementRegion.isDefault() && elementRegion.equals(region))
currentMatch++;
return currentMatch;
}
/** Called on each element which is selected by matching some override condition */
private void doElementSpecificProcessingOnOverride(List<Element> elements) {
elements.forEach(element -> {
if (element.getTagName().equals("nodes"))
if (element.getChildNodes().getLength() == 0)
element.setAttribute("required", "true");
});
}
/**
* Retains all elements where at least one element is overridden. Removes non-overridden elements from map.
*/
private void retainOverriddenElements(Map<String, List<Element>> elementsByTagName) {
Iterator<Map.Entry<String, List<Element>>> it = elementsByTagName.entrySet().iterator();
while (it.hasNext()) {
List<Element> elements = it.next().getValue();
boolean hasOverrides = false;
for (Element element : elements) {
if (hasEnvironment(element) || hasRegion(element)) {
hasOverrides = true;
}
}
if (!hasOverrides) {
it.remove();
}
}
}
private boolean hasRegion(Element element) {
return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
}
private boolean hasEnvironment(Element element) {
return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
}
private Optional<Environment> getEnvironment(Element element) {
String env = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
if (env == null || env.isEmpty()) {
return Optional.empty();
}
return Optional.of(Environment.from(env));
}
private RegionName getRegion(Element element) {
String reg = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
if (reg == null || reg.isEmpty()) {
return RegionName.defaultName();
}
return RegionName.from(reg);
}
private Map<String, List<Element>> elementsByTagNameAndId(List<Element> children) {
Map<String, List<Element>> elementsByTagName = new LinkedHashMap<>();
for (Element child : children) {
String key = child.getTagName();
if (child.hasAttribute(ATTR_ID)) {
key += child.getAttribute(ATTR_ID);
}
if (!elementsByTagName.containsKey(key)) {
elementsByTagName.put(key, new ArrayList<>());
}
elementsByTagName.get(key).add(child);
}
return elementsByTagName;
}
private static String getPrintableElement(Element element) {
StringBuilder sb = new StringBuilder(element.getTagName());
final NamedNodeMap attributes = element.getAttributes();
for (int i = 0; i < attributes.getLength(); i++) {
sb.append(" ").append(attributes.item(i).getNodeName());
}
return sb.toString();
}
private static String getPrintableElementRecursive(Element element) {
StringBuilder sb = new StringBuilder();
sb.append(element.getTagName());
final NamedNodeMap attributes = element.getAttributes();
for (int i = 0; i < attributes.getLength(); i++) {
sb.append(" ")
.append(attributes.item(i).getNodeName())
.append("=")
.append(attributes.item(i).getNodeValue());
}
final List<Element> children = XML.getChildren(element);
if (children.size() > 0) {
sb.append("\n");
for (Element e : children)
sb.append("\t").append(getPrintableElementRecursive(e));
}
return sb.toString();
}
/**
* Represents environment and region in a given context.
*/
private static final class Context {
final Optional<Environment> environment;
final RegionName region;
private Context(Optional<Environment> environment, RegionName region) {
this.environment = environment;
this.region = region;
}
static Context empty() {
return new Context(Optional.empty(), RegionName.defaultName());
}
public static Context create(Optional<Environment> environment, RegionName region) {
return new Context(environment, region);
}
}
} |
I agree; that is better: https://github.com/yahoo/vespa/pull/2209 | private boolean matches(Optional<Environment> elementEnvironment, RegionName elementRegion) {
if (elementEnvironment.isPresent() && ! environment.equals(elementEnvironment.get())) return false;
if ( ! elementRegion.isDefault() && ( ! region.equals(elementRegion) || ! environment.equals(Environment.prod))) return false;
return true;
} | if ( ! elementRegion.isDefault() && ( ! region.equals(elementRegion) || ! environment.equals(Environment.prod))) return false; | private boolean matches(Optional<Environment> elementEnvironment, RegionName elementRegion) {
if (elementEnvironment.isPresent() && ! environment.equals(elementEnvironment.get())) return false;
if ( ! elementRegion.isDefault() && ( ! region.equals(elementRegion) || ! environment.equals(Environment.prod))) return false;
return true;
} | class OverrideProcessor implements PreProcessor {
private static final Logger log = Logger.getLogger(OverrideProcessor.class.getName());
private final Environment environment;
private final RegionName region;
private static final String ATTR_ID = "id";
private static final String ATTR_ENV = "environment";
private static final String ATTR_REG = "region";
public OverrideProcessor(Environment environment, RegionName region) {
this.environment = environment;
this.region = region;
}
public Document process(Document input) throws TransformerException {
log.log(LogLevel.DEBUG, "Preprocessing overrides with " + environment + "." + region);
Document ret = Xml.copyDocument(input);
Element root = ret.getDocumentElement();
applyOverrides(root, Context.empty());
return ret;
}
private void applyOverrides(Element parent, Context context) {
context = getParentContext(parent, context);
Map<String, List<Element>> elementsByTagName = elementsByTagNameAndId(XML.getChildren(parent));
retainOverriddenElements(elementsByTagName);
for (Map.Entry<String, List<Element>> entry : elementsByTagName.entrySet()) {
pruneOverrides(parent, entry.getValue(), context);
}
for (Element child : XML.getChildren(parent)) {
applyOverrides(child, context);
child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
}
}
private Context getParentContext(Element parent, Context context) {
Optional<Environment> environment = context.environment;
RegionName region = context.region;
if ( ! environment.isPresent()) {
environment = getEnvironment(parent);
}
if (region.isDefault()) {
region = getRegion(parent);
}
return Context.create(environment, region);
}
/**
* Prune overrides from parent according to deploy override rules.
*
* @param parent Parent {@link Element} above children.
* @param children Children where one {@link Element} will remain as the overriding element
* @param context Current context with environment and region.
*/
private void pruneOverrides(Element parent, List<Element> children, Context context) {
checkConsistentInheritance(children, context);
pruneNonMatchingEnvironmentsAndRegions(parent, children);
retainMostSpecificEnvironmentAndRegion(parent, children, context);
}
/**
* Ensures that environment and region does not change from something non-default to something else.
*/
private void checkConsistentInheritance(List<Element> children, Context context) {
for (Element child : children) {
Optional<Environment> env = getEnvironment(child);
RegionName reg = getRegion(child);
if (env.isPresent() && context.environment.isPresent() && !env.equals(context.environment)) {
throw new IllegalArgumentException("Environment in child (" + env.get() + ") differs from that inherited from parent (" + context.environment + ") at " + child);
}
if (!reg.isDefault() && !context.region.isDefault() && !reg.equals(context.region)) {
throw new IllegalArgumentException("Region in child (" + reg + ") differs from that inherited from parent (" + context.region + ") at " + child);
}
}
}
/**
* Prune elements that are not matching our environment and region
*/
private void pruneNonMatchingEnvironmentsAndRegions(Element parent, List<Element> children) {
Iterator<Element> elemIt = children.iterator();
while (elemIt.hasNext()) {
Element child = elemIt.next();
if ( ! matches(getEnvironment(child), getRegion(child))) {
parent.removeChild(child);
elemIt.remove();
}
}
}
/**
* Find the most specific element and remove all others.
*/
private void retainMostSpecificEnvironmentAndRegion(Element parent, List<Element> children, Context context) {
List<Element> bestMatches = new ArrayList<>();
int bestMatch = 0;
for (Element child : children) {
bestMatch = updateBestMatches(bestMatches, child, bestMatch, context);
}
if (bestMatch > 0) {
doElementSpecificProcessingOnOverride(bestMatches);
for (Element child : children) {
if ( ! bestMatches.contains(child)) {
parent.removeChild(child);
}
}
}
}
private int updateBestMatches(List<Element> bestMatches, Element child, int bestMatch, Context context) {
int overrideCount = getNumberOfOverrides(child, context);
if (overrideCount >= bestMatch) {
if (overrideCount > bestMatch)
bestMatches.clear();
bestMatches.add(child);
return overrideCount;
} else {
return bestMatch;
}
}
private int getNumberOfOverrides(Element child, Context context) {
int currentMatch = 0;
Optional<Environment> elementEnvironment = hasEnvironment(child) ? getEnvironment(child) : context.environment;
RegionName elementRegion = hasRegion(child) ? getRegion(child) : context.region;
if (elementEnvironment.isPresent() && elementEnvironment.get().equals(environment))
currentMatch++;
if ( ! elementRegion.isDefault() && elementRegion.equals(region))
currentMatch++;
return currentMatch;
}
/** Called on each element which is selected by matching some override condition */
private void doElementSpecificProcessingOnOverride(List<Element> elements) {
elements.forEach(element -> {
if (element.getTagName().equals("nodes"))
if (element.getChildNodes().getLength() == 0)
element.setAttribute("required", "true");
});
}
/**
* Retains all elements where at least one element is overridden. Removes non-overridden elements from map.
*/
private void retainOverriddenElements(Map<String, List<Element>> elementsByTagName) {
Iterator<Map.Entry<String, List<Element>>> it = elementsByTagName.entrySet().iterator();
while (it.hasNext()) {
List<Element> elements = it.next().getValue();
boolean hasOverrides = false;
for (Element element : elements) {
if (hasEnvironment(element) || hasRegion(element)) {
hasOverrides = true;
}
}
if (!hasOverrides) {
it.remove();
}
}
}
private boolean hasRegion(Element element) {
return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
}
private boolean hasEnvironment(Element element) {
return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
}
private Optional<Environment> getEnvironment(Element element) {
String env = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
if (env == null || env.isEmpty()) {
return Optional.empty();
}
return Optional.of(Environment.from(env));
}
private RegionName getRegion(Element element) {
String reg = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
if (reg == null || reg.isEmpty()) {
return RegionName.defaultName();
}
return RegionName.from(reg);
}
private Map<String, List<Element>> elementsByTagNameAndId(List<Element> children) {
Map<String, List<Element>> elementsByTagName = new LinkedHashMap<>();
for (Element child : children) {
String key = child.getTagName();
if (child.hasAttribute(ATTR_ID)) {
key += child.getAttribute(ATTR_ID);
}
if (!elementsByTagName.containsKey(key)) {
elementsByTagName.put(key, new ArrayList<>());
}
elementsByTagName.get(key).add(child);
}
return elementsByTagName;
}
private static String getPrintableElement(Element element) {
StringBuilder sb = new StringBuilder(element.getTagName());
final NamedNodeMap attributes = element.getAttributes();
for (int i = 0; i < attributes.getLength(); i++) {
sb.append(" ").append(attributes.item(i).getNodeName());
}
return sb.toString();
}
private static String getPrintableElementRecursive(Element element) {
StringBuilder sb = new StringBuilder();
sb.append(element.getTagName());
final NamedNodeMap attributes = element.getAttributes();
for (int i = 0; i < attributes.getLength(); i++) {
sb.append(" ")
.append(attributes.item(i).getNodeName())
.append("=")
.append(attributes.item(i).getNodeValue());
}
final List<Element> children = XML.getChildren(element);
if (children.size() > 0) {
sb.append("\n");
for (Element e : children)
sb.append("\t").append(getPrintableElementRecursive(e));
}
return sb.toString();
}
/**
* Represents environment and region in a given context.
*/
private static final class Context {
final Optional<Environment> environment;
final RegionName region;
private Context(Optional<Environment> environment, RegionName region) {
this.environment = environment;
this.region = region;
}
static Context empty() {
return new Context(Optional.empty(), RegionName.defaultName());
}
public static Context create(Optional<Environment> environment, RegionName region) {
return new Context(environment, region);
}
}
} | class OverrideProcessor implements PreProcessor {
private static final Logger log = Logger.getLogger(OverrideProcessor.class.getName());
private final Environment environment;
private final RegionName region;
private static final String ATTR_ID = "id";
private static final String ATTR_ENV = "environment";
private static final String ATTR_REG = "region";
public OverrideProcessor(Environment environment, RegionName region) {
this.environment = environment;
this.region = region;
}
public Document process(Document input) throws TransformerException {
log.log(LogLevel.DEBUG, "Preprocessing overrides with " + environment + "." + region);
Document ret = Xml.copyDocument(input);
Element root = ret.getDocumentElement();
applyOverrides(root, Context.empty());
return ret;
}
private void applyOverrides(Element parent, Context context) {
context = getParentContext(parent, context);
Map<String, List<Element>> elementsByTagName = elementsByTagNameAndId(XML.getChildren(parent));
retainOverriddenElements(elementsByTagName);
for (Map.Entry<String, List<Element>> entry : elementsByTagName.entrySet()) {
pruneOverrides(parent, entry.getValue(), context);
}
for (Element child : XML.getChildren(parent)) {
applyOverrides(child, context);
child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
child.removeAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
}
}
private Context getParentContext(Element parent, Context context) {
Optional<Environment> environment = context.environment;
RegionName region = context.region;
if ( ! environment.isPresent()) {
environment = getEnvironment(parent);
}
if (region.isDefault()) {
region = getRegion(parent);
}
return Context.create(environment, region);
}
/**
* Prune overrides from parent according to deploy override rules.
*
* @param parent Parent {@link Element} above children.
* @param children Children where one {@link Element} will remain as the overriding element
* @param context Current context with environment and region.
*/
private void pruneOverrides(Element parent, List<Element> children, Context context) {
checkConsistentInheritance(children, context);
pruneNonMatchingEnvironmentsAndRegions(parent, children);
retainMostSpecificEnvironmentAndRegion(parent, children, context);
}
/**
* Ensures that environment and region does not change from something non-default to something else.
*/
private void checkConsistentInheritance(List<Element> children, Context context) {
for (Element child : children) {
Optional<Environment> env = getEnvironment(child);
RegionName reg = getRegion(child);
if (env.isPresent() && context.environment.isPresent() && !env.equals(context.environment)) {
throw new IllegalArgumentException("Environment in child (" + env.get() + ") differs from that inherited from parent (" + context.environment + ") at " + child);
}
if (!reg.isDefault() && !context.region.isDefault() && !reg.equals(context.region)) {
throw new IllegalArgumentException("Region in child (" + reg + ") differs from that inherited from parent (" + context.region + ") at " + child);
}
}
}
/**
* Prune elements that are not matching our environment and region
*/
private void pruneNonMatchingEnvironmentsAndRegions(Element parent, List<Element> children) {
Iterator<Element> elemIt = children.iterator();
while (elemIt.hasNext()) {
Element child = elemIt.next();
if ( ! matches(getEnvironment(child), getRegion(child))) {
parent.removeChild(child);
elemIt.remove();
}
}
}
/**
* Find the most specific element and remove all others.
*/
private void retainMostSpecificEnvironmentAndRegion(Element parent, List<Element> children, Context context) {
List<Element> bestMatches = new ArrayList<>();
int bestMatch = 0;
for (Element child : children) {
bestMatch = updateBestMatches(bestMatches, child, bestMatch, context);
}
if (bestMatch > 0) {
doElementSpecificProcessingOnOverride(bestMatches);
for (Element child : children) {
if ( ! bestMatches.contains(child)) {
parent.removeChild(child);
}
}
}
}
private int updateBestMatches(List<Element> bestMatches, Element child, int bestMatch, Context context) {
int overrideCount = getNumberOfOverrides(child, context);
if (overrideCount >= bestMatch) {
if (overrideCount > bestMatch)
bestMatches.clear();
bestMatches.add(child);
return overrideCount;
} else {
return bestMatch;
}
}
private int getNumberOfOverrides(Element child, Context context) {
int currentMatch = 0;
Optional<Environment> elementEnvironment = hasEnvironment(child) ? getEnvironment(child) : context.environment;
RegionName elementRegion = hasRegion(child) ? getRegion(child) : context.region;
if (elementEnvironment.isPresent() && elementEnvironment.get().equals(environment))
currentMatch++;
if ( ! elementRegion.isDefault() && elementRegion.equals(region))
currentMatch++;
return currentMatch;
}
/** Called on each element which is selected by matching some override condition */
private void doElementSpecificProcessingOnOverride(List<Element> elements) {
elements.forEach(element -> {
if (element.getTagName().equals("nodes"))
if (element.getChildNodes().getLength() == 0)
element.setAttribute("required", "true");
});
}
/**
* Retains all elements where at least one element is overridden. Removes non-overridden elements from map.
*/
private void retainOverriddenElements(Map<String, List<Element>> elementsByTagName) {
Iterator<Map.Entry<String, List<Element>>> it = elementsByTagName.entrySet().iterator();
while (it.hasNext()) {
List<Element> elements = it.next().getValue();
boolean hasOverrides = false;
for (Element element : elements) {
if (hasEnvironment(element) || hasRegion(element)) {
hasOverrides = true;
}
}
if (!hasOverrides) {
it.remove();
}
}
}
private boolean hasRegion(Element element) {
return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
}
private boolean hasEnvironment(Element element) {
return element.hasAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
}
private Optional<Environment> getEnvironment(Element element) {
String env = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_ENV);
if (env == null || env.isEmpty()) {
return Optional.empty();
}
return Optional.of(Environment.from(env));
}
private RegionName getRegion(Element element) {
String reg = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ATTR_REG);
if (reg == null || reg.isEmpty()) {
return RegionName.defaultName();
}
return RegionName.from(reg);
}
private Map<String, List<Element>> elementsByTagNameAndId(List<Element> children) {
Map<String, List<Element>> elementsByTagName = new LinkedHashMap<>();
for (Element child : children) {
String key = child.getTagName();
if (child.hasAttribute(ATTR_ID)) {
key += child.getAttribute(ATTR_ID);
}
if (!elementsByTagName.containsKey(key)) {
elementsByTagName.put(key, new ArrayList<>());
}
elementsByTagName.get(key).add(child);
}
return elementsByTagName;
}
private static String getPrintableElement(Element element) {
StringBuilder sb = new StringBuilder(element.getTagName());
final NamedNodeMap attributes = element.getAttributes();
for (int i = 0; i < attributes.getLength(); i++) {
sb.append(" ").append(attributes.item(i).getNodeName());
}
return sb.toString();
}
private static String getPrintableElementRecursive(Element element) {
StringBuilder sb = new StringBuilder();
sb.append(element.getTagName());
final NamedNodeMap attributes = element.getAttributes();
for (int i = 0; i < attributes.getLength(); i++) {
sb.append(" ")
.append(attributes.item(i).getNodeName())
.append("=")
.append(attributes.item(i).getNodeValue());
}
final List<Element> children = XML.getChildren(element);
if (children.size() > 0) {
sb.append("\n");
for (Element e : children)
sb.append("\t").append(getPrintableElementRecursive(e));
}
return sb.toString();
}
/**
* Represents environment and region in a given context.
*/
private static final class Context {
final Optional<Environment> environment;
final RegionName region;
private Context(Optional<Environment> environment, RegionName region) {
this.environment = environment;
this.region = region;
}
static Context empty() {
return new Context(Optional.empty(), RegionName.defaultName());
}
public static Context create(Optional<Environment> environment, RegionName region) {
return new Context(environment, region);
}
}
} |
stopServices may throw an exception on failure. But we want to ignore such a failure, I think. @hmusum Do we also want to ignore stopServices() if active? I think so. Taking down the container may be considered equivalent to a reboot of a node, in case stopping the services are first tried, then forced (KILL), as part of reboot. | private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec) {
Optional<Container> existingContainer = getContainer();
if (!existingContainer.isPresent()) return Optional.empty();
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer.get());
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer.get() + ": " + removeReason.get());
if (existingContainer.get().state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
stopServices();
}
vespaVersion = Optional.empty();
dockerOperations.removeContainer(existingContainer.get());
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
return Optional.empty();
}
return existingContainer;
} | stopServices(); | private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec) {
Optional<Container> existingContainer = getContainer();
if (!existingContainer.isPresent()) return Optional.empty();
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer.get());
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer.get() + ": " + removeReason.get());
if (existingContainer.get().state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
logger.info("Failed stopping services, ignoring", e);
}
}
vespaVersion = Optional.empty();
dockerOperations.removeContainer(existingContainer.get());
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
return Optional.empty();
}
return existingContainer;
} | class NodeAgentImpl implements NodeAgent {
// --- Loop/lifecycle control ---
// Flipped once by stop(); the tick loop exits when this becomes true.
private final AtomicBoolean terminated = new AtomicBoolean(false);
// isFrozen is the applied state, wantFrozen the requested state; the tick
// loop copies wantFrozen into isFrozen under `monitor` (see tick()).
private boolean isFrozen = true;
private boolean wantFrozen = false;
// When true, tick() converges immediately instead of sleeping out the interval.
private boolean workToDoNow = true;
// Guards isFrozen/wantFrozen/workToDoNow and is the wait/notify handle for the tick loop.
private final Object monitor = new Object();
private final PrefixLogger logger;
// Image currently being downloaded, or null when no download is in flight.
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
// --- Collaborators (injected via constructor) ---
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
// NOTE(review): SimpleDateFormat is not thread-safe; this instance is only
// touched inside addDebugMessage(), which synchronizes on debugMessages.
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
// Bounded (~1000 entries) in-memory history exposed through debugInfo().
private final LinkedList<String> debugMessages = new LinkedList<>();
// Converge interval; overwritten by start(intervalMillis).
private long delaysBetweenEachConvergeMillis = 30_000;
// Incremented by tick() on unexpected exceptions; read-and-reset by
// getAndResetNumberOfUnhandledExceptions().
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
// What we currently know about the Docker container for this node.
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
// Last attributes published to the node repo; used to suppress redundant updates.
private NodeAttributes lastAttributesSet = null;
// Last node spec fetched from the node repo; null until the first converge().
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
// Vespa version detected in the running container, if any.
private Optional<String> vespaVersion = Optional.empty();
/**
 * Creates a node agent for the given host.
 *
 * If a container for this host already exists in Docker, its Vespa version and
 * CPU-usage baseline are picked up, and the container state is set to
 * RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN so the resume script is (re)run on the
 * next converge.
 */
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
// Recover state from an already-existing container (e.g. after node-admin restart).
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
vespaVersion = dockerOperations.getVespaVersion(container.name);
// Baseline CPU accounting from the container's creation time.
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
});
}
/**
 * Requests that this agent freeze (stop converging) or unfreeze.
 *
 * @return true once the requested state has actually been applied by the
 *         tick loop (wantFrozen has been copied into isFrozen)
 */
@Override
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        boolean requestChanged = (frozen != wantFrozen);
        if (requestChanged) {
            wantFrozen = frozen;
            addDebugMessage(frozen ? "Freezing" : "Unfreezing");
            signalWorkToBeDone();
        }
        // Only "done" when the tick loop has applied the request.
        return frozen == isFrozen;
    }
}
// Appends a timestamped entry to the bounded debug history and logs it at debug level.
private void addDebugMessage(String message) {
    synchronized (debugMessages) {
        // Evict oldest entries first so the history stays around 1000 lines.
        while (debugMessages.size() > 1000) {
            debugMessages.removeFirst();
        }
        logger.debug(message);
        debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
    }
}
/**
 * Returns a snapshot of this agent's state for inspection/debugging.
 *
 * Fix: lastNodeSpec is null until the first successful converge(), so reading
 * lastNodeSpec.nodeState here used to throw NullPointerException if debug info
 * was requested before the agent had converged once. Report "unknown" instead.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        // Copy so callers cannot observe concurrent mutation of the history.
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // Guard against NPE before the first converge() has populated lastNodeSpec.
    ContainerNodeSpec nodeSpec = lastNodeSpec;
    debug.put("Node repo state", nodeSpec == null ? "unknown" : nodeSpec.nodeState.name());
    return debug;
}
/**
 * Starts the background tick loop with the given converge interval.
 *
 * @throws RuntimeException if the agent has already been started
 */
@Override
public void start(int intervalMillis) {
    addDebugMessage("Starting with interval " + intervalMillis + "ms");
    delaysBetweenEachConvergeMillis = intervalMillis;
    if (loopThread != null) {
        throw new RuntimeException("Can not restart a node agent.");
    }
    Runnable tickLoop = () -> {
        while (!terminated.get()) {
            tick();
        }
    };
    loopThread = new Thread(tickLoop);
    loopThread.setName("tick-" + hostname);
    loopThread.start();
}
/**
 * Stops the background tick loop, waiting up to 10 seconds for it to exit.
 *
 * Fix: on InterruptedException the interrupt status is now restored
 * (Thread.currentThread().interrupt()) instead of being silently swallowed,
 * so callers further up the stack can still observe the interruption.
 *
 * @throws RuntimeException if the agent has already been stopped
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    // Wake the loop so it notices `terminated` without waiting out the interval.
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        // Restore interrupt status per standard InterruptedException handling.
        Thread.currentThread().interrupt();
        logger.error("Interrupted; Could not stop host thread " + hostname);
    }
}
// Best effort: generates a filebeat config for this node and writes it into the
// container's filesystem. Any failure is logged and swallowed on purpose.
private void experimentalWriteFile(final ContainerNodeSpec nodeSpec) {
    try {
        Optional<String> config = new FilebeatConfigProvider(environment).getConfig(nodeSpec);
        if (config.isPresent()) {
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } else {
            logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
        }
    } catch (Throwable t) {
        // Deliberately broad: filebeat config is experimental and must never break convergence.
        logger.error("Failed writing filebeat config; " + nodeSpec, t);
    }
}
// Runs the node's optional resume program once after the container (re)starts,
// then marks the container fully RUNNING. No-op in any other container state.
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
    if (containerState == RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
        experimentalWriteFile(nodeSpec);
        addDebugMessage("Starting optional node program resume command");
        logger.info("Starting optional node program resume command");
        dockerOperations.resumeNode(containerName);
        containerState = RUNNING;
    }
}
// Clears the node's image/version attributes in the node repo, then hands the
// node back as ready.
private void updateNodeRepoAndMarkNodeAsReady(ContainerNodeSpec nodeSpec) {
    NodeAttributes clearedAttributes = new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(new DockerImage(""))
            .withVespaVersion("");
    publishStateToNodeRepoIfChanged(clearedAttributes);
    nodeRepository.markAsReady(nodeSpec.hostname);
}
// Publishes the node's current restart/reboot generation, Docker image and
// Vespa version to the node repo (only if something changed).
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
    publishStateToNodeRepoIfChanged(new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(nodeSpec.wantedDockerImage.orElse(new DockerImage("")))
            .withVespaVersion(vespaVersion.orElse("")));
}
// Pushes the attributes to the node repo, skipping the round trip when they
// are identical to what was last published.
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (currentAttributes.equals(lastAttributesSet)) return;
    logger.info("Publishing new set of attributes to node repo: "
            + lastAttributesSet + " -> " + currentAttributes);
    addDebugMessage("Publishing new set of attributes to node repo: {" +
            lastAttributesSet + "} -> {" + currentAttributes + "}");
    nodeRepository.updateNodeAttributes(hostname, currentAttributes);
    lastAttributesSet = currentAttributes;
}
// Starts the container when none exists: applies ACLs, starts it, resets the
// metrics/CPU baseline, detects the Vespa version and sets up metric schedules.
private void startContainerIfNeeded(final ContainerNodeSpec nodeSpec) {
    if (getContainer().isPresent()) return;
    aclMaintainer.ifPresent(AclMaintainer::run);
    dockerOperations.startContainer(containerName, nodeSpec);
    metricReceiver.unsetMetricsForContainer(hostname);
    lastCpuMetric = new CpuUsageReporter(clock.instant());
    vespaVersion = dockerOperations.getVespaVersion(containerName);
    configureContainerMetrics(nodeSpec);
    addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
            RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
    // The resume script still has to run; runLocalResumeScriptIfNeeded handles that.
    containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
}
// Removes the container if required; if it survives and a restart has been
// requested (bumped restart generation), restarts its services instead.
private void removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec) {
    Optional<Container> existingContainer = removeContainerIfNeeded(nodeSpec);
    if (!existingContainer.isPresent()) return;
    Optional<String> restartReason = shouldRestartServices(nodeSpec);
    if (!restartReason.isPresent()) return;
    logger.info("Will restart services for container " + existingContainer.get() + ": " + restartReason.get());
    restartServices(nodeSpec, existingContainer.get());
}
/**
 * Returns the reason services should be restarted, or empty if no restart is
 * needed. A restart is needed when the wanted restart generation is set and
 * the current generation is absent or lags behind it.
 *
 * Fix: the reason message used to call currentRestartGeneration.get()
 * unconditionally, which throws NoSuchElementException precisely in the case
 * where the first half of the condition (!isPresent()) made us enter this
 * branch. Render the absent case as "[not set]" instead.
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if ( ! nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (! nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.map(String::valueOf).orElse("[not set]")
                + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
// Restarts Vespa services in the container, but only when the container is
// running and the node is active; always asks the Orchestrator first.
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    if (!existingContainer.state.isRunning() || nodeSpec.nodeState != Node.State.active) return;
    ContainerName containerName = existingContainer.name;
    logger.info("Restarting services for " + containerName);
    // Get permission to take services down before doing so.
    orchestratorSuspendNode();
    dockerOperations.restartVespaOnNode(containerName);
}
/**
 * Stops the Vespa services inside the container: first tries to suspend the
 * node, then stops the services.
 *
 * NOTE(review): either call may throw on failure. Callers that are about to
 * tear the container down anyway (see removeContainerIfNeeded) may want to
 * catch and ignore such failures, since removal proceeds regardless.
 */
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
// Decides whether the existing container must be removed, returning the reason
// if so, or empty if it can stay. Checks are ordered: node state first, then
// image mismatch, then container liveness.
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    Node.State state = nodeSpec.nodeState;
    if (state == Node.State.dirty || state == Node.State.provisioned) {
        return Optional.of("Node in state " + state + ", container should no longer be running");
    }
    Optional<DockerImage> wantedImage = nodeSpec.wantedDockerImage;
    if (wantedImage.isPresent() && !wantedImage.get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + wantedImage.get());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    return Optional.empty();
}
// Schedules a download of the wanted Docker image if it is not available
// locally; signalWorkToBeDone is invoked when the download completes.
// Note: throws NoSuchElementException if wantedDockerImage is absent — callers
// (the `active` branch of converge) are expected to ensure it is set.
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
    DockerImage wantedImage = nodeSpec.wantedDockerImage.get();
    if (dockerOperations.shouldScheduleDownloadOfImage(wantedImage)) {
        if (wantedImage.equals(imageBeingDownloaded)) {
            // This very image is already being fetched; nothing more to do.
            return;
        }
        imageBeingDownloaded = wantedImage;
        dockerOperations.scheduleDownloadOfImage(containerName, nodeSpec, this::signalWorkToBeDone);
    } else if (imageBeingDownloaded != null) {
        // Image is now available locally; clear the in-flight marker.
        imageBeingDownloaded = null;
    }
}
// Wakes the tick loop so it converges immediately instead of waiting out the interval.
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (workToDoNow) return;
        workToDoNow = true;
        addDebugMessage("Signaling work to be done");
        monitor.notifyAll();
    }
}
/**
 * One iteration of the agent loop: waits until the converge interval has
 * elapsed (or work is signaled), applies a pending freeze/unfreeze request,
 * then converges unless frozen.
 *
 * Error policy: OrchestratorException is expected flow and only logged;
 * other Exceptions are counted and ignored; any other Throwable (e.g. an
 * Error) takes the whole process down.
 */
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
// Sleep until the remainder of the interval has passed, unless
// signalWorkToBeDone() wakes us early by setting workToDoNow.
while (! workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
// NOTE(review): interrupt status is not restored here — TODO confirm intended.
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
// Apply a pending freeze/unfreeze request; setFrozen() reports success
// only once this copy has happened.
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
// Expected when the Orchestrator denies an operation; retry next tick.
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
// Count for monitoring (see getAndResetNumberOfUnhandledExceptions) and keep going.
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
// Errors (OOM, linkage, ...) are not recoverable — crash the process.
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
/**
 * Drives the node towards the state reported by the node repository.
 *
 * Fetches the node spec and dispatches on its state: stopped-like states
 * remove the container; `active` downloads the wanted image, (re)starts the
 * container and resumes; `provisioned` is bumped to dirty; `dirty` archives
 * node data and marks the node ready.
 *
 * @throws IllegalStateException if the node is missing from the node repository
 */
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
// Cache the spec and reset metrics whenever it changes.
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
metricReceiver.unsetMetricsForContainer(hostname);
}
switch (nodeSpec.nodeState) {
// In these states the container must not run; just converge and report back.
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
// Defer everything else until the wanted image is available locally;
// the download callback re-triggers a tick when done.
if (imageBeingDownloaded != null) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
removeContainerIfNeededUpdateContainerState(nodeSpec);
startContainerIfNeeded(nodeSpec);
runLocalResumeScriptIfNeeded(nodeSpec);
// Report attributes before resuming so the config server sees the new
// image/version before orchestration lets traffic back in.
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
// Freshly provisioned nodes are immediately moved to dirty for cleanup.
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoAndMarkNodeAsReady(nodeSpec);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
/**
 * Samples and publishes container metrics (CPU, memory, network, disk) tagged
 * with node/application dimensions.
 *
 * @param numAllocatedContainersOnHost used to scale host CPU share into a
 *        per-container "percentage of allocated" figure
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
// No spec yet (agent has not converged once) — nothing to report on.
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
// "node.alive" is reported even when no container exists.
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if ( ! containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
// The stats maps mirror the Docker stats API JSON; keys and nesting are
// assumed to follow that format — TODO confirm against the Docker client used.
long currentCpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
long currentCpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
// Delta-based CPU share since the previous sample (see CpuUsageReporter).
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(currentCpuContainerTotalTime, currentCpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
// Exclude page cache from reported memory usage.
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
// Per-interface network counters, tagged with the interface name.
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
long bytesInGB = 1 << 30;
nodeSpec.minDiskAvailableGb.ifPresent(diskGB -> metricReceiver
.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskGB * bytesInGB));
storageMaintainer.ifPresent(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName)
.forEach((metricName, metricValue) ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, metricName).sample(metricValue.doubleValue())));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
}
// Samples the given metric from a Docker stats sub-map under the given gauge
// name, silently skipping absent maps/keys; a failed cast is logged and ignored.
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
    final Map<String, Object> metricsMap = (Map<String, Object>) metrics;
    if (metricsMap == null) return;
    if (!metricsMap.containsKey(metricName)) return;
    try {
        metricReceiver
                .declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
                .sample(((Number) metricsMap.get(metricName)).doubleValue());
    } catch (Throwable e) {
        logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
    }
}
// Returns the container from Docker, or empty when we already know it is absent.
private Optional<Container> getContainer() {
    return (containerState == ABSENT)
            ? Optional.empty()
            : dockerOperations.getContainer(containerName);
}
/** Returns the hostname of the node this agent manages. */
@Override
public String getHostname() {
return hostname;
}
/** Returns true while a Docker image download scheduled by this agent is in flight. */
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
/**
 * Returns the number of unhandled exceptions seen by the tick loop since the
 * previous call, and resets the counter to zero.
 */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
/**
 * Writes a secret-agent (yamas) schedule for the Vespa metrics check into the
 * container and restarts the yamas agent to pick it up. No-op when no storage
 * maintainer is configured.
 *
 * @throws RuntimeException (wrapping IOException) if the schedule cannot be written
 */
private void configureContainerMetrics(ContainerNodeSpec nodeSpec) {
if (! storageMaintainer.isPresent()) return;
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
// Run the Vespa check every 60 seconds, tagged with node/application metadata.
SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
nodeSpec.owner.ifPresent(owner ->
scheduleMaker
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
scheduleMaker
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
vespaVersion.ifPresent(version -> scheduleMaker.withTag("vespaVersion", version));
try {
scheduleMaker.writeTo(yamasAgentFolder);
// Restart the agent inside the container so the new schedule takes effect.
final String[] restartYamasAgent = new String[] {"service" , "yamas-agent", "restart"};
dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
/**
 * Tracks cumulative container/system CPU counters between samples and turns
 * them into a percentage-of-host figure, plus uptime since the given instant.
 * Not thread-safe; each call to getCpuUsagePercentage advances the baseline.
 */
class CpuUsageReporter {
// Counters from the previous sample; the first sample only establishes a baseline.
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
// Returns the container's share of host CPU (in percent) since the previous
// call. Returns 0 on the first call (totalSystemUsage == 0) and when the
// system counter did not advance, avoiding division by zero.
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
// Seconds elapsed since `created` (container creation or agent start).
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
// Asks the Orchestrator for permission to suspend this node; throws (e.g.
// OrchestratorException, handled in tick()) if permission is denied.
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
private Optional<String> vespaVersion = Optional.empty();
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
vespaVersion = dockerOperations.getVespaVersion(container.name);
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
});
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
@Override
public Map<String, Object> debugInfo() {
Map<String, Object> debug = new LinkedHashMap<>();
debug.put("Hostname", hostname);
debug.put("isFrozen", isFrozen);
debug.put("wantFrozen", wantFrozen);
debug.put("terminated", terminated);
debug.put("workToDoNow", workToDoNow);
synchronized (debugMessages) {
debug.put("History", new LinkedList<>(debugMessages));
}
debug.put("Node repo state", lastNodeSpec.nodeState.name());
return debug;
}
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (! terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
@Override
public void stop() {
addDebugMessage("Stopping");
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop host thread " + hostname);
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop host thread " + hostname);
}
}
private void experimentalWriteFile(final ContainerNodeSpec nodeSpec) {
try {
FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
if (! config.isPresent()) {
logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
return;
}
Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
Files.write(filebeatPath, config.get().getBytes());
logger.info("Wrote filebeat config.");
} catch (Throwable t) {
logger.error("Failed writing filebeat config; " + nodeSpec, t);
}
}
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (containerState != RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
return;
}
experimentalWriteFile(nodeSpec);
addDebugMessage("Starting optional node program resume command");
logger.info("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
containerState = RUNNING;
}
private void updateNodeRepoAndMarkNodeAsReady(ContainerNodeSpec nodeSpec) {
publishStateToNodeRepoIfChanged(
new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
nodeRepository.markAsReady(nodeSpec.hostname);
}
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.orElse(new DockerImage("")))
.withVespaVersion(vespaVersion.orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
lastAttributesSet = currentAttributes;
}
}
private void startContainerIfNeeded(final ContainerNodeSpec nodeSpec) {
if (! getContainer().isPresent()) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
metricReceiver.unsetMetricsForContainer(hostname);
lastCpuMetric = new CpuUsageReporter(clock.instant());
vespaVersion = dockerOperations.getVespaVersion(containerName);
configureContainerMetrics(nodeSpec);
addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
}
}
private void removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec) {
removeContainerIfNeeded(nodeSpec).ifPresent(existingContainer ->
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + existingContainer + ": " + restartReason);
restartServices(nodeSpec, existingContainer);
}));
}
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
if ( ! nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
if (! nodeSpec.currentRestartGeneration.isPresent() ||
nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
}
return Optional.empty();
}
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
return Optional.empty();
}
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, nodeSpec, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
private void signalWorkToBeDone() {
synchronized (monitor) {
if (! workToDoNow) {
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
}
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (! workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
metricReceiver.unsetMetricsForContainer(hostname);
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
if (imageBeingDownloaded != null) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
removeContainerIfNeededUpdateContainerState(nodeSpec);
startContainerIfNeeded(nodeSpec);
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoAndMarkNodeAsReady(nodeSpec);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
/**
 * Publishes node- and container-level metrics (alive, CPU, memory, network, disk) for this node.
 * Container-level stats are skipped when no container is present or Docker returns no stats.
 *
 * @param numAllocatedContainersOnHost scales host CPU usage to this container's allocated share
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
    final ContainerNodeSpec nodeSpec = lastNodeSpec;
    if (nodeSpec == null) return; // nothing to report before the first converge
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", hostname)
            .add("role", "tenants")
            .add("flavor", nodeSpec.nodeFlavor)
            .add("state", nodeSpec.nodeState.toString())
            .add("zone", environment.getZone())
            .add("parentHostname", environment.getParentHostHostname());
    vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
    nodeSpec.owner.ifPresent(owner ->
            dimensionsBuilder
                    .add("tenantName", owner.tenant)
                    .add("applicationName", owner.application)
                    .add("instanceName", owner.instance)
                    .add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
                    .add("app", owner.application + "." + owner.instance));
    nodeSpec.membership.ifPresent(membership ->
            dimensionsBuilder
                    .add("clustertype", membership.clusterType)
                    .add("clusterid", membership.clusterId));
    Dimensions dimensions = dimensionsBuilder.build();
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
    if (containerState == ABSENT) return;
    Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
    if ( ! containerStats.isPresent()) return;
    Docker.ContainerStats stats = containerStats.get();
    // Docker reports cumulative CPU times; convert to a usage percentage since the last sample.
    long currentCpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    long currentCpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(currentCpuContainerTotalTime, currentCpuSystemTotalTime);
    double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
    addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
    addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
    // Report memory usage with the page cache subtracted out.
    long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
    long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    long memoryUsage = memoryUsageTotal - memoryUsageCache;
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
    // Per-interface network counters, tagged with an extra "interface" dimension.
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
        addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
        addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
        addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
        addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
        addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
    });
    long bytesInGB = 1 << 30;
    nodeSpec.minDiskAvailableGb.ifPresent(diskGB -> metricReceiver
            .declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskGB * bytesInGB));
    storageMaintainer.ifPresent(maintainer -> maintainer
            .updateIfNeededAndGetDiskMetricsFor(containerName)
            .forEach((metricName, metricValue) ->
                    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, metricName).sample(metricValue.doubleValue())));
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
}
// Publishes metricsMap[metricName] as a gauge sample under yamasName, if the key is present.
// Best effort: a malformed value is logged and skipped, never aborting the metrics pass.
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
    final Map<String, Object> source = (Map<String, Object>) metrics;
    if (source == null) return;
    if (!source.containsKey(metricName)) return;
    final Object rawValue = source.get(metricName);
    try {
        final double sample = ((Number) rawValue).doubleValue();
        metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
                .sample(sample);
    } catch (Throwable e) {
        logger.warning("Failed to update " + yamasName + " metric with value " + rawValue, e);
    }
}
// Returns the Docker container backing this agent, or empty when we already know it is absent.
private Optional<Container> getContainer() {
    return containerState == ABSENT
            ? Optional.empty()
            : dockerOperations.getContainer(containerName);
}
/** @return the hostname of the node this agent manages */
@Override
public String getHostname() {
    return hostname;
}
/** @return whether a Docker image pull scheduled by this agent is still in flight */
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}
/** Returns the unhandled-exception count accumulated since the previous call, then clears it. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int unhandledSoFar = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return unhandledSoFar;
}
// Writes the yamas secret-agent schedule (tags describing this node) into the container's
// /etc/yamas-agent/ directory and restarts the agent so it picks up the new schedule.
// No-op when no storage maintainer is configured.
private void configureContainerMetrics(ContainerNodeSpec nodeSpec) {
    if (! storageMaintainer.isPresent()) return;
    final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
    Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
    SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
            .withTag("namespace", "Vespa")
            .withTag("role", "tenants")
            .withTag("flavor", nodeSpec.nodeFlavor)
            .withTag("state", nodeSpec.nodeState.toString())
            .withTag("zone", environment.getZone())
            .withTag("parentHostname", environment.getParentHostHostname());
    nodeSpec.owner.ifPresent(owner ->
            scheduleMaker
                    .withTag("tenantName", owner.tenant)
                    .withTag("app", owner.application + "." + owner.instance));
    nodeSpec.membership.ifPresent(membership ->
            scheduleMaker
                    .withTag("clustertype", membership.clusterType)
                    .withTag("clusterid", membership.clusterId));
    vespaVersion.ifPresent(version -> scheduleMaker.withTag("vespaVersion", version));
    try {
        scheduleMaker.writeTo(yamasAgentFolder);
        final String[] restartYamasAgent = new String[] {"service" , "yamas-agent", "restart"};
        dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
    } catch (IOException e) {
        throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
    }
}
// Tracks cumulative container/system CPU counters between samples and derives the
// container's CPU usage as a percentage of the host since the previous sample.
class CpuUsageReporter {
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;
    // When the measured container was started; basis for the uptime metric.
    private final Instant created;
    CpuUsageReporter(Instant created) {
        this.created = created;
    }
    /**
     * Converts cumulative CPU counters into percent-of-host usage since the previous call,
     * then stores the counters as the new baseline. Returns 0 on the first call (no baseline
     * yet) or when the system counter did not advance (avoids division by zero).
     */
    double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
        long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
        double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
                0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
        totalContainerUsage = currentContainerUsage;
        totalSystemUsage = currentSystemUsage;
        return cpuUsagePct;
    }
    /** @return seconds elapsed since {@code created}, according to the injected clock */
    long getUptime() {
        return Duration.between(created, clock.instant()).getSeconds();
    }
}
// Asks the Orchestrator for permission to suspend this node before disruptive operations.
// NOTE(review): suspend() appears to signal denial by throwing (tick() catches
// OrchestratorException) — confirm against the Orchestrator interface.
private void orchestratorSuspendNode() {
    logger.info("Ask Orchestrator for permission to suspend node " + hostname);
    orchestrator.suspend(hostname);
}
} |
Agree, I think we should ignore failing stopServices() | private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec) {
Optional<Container> existingContainer = getContainer();
if (!existingContainer.isPresent()) return Optional.empty();
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer.get());
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer.get() + ": " + removeReason.get());
if (existingContainer.get().state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
stopServices();
}
vespaVersion = Optional.empty();
dockerOperations.removeContainer(existingContainer.get());
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
return Optional.empty();
}
return existingContainer;
} | stopServices(); | private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec) {
Optional<Container> existingContainer = getContainer();
if (!existingContainer.isPresent()) return Optional.empty();
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer.get());
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer.get() + ": " + removeReason.get());
if (existingContainer.get().state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
logger.info("Failed stopping services, ignoring", e);
}
}
vespaVersion = Optional.empty();
dockerOperations.removeContainer(existingContainer.get());
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
return Optional.empty();
}
return existingContainer;
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
private Optional<String> vespaVersion = Optional.empty();
/**
 * Creates a node agent for the given host.
 * If a container for this host already exists, its Vespa version and CPU baseline are
 * adopted (when running) and the state is set so the resume script will be re-run.
 */
public NodeAgentImpl(
        final String hostName,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final Optional<StorageMaintainer> storageMaintainer,
        final MetricReceiverWrapper metricReceiver,
        final Environment environment,
        final Clock clock,
        final Optional<AclMaintainer> aclMaintainer) {
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.hostname = hostName;
    this.containerName = ContainerName.fromHostname(hostName);
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
    this.metricReceiver = metricReceiver;
    this.environment = environment;
    this.clock = clock;
    this.aclMaintainer = aclMaintainer;
    this.lastConverge = clock.instant();
    lastCpuMetric = new CpuUsageReporter(clock.instant());
    // Adopt a pre-existing container (e.g. after a node-admin restart) instead of recreating it.
    dockerOperations.getContainer(containerName)
            .ifPresent(container -> {
                if (container.state.isRunning()) {
                    vespaVersion = dockerOperations.getVespaVersion(container.name);
                    lastCpuMetric = new CpuUsageReporter(container.created);
                }
                containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
            });
}
/**
 * Records the desired frozen state and wakes the tick loop when it changes.
 *
 * @param frozen the desired frozen state
 * @return true when the agent has already converged to the requested state
 */
@Override
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        final boolean changed = (wantFrozen != frozen);
        if (changed) {
            wantFrozen = frozen;
            addDebugMessage(frozen ? "Freezing" : "Unfreezing");
            signalWorkToBeDone();
        }
        return isFrozen == frozen;
    }
}
// Records a timestamped message in the bounded in-memory debug log (and the debug logger),
// evicting the oldest entries once the log exceeds 1000 messages. Synchronized on
// debugMessages, which also serializes use of the non-thread-safe SimpleDateFormat here.
private void addDebugMessage(String message) {
    synchronized (debugMessages) {
        while (debugMessages.size() > 1000) {
            debugMessages.pop();
        }
        logger.debug(message);
        debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
    }
}
/**
 * Returns a snapshot of the agent's state for debugging/introspection.
 * Fix: {@code lastNodeSpec} is null until the first successful converge; reading
 * {@code lastNodeSpec.nodeState} in that window threw NullPointerException. Report
 * "unknown" instead.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        // Copy under the lock so callers get a stable snapshot of the history.
        debug.put("History", new LinkedList<>(debugMessages));
    }
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
/**
 * Starts the agent's tick loop in a dedicated thread named "tick-&lt;hostname&gt;".
 *
 * @param intervalMillis delay between converge attempts
 * @throws RuntimeException if the agent was already started (restart is not supported)
 */
@Override
public void start(int intervalMillis) {
    addDebugMessage("Starting with interval " + intervalMillis + "ms");
    delaysBetweenEachConvergeMillis = intervalMillis;
    if (loopThread != null) {
        throw new RuntimeException("Can not restart a node agent.");
    }
    loopThread = new Thread(() -> {
        while (! terminated.get()) tick();
    });
    loopThread.setName("tick-" + hostname);
    loopThread.start();
}
/**
 * Signals the tick loop to terminate and waits up to 10 seconds for it to exit.
 * Fix: restore the thread's interrupt status when the join is interrupted, so callers
 * further up the stack can still observe the interruption (previously it was swallowed).
 *
 * @throws RuntimeException if the agent was already stopped
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        Thread.currentThread().interrupt(); // preserve interrupt status for the caller
        logger.error("Interrupted; Could not stop host thread " + hostname);
    }
}
// Best-effort generation of the container's filebeat configuration; any failure is logged
// and ignored (hence "experimental") so it can never break the converge loop.
private void experimentalWriteFile(final ContainerNodeSpec nodeSpec) {
    try {
        FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
        Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
        if (! config.isPresent()) {
            logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
            return;
        }
        Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
        Files.write(filebeatPath, config.get().getBytes());
        logger.info("Wrote filebeat config.");
    } catch (Throwable t) {
        logger.error("Failed writing filebeat config; " + nodeSpec, t);
    }
}
// Runs the container's optional resume program once after a (re)start, writes the filebeat
// config first, and then marks the container fully RUNNING.
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
    final boolean resumePending = (containerState == RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
    if (!resumePending) return;
    experimentalWriteFile(nodeSpec);
    final String startingResume = "Starting optional node program resume command";
    addDebugMessage(startingResume);
    logger.info(startingResume);
    dockerOperations.resumeNode(containerName);
    containerState = RUNNING;
}
// Clears the node's Docker image and Vespa version in the node repository and marks it
// ready. Called from converge() when recycling a dirty node.
private void updateNodeRepoAndMarkNodeAsReady(ContainerNodeSpec nodeSpec) {
    publishStateToNodeRepoIfChanged(
            new NodeAttributes()
                    .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                    .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                    .withDockerImage(new DockerImage(""))
                    .withVespaVersion(""));
    nodeRepository.markAsReady(nodeSpec.hostname);
}
// Publishes this node's restart/reboot generation, Docker image and Vespa version to the
// node repository (only when something changed since the last publish). Note that the
// wanted generations/image are reported, acknowledging them as achieved.
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
    final NodeAttributes nodeAttributes = new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(nodeSpec.wantedDockerImage.orElse(new DockerImage("")))
            .withVespaVersion(vespaVersion.orElse(""));
    publishStateToNodeRepoIfChanged(nodeAttributes);
}
// Pushes the attributes to the node repository, but only when they differ from the set
// most recently published by this agent.
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (currentAttributes.equals(lastAttributesSet)) return;
    logger.info("Publishing new set of attributes to node repo: "
            + lastAttributesSet + " -> " + currentAttributes);
    addDebugMessage("Publishing new set of attributes to node repo: {" +
            lastAttributesSet + "} -> {" + currentAttributes + "}");
    nodeRepository.updateNodeAttributes(hostname, currentAttributes);
    lastAttributesSet = currentAttributes;
}
// Creates and starts the container if none exists: refreshes ACLs, resets the metrics and
// CPU baseline, reads the Vespa version, and leaves the state at
// RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN so the resume script runs on the next step.
private void startContainerIfNeeded(final ContainerNodeSpec nodeSpec) {
    if (! getContainer().isPresent()) {
        aclMaintainer.ifPresent(AclMaintainer::run);
        dockerOperations.startContainer(containerName, nodeSpec);
        metricReceiver.unsetMetricsForContainer(hostname);
        lastCpuMetric = new CpuUsageReporter(clock.instant());
        vespaVersion = dockerOperations.getVespaVersion(containerName);
        configureContainerMetrics(nodeSpec);
        addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
                RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
        containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
    }
}
// Removes the container when required; if it was kept, restarts its services when the
// wanted restart generation has been bumped in the node repository.
private void removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec) {
    removeContainerIfNeeded(nodeSpec).ifPresent(existingContainer ->
            shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                logger.info("Will restart services for container " + existingContainer + ": " + restartReason);
                restartServices(nodeSpec, existingContainer);
            }));
}
/**
 * Decides whether services must be restarted because the wanted restart generation is
 * ahead of the current one; returns the reason if so, empty otherwise.
 * Fix: the message previously called {@code currentRestartGeneration.get()} on the branch
 * taken when the value is absent, throwing NoSuchElementException; use orElse(null) so an
 * absent current generation is rendered as "null" instead of crashing.
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if ( ! nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (! nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.orElse(null) + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
// Restarts Vespa services in the container; only acts on a running container of an active
// node, and asks the Orchestrator for suspend permission first.
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
        // Local variable intentionally shadows the field with the container's own name.
        ContainerName containerName = existingContainer.name;
        logger.info("Restarting services for " + containerName);
        orchestratorSuspendNode();
        dockerOperations.restartVespaOnNode(containerName);
    }
}
/** Suspends the node (best effort) and stops all Vespa services running in the container. */
@Override
public void stopServices() {
    logger.info("Stopping services for " + containerName);
    dockerOperations.trySuspendNode(containerName);
    dockerOperations.stopServicesOnNode(containerName);
}
// Decides whether the existing container must be torn down; returns the human-readable
// reason when it must, empty when it may be kept.
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final Node.State nodeState = nodeSpec.nodeState;
    final boolean stateForbidsContainer =
            nodeState == Node.State.dirty || nodeState == Node.State.provisioned;
    if (stateForbidsContainer) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    final Optional<DockerImage> wantedImage = nodeSpec.wantedDockerImage;
    if (wantedImage.isPresent() && !wantedImage.get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + wantedImage.get());
    }
    if (existingContainer.state.isRunning()) {
        return Optional.empty();
    }
    return Optional.of("Container no longer running");
}
// Schedules an asynchronous pull of the wanted Docker image when it is not available
// locally; clears the in-flight marker once no download is needed.
// NOTE(review): assumes nodeSpec.wantedDockerImage is present — converge() only calls this
// for active nodes; confirm before reusing elsewhere.
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
    if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
        if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
            // The wanted image is already being downloaded; nothing to do.
            return;
        }
        imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        // Wake the tick loop when the download completes.
        dockerOperations.scheduleDownloadOfImage(containerName, nodeSpec, this::signalWorkToBeDone);
    } else if (imageBeingDownloaded != null) {
        imageBeingDownloaded = null;
    }
}
// Wakes the tick loop so the next converge runs immediately; no-op when a wake-up is
// already pending.
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (workToDoNow) return;
        workToDoNow = true;
        addDebugMessage("Signaling work to be done");
        monitor.notifyAll();
    }
}
/**
 * One iteration of the agent loop: sleeps until the next converge is due (or until work is
 * signaled), applies any pending freeze/unfreeze request, then runs converge() unless
 * frozen. Unhandled exceptions are counted and ignored; non-Exception throwables take the
 * process down.
 */
void tick() {
    boolean isFrozenCopy;
    synchronized (monitor) {
        while (! workToDoNow) {
            long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
            if (remainder > 0) {
                try {
                    monitor.wait(remainder);
                } catch (InterruptedException e) {
                    logger.error("Interrupted, but ignoring this: " + hostname);
                }
            } else break;
        }
        lastConverge = clock.instant();
        workToDoNow = false;
        if (isFrozen != wantFrozen) {
            isFrozen = wantFrozen;
        }
        isFrozenCopy = isFrozen; // read under the lock, used outside it
    }
    if (isFrozenCopy) {
        addDebugMessage("tick: isFrozen");
    } else {
        try {
            converge();
        } catch (OrchestratorException e) {
            // Expected when the Orchestrator denies suspension; retry on a later tick.
            logger.info(e.getMessage());
            addDebugMessage(e.getMessage());
        } catch (Exception e) {
            numberOfUnhandledException++;
            logger.error("Unhandled exception, ignoring.", e);
            addDebugMessage(e.getMessage());
        } catch (Throwable t) {
            // Anything worse than Exception (OOM, linkage errors, ...) is treated as fatal.
            logger.error("Unhandled throwable, taking down system.", t);
            System.exit(234);
        }
    }
}
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
metricReceiver.unsetMetricsForContainer(hostname);
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
if (imageBeingDownloaded != null) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
removeContainerIfNeededUpdateContainerState(nodeSpec);
startContainerIfNeeded(nodeSpec);
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoAndMarkNodeAsReady(nodeSpec);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if ( ! containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
long currentCpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
long currentCpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(currentCpuContainerTotalTime, currentCpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
long bytesInGB = 1 << 30;
nodeSpec.minDiskAvailableGb.ifPresent(diskGB -> metricReceiver
.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskGB * bytesInGB));
storageMaintainer.ifPresent(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName)
.forEach((metricName, metricValue) ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, metricName).sample(metricValue.doubleValue())));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
}
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
Map<String, Object> metricsMap = (Map<String, Object>) metrics;
if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
try {
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
.sample(((Number) metricsMap.get(metricName)).doubleValue());
} catch (Throwable e) {
logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
}
}
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
return dockerOperations.getContainer(containerName);
}
@Override
public String getHostname() {
return hostname;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
private void configureContainerMetrics(ContainerNodeSpec nodeSpec) {
if (! storageMaintainer.isPresent()) return;
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
nodeSpec.owner.ifPresent(owner ->
scheduleMaker
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
scheduleMaker
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
vespaVersion.ifPresent(version -> scheduleMaker.withTag("vespaVersion", version));
try {
scheduleMaker.writeTo(yamasAgentFolder);
final String[] restartYamasAgent = new String[] {"service" , "yamas-agent", "restart"};
dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
private Optional<String> vespaVersion = Optional.empty();
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
vespaVersion = dockerOperations.getVespaVersion(container.name);
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
});
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
/**
 * Returns a snapshot of this agent's internal state for debugging.
 * Safe to call before the first converge(): the node repo state is then
 * reported as "unknown" instead of dereferencing a null lastNodeSpec.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // Bug fix: lastNodeSpec is null until converge() has run at least once,
    // so the previous unconditional lastNodeSpec.nodeState.name() could NPE.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
/** Starts the converge loop with the given interval; may only be called once. */
@Override
public void start(int intervalMillis) {
    addDebugMessage("Starting with interval " + intervalMillis + "ms");
    delaysBetweenEachConvergeMillis = intervalMillis;
    if (loopThread != null) {
        throw new RuntimeException("Can not restart a node agent.");
    }
    Runnable loop = () -> {
        while (!terminated.get()) {
            tick();
        }
    };
    loopThread = new Thread(loop);
    loopThread.setName("tick-" + hostname);
    loopThread.start();
}
/**
 * Stops the converge loop; may only be called once. Waits up to 10 seconds
 * for the loop thread to finish and logs an error if it is still alive.
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        // Bug fix: restore the interrupt flag so callers further up the stack
        // can still observe the interruption (it was silently swallowed before).
        Thread.currentThread().interrupt();
        logger.error("Interrupted; Could not stop host thread " + hostname);
    }
}
/**
 * Best-effort generation of the filebeat config for this node. Any failure,
 * including Errors, is logged and deliberately swallowed (experimental feature).
 */
private void experimentalWriteFile(final ContainerNodeSpec nodeSpec) {
    try {
        Optional<String> config = new FilebeatConfigProvider(environment).getConfig(nodeSpec);
        if (!config.isPresent()) {
            logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
            return;
        }
        Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
        Files.write(filebeatPath, config.get().getBytes());
        logger.info("Wrote filebeat config.");
    } catch (Throwable t) {
        logger.error("Failed writing filebeat config; " + nodeSpec, t);
    }
}
/**
 * Runs the optional in-container resume command exactly once after the
 * container has been (re)started, then marks the container fully RUNNING.
 */
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
    if (containerState != RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) return;

    experimentalWriteFile(nodeSpec);
    addDebugMessage("Starting optional node program resume command");
    logger.info("Starting optional node program resume command");
    dockerOperations.resumeNode(containerName);
    containerState = RUNNING;
}
/**
 * Clears the docker image and vespa version in the node repo (the container
 * is gone at this point) and then marks the node as ready.
 */
private void updateNodeRepoAndMarkNodeAsReady(ContainerNodeSpec nodeSpec) {
    NodeAttributes clearedAttributes = new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(new DockerImage(""))
            .withVespaVersion("");
    publishStateToNodeRepoIfChanged(clearedAttributes);
    nodeRepository.markAsReady(nodeSpec.hostname);
}
/** Publishes the currently wanted image/generations and observed vespa version to the node repo. */
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
    publishStateToNodeRepoIfChanged(new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(nodeSpec.wantedDockerImage.orElse(new DockerImage("")))
            .withVespaVersion(vespaVersion.orElse("")));
}
/** Writes attributes to the node repo, but only when they differ from the last successful write. */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (currentAttributes.equals(lastAttributesSet)) return;

    logger.info("Publishing new set of attributes to node repo: "
            + lastAttributesSet + " -> " + currentAttributes);
    addDebugMessage("Publishing new set of attributes to node repo: {" +
            lastAttributesSet + "} -> {" + currentAttributes + "}");
    nodeRepository.updateNodeAttributes(hostname, currentAttributes);
    // Only reached when the write did not throw, so the cache stays consistent.
    lastAttributesSet = currentAttributes;
}
// Starts the container if none is present. Order matters: ACLs are applied
// before the container starts, and per-container metric/CPU/version state is
// reset for the new container instance.
private void startContainerIfNeeded(final ContainerNodeSpec nodeSpec) {
if (! getContainer().isPresent()) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
metricReceiver.unsetMetricsForContainer(hostname);
// New container => restart CPU accounting from now.
lastCpuMetric = new CpuUsageReporter(clock.instant());
vespaVersion = dockerOperations.getVespaVersion(containerName);
configureContainerMetrics(nodeSpec);
addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
// The resume script must still run before the node is considered fully up.
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
}
}
/**
 * Removes the container if the node spec says it should no longer run; if it
 * is kept, restarts its services when the restart generation has been bumped.
 */
private void removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec) {
    Optional<Container> keptContainer = removeContainerIfNeeded(nodeSpec);
    if (!keptContainer.isPresent()) return;

    Optional<String> restartReason = shouldRestartServices(nodeSpec);
    if (restartReason.isPresent()) {
        logger.info("Will restart services for container " + keptContainer.get() + ": " + restartReason.get());
        restartServices(nodeSpec, keptContainer.get());
    }
}
/**
 * Returns the reason services should be restarted, or empty when no restart
 * is needed. A restart is needed when a restart generation is wanted and the
 * current generation is absent or lags behind the wanted one.
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if ( ! nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (! nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        // Bug fix: currentRestartGeneration may be absent on this branch, so the
        // previous unconditional .get() while building the message could throw
        // NoSuchElementException. Use orElse(null) to render "null" instead.
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.orElse(null) + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
/**
 * Restarts Vespa services in the container, but only when the container is
 * running and the node is active; asks the orchestrator for permission first.
 */
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    if (!existingContainer.state.isRunning() || nodeSpec.nodeState != Node.State.active) return;

    ContainerName containerName = existingContainer.name;
    logger.info("Restarting services for " + containerName);
    // May throw if the orchestrator denies suspension, aborting the restart.
    orchestratorSuspendNode();
    dockerOperations.restartVespaOnNode(containerName);
}
// Suspends (best effort) and then stops all Vespa services inside the container.
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
/** Returns the reason the existing container should be removed, or empty to keep it. */
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final Node.State nodeState = nodeSpec.nodeState;
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    boolean wantsDifferentImage = nodeSpec.wantedDockerImage.isPresent()
            && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image);
    if (wantsDifferentImage) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    return Optional.empty();
}
// Schedules an asynchronous download of the wanted image unless it is already
// local or already being downloaded; clears the in-flight marker otherwise.
// NOTE(review): assumes nodeSpec.wantedDockerImage is present — converge()
// only calls this for active nodes; confirm that invariant holds upstream.
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
    DockerImage wantedImage = nodeSpec.wantedDockerImage.get();
    if (dockerOperations.shouldScheduleDownloadOfImage(wantedImage)) {
        if (wantedImage.equals(imageBeingDownloaded)) {
            return;  // This image is already being fetched.
        }
        imageBeingDownloaded = wantedImage;
        dockerOperations.scheduleDownloadOfImage(containerName, nodeSpec, this::signalWorkToBeDone);
    } else if (imageBeingDownloaded != null) {
        imageBeingDownloaded = null;
    }
}
/** Wakes the tick loop immediately; no-op if work is already flagged. */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (workToDoNow) return;

        workToDoNow = true;
        addDebugMessage("Signaling work to be done");
        monitor.notifyAll();
    }
}
// One iteration of the converge loop: wait out the remainder of the interval
// (or until signalWorkToBeDone() wakes us), latch the frozen state under the
// lock, then converge outside the lock unless frozen.
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
// Sleep until the interval has elapsed since the last converge, unless
// workToDoNow was signalled (which also wakes the wait below).
while (! workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
// Apply any pending freeze/unfreeze request at a converge boundary.
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
// Expected when the orchestrator denies an operation; retry next tick.
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
// Errors (OOM etc.) are considered fatal for the whole process.
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
// Drives the node towards the state the node repository wants: fetches the
// node spec and dispatches on its state. Statement order within each case is
// significant (e.g. container removal before start, repo update before resume).
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
// Spec changed => previously published metric dimensions may be stale.
metricReceiver.unsetMetricsForContainer(hostname);
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
// No container should run in these states; keep the repo in sync.
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
// Defer everything else until the wanted image is locally available.
if (imageBeingDownloaded != null) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
removeContainerIfNeededUpdateContainerState(nodeSpec);
startContainerIfNeeded(nodeSpec);
runLocalResumeScriptIfNeeded(nodeSpec);
// Report the fresh image/version before telling the orchestrator we are up.
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoAndMarkNodeAsReady(nodeSpec);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
// Publishes per-container metrics (CPU, memory, network, disk, uptime) tagged
// with node/application dimensions. Reads raw stats maps straight from the
// Docker stats API, hence the unchecked casts.
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
// Everything below needs a container and its stats; bail out quietly otherwise.
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if ( ! containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
long currentCpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
long currentCpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
// Delta-based CPU share of the host since the previous sample (see CpuUsageReporter).
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(currentCpuContainerTotalTime, currentCpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
// Page cache is reclaimable, so report usage minus cache.
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
long bytesInGB = 1 << 30;
nodeSpec.minDiskAvailableGb.ifPresent(diskGB -> metricReceiver
.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskGB * bytesInGB));
storageMaintainer.ifPresent(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName)
.forEach((metricName, metricValue) ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, metricName).sample(metricValue.doubleValue())));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
}
// Samples a single value from a raw stats map under the given metric name.
// Missing maps/keys are silently skipped; any failure while declaring or
// converting the value is logged and swallowed so one bad metric cannot
// abort the whole metrics pass.
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
Map<String, Object> metricsMap = (Map<String, Object>) metrics;
if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
try {
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
.sample(((Number) metricsMap.get(metricName)).doubleValue());
} catch (Throwable e) {
logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
}
}
/** Returns the container backing this agent, or empty when we already know it is absent. */
private Optional<Container> getContainer() {
    return containerState == ABSENT ? Optional.empty() : dockerOperations.getContainer(containerName);
}
// The hostname of the node this agent manages.
@Override
public String getHostname() {
return hostname;
}
// True while an image download scheduled by scheduleDownLoadIfNeeded() is in flight.
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
/** Returns the number of unhandled exceptions seen since the last call and resets the counter. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
// Writes the secret-agent (yamas) schedule for this container and restarts the
// in-container yamas agent so the new schedule takes effect. No-op when no
// storage maintainer is configured.
private void configureContainerMetrics(ContainerNodeSpec nodeSpec) {
if (! storageMaintainer.isPresent()) return;
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
// 60-second schedule running the vespa check with the "all" argument.
SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
nodeSpec.owner.ifPresent(owner ->
scheduleMaker
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
scheduleMaker
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
vespaVersion.ifPresent(version -> scheduleMaker.withTag("vespaVersion", version));
try {
scheduleMaker.writeTo(yamasAgentFolder);
final String[] restartYamasAgent = new String[] {"service" , "yamas-agent", "restart"};
dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
// Tracks cumulative container/system CPU counters between samples and derives
// the container's CPU share of the host since the previous sample.
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
// When the container (or this reporter) was created; basis for uptime.
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
// Returns the container's CPU usage as a percentage of host CPU time elapsed
// since the previous call. The very first call returns 0 because
// totalSystemUsage is still the 0 sentinel (no baseline yet); likewise a
// zero system-time delta yields 0 to avoid division by zero.
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
// Seconds elapsed since 'created', per the agent's clock.
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
// Asks the orchestrator for permission to suspend this node; throws
// (e.g. OrchestratorException) if permission is denied.
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} |
Added `try/catch` around `stopServices()`. PTAL. | private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec) {
// Removes the container when the node spec says it should no longer run.
// Returns the container when it is kept, or empty when there is none / it was removed.
Optional<Container> existingContainer = getContainer();
if (!existingContainer.isPresent()) return Optional.empty();
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer.get());
if (removeReason.isPresent()) {
    logger.info("Will remove container " + existingContainer.get() + ": " + removeReason.get());
    if (existingContainer.get().state.isRunning()) {
        if (nodeSpec.nodeState == Node.State.active) {
            orchestratorSuspendNode();
        }
        // Bug fix (per review): stopping services is best effort — an unresponsive
        // container must not prevent its removal below.
        try {
            stopServices();
        } catch (Exception e) {
            logger.info("Failed stopping services, ignoring", e);
        }
    }
    vespaVersion = Optional.empty();
    dockerOperations.removeContainer(existingContainer.get());
    metricReceiver.unsetMetricsForContainer(hostname);
    containerState = ABSENT;
    return Optional.empty();
}
return existingContainer;
} | stopServices(); | private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec) {
// Removes the container when the node spec says it should no longer run.
// Returns the container when it is kept, or empty when there is none / it was removed.
Optional<Container> existingContainer = getContainer();
if (!existingContainer.isPresent()) return Optional.empty();
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer.get());
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer.get() + ": " + removeReason.get());
if (existingContainer.get().state.isRunning()) {
// Active nodes need orchestrator permission before being taken down.
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
// Best effort: an unresponsive container must not prevent removal below.
try {
stopServices();
} catch (Exception e) {
logger.info("Failed stopping services, ignoring", e);
}
}
vespaVersion = Optional.empty();
dockerOperations.removeContainer(existingContainer.get());
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
return Optional.empty();
}
return existingContainer;
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
private Optional<String> vespaVersion = Optional.empty();
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
vespaVersion = dockerOperations.getVespaVersion(container.name);
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
});
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
/**
 * Returns a snapshot of this agent's internal state for debugging.
 * Safe to call before the first converge(): the node repo state is then
 * reported as "unknown" instead of dereferencing a null lastNodeSpec.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // Bug fix: lastNodeSpec is null until converge() has run at least once.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (! terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
/**
 * Stops the converge loop; may only be called once. Waits up to 10 seconds
 * for the loop thread to finish and logs an error if it is still alive.
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        // Bug fix: restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
        logger.error("Interrupted; Could not stop host thread " + hostname);
    }
}
private void experimentalWriteFile(final ContainerNodeSpec nodeSpec) {
try {
FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
if (! config.isPresent()) {
logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
return;
}
Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
Files.write(filebeatPath, config.get().getBytes());
logger.info("Wrote filebeat config.");
} catch (Throwable t) {
logger.error("Failed writing filebeat config; " + nodeSpec, t);
}
}
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (containerState != RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
return;
}
experimentalWriteFile(nodeSpec);
addDebugMessage("Starting optional node program resume command");
logger.info("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
containerState = RUNNING;
}
private void updateNodeRepoAndMarkNodeAsReady(ContainerNodeSpec nodeSpec) {
publishStateToNodeRepoIfChanged(
new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
nodeRepository.markAsReady(nodeSpec.hostname);
}
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.orElse(new DockerImage("")))
.withVespaVersion(vespaVersion.orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
lastAttributesSet = currentAttributes;
}
}
private void startContainerIfNeeded(final ContainerNodeSpec nodeSpec) {
if (! getContainer().isPresent()) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
metricReceiver.unsetMetricsForContainer(hostname);
lastCpuMetric = new CpuUsageReporter(clock.instant());
vespaVersion = dockerOperations.getVespaVersion(containerName);
configureContainerMetrics(nodeSpec);
addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
}
}
private void removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec) {
removeContainerIfNeeded(nodeSpec).ifPresent(existingContainer ->
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + existingContainer + ": " + restartReason);
restartServices(nodeSpec, existingContainer);
}));
}
/**
 * Returns the reason services should be restarted, or empty when no restart
 * is needed. A restart is needed when a restart generation is wanted and the
 * current generation is absent or lags behind the wanted one.
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if ( ! nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (! nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        // Bug fix: currentRestartGeneration may be absent on this branch, so the
        // previous unconditional .get() could throw NoSuchElementException.
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.orElse(null) + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
return Optional.empty();
}
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, nodeSpec, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
private void signalWorkToBeDone() {
synchronized (monitor) {
if (! workToDoNow) {
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
}
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (! workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
metricReceiver.unsetMetricsForContainer(hostname);
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
if (imageBeingDownloaded != null) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
removeContainerIfNeededUpdateContainerState(nodeSpec);
startContainerIfNeeded(nodeSpec);
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoAndMarkNodeAsReady(nodeSpec);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if ( ! containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
long currentCpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
long currentCpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(currentCpuContainerTotalTime, currentCpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
long bytesInGB = 1 << 30;
nodeSpec.minDiskAvailableGb.ifPresent(diskGB -> metricReceiver
.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskGB * bytesInGB));
storageMaintainer.ifPresent(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName)
.forEach((metricName, metricValue) ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, metricName).sample(metricValue.doubleValue())));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
}
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
Map<String, Object> metricsMap = (Map<String, Object>) metrics;
if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
try {
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
.sample(((Number) metricsMap.get(metricName)).doubleValue());
} catch (Throwable e) {
logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
}
}
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
return dockerOperations.getContainer(containerName);
}
@Override
public String getHostname() {
return hostname;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
private void configureContainerMetrics(ContainerNodeSpec nodeSpec) {
if (! storageMaintainer.isPresent()) return;
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
nodeSpec.owner.ifPresent(owner ->
scheduleMaker
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
scheduleMaker
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
vespaVersion.ifPresent(version -> scheduleMaker.withTag("vespaVersion", version));
try {
scheduleMaker.writeTo(yamasAgentFolder);
final String[] restartYamasAgent = new String[] {"service" , "yamas-agent", "restart"};
dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
private Optional<String> vespaVersion = Optional.empty();
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
vespaVersion = dockerOperations.getVespaVersion(container.name);
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
});
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
@Override
public Map<String, Object> debugInfo() {
Map<String, Object> debug = new LinkedHashMap<>();
debug.put("Hostname", hostname);
debug.put("isFrozen", isFrozen);
debug.put("wantFrozen", wantFrozen);
debug.put("terminated", terminated);
debug.put("workToDoNow", workToDoNow);
synchronized (debugMessages) {
debug.put("History", new LinkedList<>(debugMessages));
}
debug.put("Node repo state", lastNodeSpec.nodeState.name());
return debug;
}
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (! terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
@Override
public void stop() {
addDebugMessage("Stopping");
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop host thread " + hostname);
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop host thread " + hostname);
}
}
private void experimentalWriteFile(final ContainerNodeSpec nodeSpec) {
try {
FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
if (! config.isPresent()) {
logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
return;
}
Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
Files.write(filebeatPath, config.get().getBytes());
logger.info("Wrote filebeat config.");
} catch (Throwable t) {
logger.error("Failed writing filebeat config; " + nodeSpec, t);
}
}
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (containerState != RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
return;
}
experimentalWriteFile(nodeSpec);
addDebugMessage("Starting optional node program resume command");
logger.info("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
containerState = RUNNING;
}
private void updateNodeRepoAndMarkNodeAsReady(ContainerNodeSpec nodeSpec) {
publishStateToNodeRepoIfChanged(
new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
nodeRepository.markAsReady(nodeSpec.hostname);
}
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.orElse(new DockerImage("")))
.withVespaVersion(vespaVersion.orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
lastAttributesSet = currentAttributes;
}
}
private void startContainerIfNeeded(final ContainerNodeSpec nodeSpec) {
if (! getContainer().isPresent()) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
metricReceiver.unsetMetricsForContainer(hostname);
lastCpuMetric = new CpuUsageReporter(clock.instant());
vespaVersion = dockerOperations.getVespaVersion(containerName);
configureContainerMetrics(nodeSpec);
addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
}
}
private void removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec) {
removeContainerIfNeeded(nodeSpec).ifPresent(existingContainer ->
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + existingContainer + ": " + restartReason);
restartServices(nodeSpec, existingContainer);
}));
}
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
if ( ! nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
if (! nodeSpec.currentRestartGeneration.isPresent() ||
nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
}
return Optional.empty();
}
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
return Optional.empty();
}
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, nodeSpec, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
private void signalWorkToBeDone() {
synchronized (monitor) {
if (! workToDoNow) {
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
}
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (! workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
metricReceiver.unsetMetricsForContainer(hostname);
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
if (imageBeingDownloaded != null) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
removeContainerIfNeededUpdateContainerState(nodeSpec);
startContainerIfNeeded(nodeSpec);
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoAndMarkNodeAsReady(nodeSpec);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if ( ! containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
long currentCpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
long currentCpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(currentCpuContainerTotalTime, currentCpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
long bytesInGB = 1 << 30;
nodeSpec.minDiskAvailableGb.ifPresent(diskGB -> metricReceiver
.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskGB * bytesInGB));
storageMaintainer.ifPresent(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName)
.forEach((metricName, metricValue) ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, metricName).sample(metricValue.doubleValue())));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
}
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
Map<String, Object> metricsMap = (Map<String, Object>) metrics;
if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
try {
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
.sample(((Number) metricsMap.get(metricName)).doubleValue());
} catch (Throwable e) {
logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
}
}
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
return dockerOperations.getContainer(containerName);
}
@Override
public String getHostname() {
return hostname;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
private void configureContainerMetrics(ContainerNodeSpec nodeSpec) {
if (! storageMaintainer.isPresent()) return;
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
nodeSpec.owner.ifPresent(owner ->
scheduleMaker
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
scheduleMaker
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
vespaVersion.ifPresent(version -> scheduleMaker.withTag("vespaVersion", version));
try {
scheduleMaker.writeTo(yamasAgentFolder);
final String[] restartYamasAgent = new String[] {"service" , "yamas-agent", "restart"};
dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} |
Also log the exception. | private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec) {
Optional<Container> existingContainer = getContainer();
if (!existingContainer.isPresent()) return Optional.empty();
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer.get());
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer.get() + ": " + removeReason.get());
if (existingContainer.get().state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
logger.info("Failed stopping services, ignoring");
}
}
vespaVersion = Optional.empty();
dockerOperations.removeContainer(existingContainer.get());
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
return Optional.empty();
}
return existingContainer;
} | logger.info("Failed stopping services, ignoring"); | private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec) {
Optional<Container> existingContainer = getContainer();
if (!existingContainer.isPresent()) return Optional.empty();
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer.get());
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer.get() + ": " + removeReason.get());
if (existingContainer.get().state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
logger.info("Failed stopping services, ignoring", e);
}
}
vespaVersion = Optional.empty();
dockerOperations.removeContainer(existingContainer.get());
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
return Optional.empty();
}
return existingContainer;
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
private Optional<String> vespaVersion = Optional.empty();
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
vespaVersion = dockerOperations.getVespaVersion(container.name);
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
});
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
/**
 * Returns a snapshot of this agent's internal state for debugging/introspection.
 * Insertion order is preserved (LinkedHashMap) so output is stable.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        // Copy under the lock so the caller gets a stable snapshot of the history.
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // BUG FIX: lastNodeSpec is null until the first successful converge();
    // dereferencing it unconditionally threw NullPointerException if debugInfo()
    // was called before the first tick completed.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
/**
 * Starts the agent's converge loop with the given interval between ticks.
 * May only be called once; a second call throws.
 */
@Override
public void start(int intervalMillis) {
    addDebugMessage("Starting with interval " + intervalMillis + "ms");
    delaysBetweenEachConvergeMillis = intervalMillis;
    if (loopThread != null) {
        throw new RuntimeException("Can not restart a node agent.");
    }
    // Loop until stop() flips the terminated flag.
    Runnable loop = () -> {
        while (!terminated.get()) {
            tick();
        }
    };
    loopThread = new Thread(loop, "tick-" + hostname);
    loopThread.start();
}
/**
 * Stops the agent: marks it terminated, wakes the tick loop, and waits up to
 * 10 seconds for the loop thread to exit. May only be called once.
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        // FIX: restore the interrupt status instead of silently swallowing it,
        // so callers further up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        logger.error("Interrupted; Could not stop host thread " + hostname);
    }
}
// Best-effort: generates and writes the filebeat config for this container's
// /etc/filebeat/filebeat.yml (as seen from node-admin). Any failure, including
// the provider returning no config, is logged and otherwise ignored — the broad
// catch (Throwable) is deliberate so this experimental feature can never take
// down the agent loop.
// NOTE(review): config.get().getBytes() uses the platform-default charset;
// presumably UTF-8 is intended — confirm before changing.
private void experimentalWriteFile(final ContainerNodeSpec nodeSpec) {
try {
FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
if (! config.isPresent()) {
logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
return;
}
Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
Files.write(filebeatPath, config.get().getBytes());
logger.info("Wrote filebeat config.");
} catch (Throwable t) {
logger.error("Failed writing filebeat config; " + nodeSpec, t);
}
}
/**
 * Runs the optional in-container resume command exactly once after the container
 * (re)starts, then advances containerState to RUNNING. No-op in any other state.
 */
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
    if (containerState == RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
        experimentalWriteFile(nodeSpec);
        addDebugMessage("Starting optional node program resume command");
        logger.info("Starting optional node program resume command");
        dockerOperations.resumeNode(containerName);
        containerState = RUNNING;
    }
}
/**
 * Clears the docker image and vespa version attributes in the node repo
 * (publishing only if they changed), then hands the node back as ready.
 */
private void updateNodeRepoAndMarkNodeAsReady(ContainerNodeSpec nodeSpec) {
    NodeAttributes clearedAttributes = new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(new DockerImage(""))
            .withVespaVersion("");
    publishStateToNodeRepoIfChanged(clearedAttributes);
    nodeRepository.markAsReady(nodeSpec.hostname);
}
/**
 * Publishes this node's current attributes (restart/reboot generation, docker
 * image, vespa version) to the node repo, but only if they changed since last time.
 */
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
    publishStateToNodeRepoIfChanged(new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(nodeSpec.wantedDockerImage.orElse(new DockerImage("")))
            .withVespaVersion(vespaVersion.orElse("")));
}
/**
 * Writes the given attributes to the node repo and remembers them, unless they
 * are identical to the last set published (avoids redundant node-repo traffic).
 */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (currentAttributes.equals(lastAttributesSet)) {
        return;
    }
    logger.info("Publishing new set of attributes to node repo: "
            + lastAttributesSet + " -> " + currentAttributes);
    addDebugMessage("Publishing new set of attributes to node repo: {" +
            lastAttributesSet + "} -> {" + currentAttributes + "}");
    nodeRepository.updateNodeAttributes(hostname, currentAttributes);
    lastAttributesSet = currentAttributes;
}
/**
 * Starts the container if none is running: applies ACLs, starts it, resets
 * container-scoped metrics and the CPU tracker, reads the vespa version, and
 * sets up in-container metrics collection. containerState is advanced so the
 * resume script will run on the next step of converge().
 */
private void startContainerIfNeeded(final ContainerNodeSpec nodeSpec) {
    if (getContainer().isPresent()) {
        return; // Container already exists; nothing to do.
    }
    aclMaintainer.ifPresent(AclMaintainer::run);
    dockerOperations.startContainer(containerName, nodeSpec);
    metricReceiver.unsetMetricsForContainer(hostname);
    lastCpuMetric = new CpuUsageReporter(clock.instant());
    vespaVersion = dockerOperations.getVespaVersion(containerName);
    configureContainerMetrics(nodeSpec);
    addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
            RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
    containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
}
// Removes the container if warranted, otherwise restarts its services when a
// restart has been requested via the restart generation.
// NOTE(review): removeContainerIfNeeded(...) is defined elsewhere in this file;
// presumably its Optional is present when the container was KEPT (so services
// can be restarted in it) — confirm its contract before relying on this.
private void removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec) {
removeContainerIfNeeded(nodeSpec).ifPresent(existingContainer ->
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + existingContainer + ": " + restartReason);
restartServices(nodeSpec, existingContainer);
}));
}
/**
 * Decides whether services should be restarted, based on the restart generations.
 *
 * @return a human-readable reason if the current restart generation is missing or
 *         lags the wanted one; empty if no restart is wanted or needed
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if ( ! nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (! nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        // BUG FIX: the reason string previously called currentRestartGeneration.get()
        // unconditionally, throwing NoSuchElementException in exactly the branch
        // where the Optional can be empty. Render the absent case safely instead.
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.map(String::valueOf).orElse("not set")
                + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
/**
 * Restarts the Vespa services inside the given container, but only when the
 * container is running and the node is active. Suspends via the Orchestrator
 * first so the restart is done with permission.
 */
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    boolean runningAndActive = existingContainer.state.isRunning()
            && nodeSpec.nodeState == Node.State.active;
    if (!runningAndActive) {
        return;
    }
    ContainerName containerToRestart = existingContainer.name;
    logger.info("Restarting services for " + containerToRestart);
    // Ask the Orchestrator for permission before taking services down.
    orchestratorSuspendNode();
    dockerOperations.restartVespaOnNode(containerToRestart);
}
// Stops the Vespa services inside this node's container: first a best-effort
// suspend (trySuspendNode), then an actual stop of the services. Part of the
// NodeAgent interface; does not remove the container itself.
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
/**
 * Decides whether the existing container should be removed. Checks are made in
 * priority order and the first matching reason wins.
 *
 * @return a human-readable reason to remove the container, or empty to keep it
 */
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final Node.State nodeState = nodeSpec.nodeState;

    // dirty/provisioned nodes must not have a running container.
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }

    // A new wanted image requires recreating the container.
    boolean imageChanged = nodeSpec.wantedDockerImage.isPresent()
            && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image);
    if (imageChanged) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
    }

    // A stopped container is removed so it can be started fresh.
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    return Optional.empty();
}
// Schedules an asynchronous download of the wanted Docker image if it is not
// available locally; signalWorkToBeDone is passed as the completion callback so
// the tick loop wakes up when the download finishes. Once the image is present,
// imageBeingDownloaded is cleared.
// NOTE(review): nodeSpec.wantedDockerImage.get() is called without an isPresent()
// check — the only caller (the 'active' branch of converge()) appears to guarantee
// presence; confirm, otherwise this throws NoSuchElementException.
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, nodeSpec, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
/**
 * Wakes the tick loop so it converges immediately instead of waiting out the
 * remainder of the interval. Idempotent: a no-op if work is already signaled.
 */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (workToDoNow) {
            return; // Already signaled; nothing more to do.
        }
        workToDoNow = true;
        addDebugMessage("Signaling work to be done");
        monitor.notifyAll();
    }
}
// One iteration of the agent loop: wait until the converge interval has elapsed
// (or work is signaled early via signalWorkToBeDone), apply any pending freeze
// request, then converge unless frozen. Error policy: OrchestratorException is
// expected and logged at info; other Exceptions are counted and swallowed so the
// loop keeps running; any other Throwable is considered fatal and exits the JVM.
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
// Wait for either the remainder of the interval or an early wake-up.
while (! workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
// NOTE(review): interrupt status is deliberately not restored here —
// re-interrupting would make subsequent wait() calls throw immediately
// and busy-spin this loop. Confirm this is the intended policy.
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
// Freeze requests take effect here, at the top of a tick.
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
// Expected when the Orchestrator denies suspend/resume; retry next tick.
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
// Errors (OOM, linkage, ...) are unrecoverable: take the process down.
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
// Drives the node toward the state the node repo wants: fetches the node spec
// and dispatches on its state. Throws IllegalStateException if the node has
// disappeared from the node repo (caught and counted by tick()).
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
// On any spec change, reset container metrics so stale dimensions are dropped.
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
metricReceiver.unsetMetricsForContainer(hostname);
}
switch (nodeSpec.nodeState) {
// Non-running states: make sure no container runs, then report attributes.
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
// Active: ensure the wanted image is present, (re)start the container,
// run the resume script, publish attributes, and resume in the Orchestrator.
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
// Bail out while the image download is in flight; the download callback
// signals the tick loop so we converge again as soon as it completes.
if (imageBeingDownloaded != null) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
removeContainerIfNeededUpdateContainerState(nodeSpec);
startContainerIfNeeded(nodeSpec);
runLocalResumeScriptIfNeeded(nodeSpec);
// Publish attributes BEFORE resume: the config server must see the
// current state before routing traffic back to the node.
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
// Provisioned nodes are immediately pushed on to dirty for cleanup.
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
// Dirty: remove the container, archive application data, hand back as ready.
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoAndMarkNodeAsReady(nodeSpec);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
// Samples container-level metrics (CPU, memory, network, disk) from Docker stats
// and publishes them under dimensions identifying this node/application. No-op
// until the first converge() has loaded a node spec, or when no container exists.
// NOTE(review): the nested-map navigation below assumes the Docker stats API
// layout (cpu_stats.cpu_usage.total_usage, memory_stats.stats.cache, ...) —
// confirm against the Docker Engine version in use.
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
// node.alive is reported even when the container is absent.
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if ( ! containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
long currentCpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
long currentCpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
// Scale host CPU share by the number of containers to approximate usage
// relative to this container's allocation.
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(currentCpuContainerTotalTime, currentCpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
// Page-cache memory is subtracted so only "real" usage is reported.
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
// Per-interface network metrics get an extra "interface" dimension.
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
long bytesInGB = 1 << 30;
nodeSpec.minDiskAvailableGb.ifPresent(diskGB -> metricReceiver
.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskGB * bytesInGB));
storageMaintainer.ifPresent(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName)
.forEach((metricName, metricValue) ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, metricName).sample(metricValue.doubleValue())));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
}
/**
 * Samples {@code metrics.get(metricName)} as a gauge named {@code yamasName},
 * if the metrics map exists and contains the key. Best effort: any failure
 * (bad cast, null value, ...) is logged and swallowed so one malformed value
 * cannot break the metrics loop.
 */
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
    final Map<String, Object> values = (Map<String, Object>) metrics;
    if (values == null || !values.containsKey(metricName)) {
        return;
    }
    try {
        metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
                .sample(((Number) values.get(metricName)).doubleValue());
    } catch (Throwable e) {
        logger.warning("Failed to update " + yamasName + " metric with value " + values.get(metricName), e);
    }
}
/**
 * Returns this node's container, if any. Skips the Docker round trip entirely
 * when the agent already knows no container exists.
 */
private Optional<Container> getContainer() {
    return containerState == ABSENT
            ? Optional.empty()
            : dockerOperations.getContainer(containerName);
}
// Hostname of the node this agent manages; also the key used against the node repo.
@Override
public String getHostname() {
return hostname;
}
// True while an image download scheduled by scheduleDownLoadIfNeeded is in flight.
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
/**
 * Returns the number of unhandled exceptions counted by tick() since the last
 * call, resetting the counter to zero.
 */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
// Writes a secret-agent (yamas) schedule into the container so in-container
// metrics are collected every 60s, tagged with this node's identity, then
// restarts the yamas-agent service inside the container to pick up the schedule.
// No-op when no StorageMaintainer is configured. Throws RuntimeException if the
// schedule cannot be written.
private void configureContainerMetrics(ContainerNodeSpec nodeSpec) {
if (! storageMaintainer.isPresent()) return;
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
nodeSpec.owner.ifPresent(owner ->
scheduleMaker
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
scheduleMaker
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
vespaVersion.ifPresent(version -> scheduleMaker.withTag("vespaVersion", version));
try {
scheduleMaker.writeTo(yamasAgentFolder);
final String[] restartYamasAgent = new String[] {"service" , "yamas-agent", "restart"};
dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
// Tracks cumulative container and system CPU counters between samples and turns
// the deltas into a host-CPU usage percentage. Stateful: each call to
// getCpuUsagePercentage advances the baseline, so samples must come from a
// single thread (it is only driven from updateContainerNodeMetrics).
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
// Container creation (or agent start) time; basis for the uptime metric.
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
// Percentage of total host CPU the container used since the previous call.
// Returns 0 on the first call (totalSystemUsage == 0) and when the system
// counter did not advance, avoiding a bogus spike or division by zero.
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
// Advance the baseline for the next sample.
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
// Seconds elapsed since 'created', per the injected clock.
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
// Asks the Orchestrator for permission to suspend this node before disruptive
// operations (service restart). Presumably suspend() throws OrchestratorException
// when permission is denied — tick() catches that and retries next tick; confirm.
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
private Optional<String> vespaVersion = Optional.empty();
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
vespaVersion = dockerOperations.getVespaVersion(container.name);
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
});
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
@Override
public Map<String, Object> debugInfo() {
Map<String, Object> debug = new LinkedHashMap<>();
debug.put("Hostname", hostname);
debug.put("isFrozen", isFrozen);
debug.put("wantFrozen", wantFrozen);
debug.put("terminated", terminated);
debug.put("workToDoNow", workToDoNow);
synchronized (debugMessages) {
debug.put("History", new LinkedList<>(debugMessages));
}
debug.put("Node repo state", lastNodeSpec.nodeState.name());
return debug;
}
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (! terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
@Override
public void stop() {
addDebugMessage("Stopping");
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop host thread " + hostname);
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop host thread " + hostname);
}
}
private void experimentalWriteFile(final ContainerNodeSpec nodeSpec) {
try {
FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
if (! config.isPresent()) {
logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
return;
}
Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
Files.write(filebeatPath, config.get().getBytes());
logger.info("Wrote filebeat config.");
} catch (Throwable t) {
logger.error("Failed writing filebeat config; " + nodeSpec, t);
}
}
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (containerState != RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
return;
}
experimentalWriteFile(nodeSpec);
addDebugMessage("Starting optional node program resume command");
logger.info("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
containerState = RUNNING;
}
private void updateNodeRepoAndMarkNodeAsReady(ContainerNodeSpec nodeSpec) {
publishStateToNodeRepoIfChanged(
new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
nodeRepository.markAsReady(nodeSpec.hostname);
}
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.orElse(new DockerImage("")))
.withVespaVersion(vespaVersion.orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
lastAttributesSet = currentAttributes;
}
}
private void startContainerIfNeeded(final ContainerNodeSpec nodeSpec) {
if (! getContainer().isPresent()) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
metricReceiver.unsetMetricsForContainer(hostname);
lastCpuMetric = new CpuUsageReporter(clock.instant());
vespaVersion = dockerOperations.getVespaVersion(containerName);
configureContainerMetrics(nodeSpec);
addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
}
}
private void removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec) {
removeContainerIfNeeded(nodeSpec).ifPresent(existingContainer ->
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + existingContainer + ": " + restartReason);
restartServices(nodeSpec, existingContainer);
}));
}
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
if ( ! nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
if (! nodeSpec.currentRestartGeneration.isPresent() ||
nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
}
return Optional.empty();
}
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
return Optional.empty();
}
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, nodeSpec, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
private void signalWorkToBeDone() {
synchronized (monitor) {
if (! workToDoNow) {
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
}
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (! workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
metricReceiver.unsetMetricsForContainer(hostname);
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
if (imageBeingDownloaded != null) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
removeContainerIfNeededUpdateContainerState(nodeSpec);
startContainerIfNeeded(nodeSpec);
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoAndMarkNodeAsReady(nodeSpec);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
/**
 * Publishes node- and container-level metrics (CPU, memory, network, disk, liveness)
 * for this node, tagged with a fixed set of dimensions built from the last node spec.
 * Returns early without publishing anything if no node spec has been fetched yet,
 * and publishes only the "node.alive" gauge when no container exists or stats are
 * unavailable.
 *
 * @param numAllocatedContainersOnHost used to scale CPU usage from percentage-of-host
 *                                     to percentage of this node's allocated share
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
// Snapshot the field once; it may be swapped by the converge loop concurrently.
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
// Liveness gauge is published even when the container is absent.
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if ( ! containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
// Raw Docker stats are untyped nested maps, hence the unchecked casts below.
long currentCpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
long currentCpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
// CPU usage is a delta against the previous sample kept in lastCpuMetric.
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(currentCpuContainerTotalTime, currentCpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
// Report memory usage net of the page cache, which the kernel can reclaim.
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
// Per-interface network counters get an extra "interface" dimension.
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
long bytesInGB = 1 << 30;
nodeSpec.minDiskAvailableGb.ifPresent(diskGB -> metricReceiver
.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskGB * bytesInGB));
storageMaintainer.ifPresent(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName)
.forEach((metricName, metricValue) ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, metricName).sample(metricValue.doubleValue())));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
}
/**
 * Samples {@code metricsMap.get(metricName)} as the gauge {@code yamasName}, if the
 * given Docker stats sub-map exists and contains the key; otherwise does nothing.
 * Metric publishing is best-effort: failures are logged and swallowed.
 *
 * @param metrics a (possibly null) Map&lt;String, Object&gt; from Docker stats
 */
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
    Map<String, Object> metricsMap = (Map<String, Object>) metrics;
    if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
    try {
        metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
                .sample(((Number) metricsMap.get(metricName)).doubleValue());
    } catch (Exception e) {
        // Was catch (Throwable): narrowed so fatal Errors (e.g. OutOfMemoryError)
        // are no longer masked by best-effort metrics reporting.
        logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
    }
}
/** Returns this node's Docker container, or empty when the agent already knows none exists. */
private Optional<Container> getContainer() {
    // Skip the Docker round trip when the container is known to be gone.
    return containerState == ABSENT
            ? Optional.empty()
            : dockerOperations.getContainer(containerName);
}
/** Returns the hostname of the node this agent manages. */
@Override
public String getHostname() {
return hostname;
}
/** Returns true while a Docker image pull scheduled by this agent is still in flight. */
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
/**
 * Returns the number of unhandled exceptions the converge loop has seen since the
 * previous call, resetting the counter to zero.
 */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int unhandledSoFar = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return unhandledSoFar;
}
/**
 * Writes a secret-agent (yamas) schedule for the vespa metrics check into the
 * container's /etc/yamas-agent/ directory and restarts the yamas-agent service so it
 * picks the schedule up. No-op when no StorageMaintainer is configured.
 *
 * @throws RuntimeException if writing the schedule file fails
 */
private void configureContainerMetrics(ContainerNodeSpec nodeSpec) {
if (! storageMaintainer.isPresent()) return;
// Path is resolved from the node-admin side of the container filesystem.
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
// Tag order below is the order tags appear in the generated schedule file.
SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
nodeSpec.owner.ifPresent(owner ->
scheduleMaker
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
scheduleMaker
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
vespaVersion.ifPresent(version -> scheduleMaker.withTag("vespaVersion", version));
try {
scheduleMaker.writeTo(yamasAgentFolder);
// Restart the agent inside the container so the new schedule takes effect.
final String[] restartYamasAgent = new String[] {"service" , "yamas-agent", "restart"};
dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
/**
 * Tracks cumulative container and host CPU counters between samples and derives the
 * container's CPU usage as a percentage of total host CPU for the sampling interval.
 */
class CpuUsageReporter {
    private final Instant created;
    private long previousContainerUsage = 0;
    private long previousSystemUsage = 0;

    CpuUsageReporter(Instant created) {
        this.created = created;
    }

    /**
     * Returns container CPU time as a percentage of host CPU time since the previous
     * sample, then records the current counters. The very first sample returns 0
     * because there is no previous sample to diff against.
     */
    double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
        final long systemUsageDelta = currentSystemUsage - previousSystemUsage;
        final long containerUsageDelta = currentContainerUsage - previousContainerUsage;
        final boolean firstSample = previousSystemUsage == 0;
        double cpuUsagePct = (firstSample || systemUsageDelta == 0)
                ? 0
                : 100.0 * containerUsageDelta / systemUsageDelta;
        previousContainerUsage = currentContainerUsage;
        previousSystemUsage = currentSystemUsage;
        return cpuUsagePct;
    }

    /** Returns seconds elapsed since the tracked container was created. */
    long getUptime() {
        return Duration.between(created, clock.instant()).getSeconds();
    }
}
/**
 * Asks the Orchestrator to suspend this node before disruptive operations
 * (stopping/restarting services, removing the container).
 */
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} |
Fixed. | private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec) {
Optional<Container> existingContainer = getContainer();
if (!existingContainer.isPresent()) return Optional.empty();
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer.get());
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer.get() + ": " + removeReason.get());
if (existingContainer.get().state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
logger.info("Failed stopping services, ignoring");
}
}
vespaVersion = Optional.empty();
dockerOperations.removeContainer(existingContainer.get());
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
return Optional.empty();
}
return existingContainer;
} | logger.info("Failed stopping services, ignoring"); | private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec) {
Optional<Container> existingContainer = getContainer();
if (!existingContainer.isPresent()) return Optional.empty();
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer.get());
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer.get() + ": " + removeReason.get());
if (existingContainer.get().state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
logger.info("Failed stopping services, ignoring", e);
}
}
vespaVersion = Optional.empty();
dockerOperations.removeContainer(existingContainer.get());
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
return Optional.empty();
}
return existingContainer;
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
private Optional<String> vespaVersion = Optional.empty();
/**
 * Creates an agent for one node. If a container for the node already exists at
 * startup, the agent adopts it: a running container's Vespa version and creation
 * time are captured, and container state is set to
 * RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN so the resume script is (re)run.
 */
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
// Adopt a pre-existing container, if any, instead of assuming a clean slate.
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
vespaVersion = dockerOperations.getVespaVersion(container.name);
// Base CPU/uptime accounting on the container's actual creation time.
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
});
}
/**
 * Requests the agent to freeze (pause converging) or unfreeze.
 *
 * @return true when the agent has already reached the requested frozen state
 */
@Override
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        boolean requestChanged = wantFrozen != frozen;
        if (requestChanged) {
            wantFrozen = frozen;
            addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
            // Wake the converge loop so the request is acted upon promptly.
            signalWorkToBeDone();
        }
        return isFrozen == frozen;
    }
}
/**
 * Appends a timestamped message to the bounded in-memory debug history (capped at
 * ~1000 entries, oldest dropped first) and logs it at debug level.
 * All access to debugMessages and the (non-thread-safe) SimpleDateFormat happens
 * under the debugMessages lock.
 */
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
/**
 * Returns a snapshot of this agent's internal state for debugging. Flag reads are
 * unsynchronized snapshots; the debug-message history is copied under its own lock.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // lastNodeSpec is null until the first converge() fetches a spec; guard against
    // the NPE the previous code threw when debugInfo() was called before that.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
/**
 * Starts the agent's converge loop in a dedicated thread. May only be called once;
 * restarting a node agent is not supported.
 *
 * @param intervalMillis delay between converge attempts
 */
@Override
public void start(int intervalMillis) {
    addDebugMessage("Starting with interval " + intervalMillis + "ms");
    delaysBetweenEachConvergeMillis = intervalMillis;
    if (loopThread != null) {
        throw new RuntimeException("Can not restart a node agent.");
    }
    Runnable convergeLoop = () -> {
        while (!terminated.get()) {
            tick();
        }
    };
    loopThread = new Thread(convergeLoop);
    loopThread.setName("tick-" + hostname);
    loopThread.start();
}
/**
 * Stops the agent: marks it terminated, wakes the converge loop and waits up to 10s
 * for the loop thread to exit. May only be called once.
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        // Restore the interrupt status so callers can observe the interruption;
        // the previous code swallowed it entirely.
        Thread.currentThread().interrupt();
        logger.error("Interrupted; Could not stop host thread " + hostname);
    }
}
/**
 * Experimental: generates a filebeat config for the node and writes it into the
 * container's /etc/filebeat/filebeat.yml. All failures are logged and swallowed so
 * this experimental feature can never break the converge loop.
 */
private void experimentalWriteFile(final ContainerNodeSpec nodeSpec) {
try {
FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
if (! config.isPresent()) {
logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
return;
}
Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
// NOTE(review): getBytes() uses the platform default charset — confirm UTF-8 is intended.
Files.write(filebeatPath, config.get().getBytes());
logger.info("Wrote filebeat config.");
} catch (Throwable t) {
// Deliberately catch-all: experimental path must never take down the agent.
logger.error("Failed writing filebeat config; " + nodeSpec, t);
}
}
/**
 * Runs the container's optional resume command exactly once after a (re)start:
 * only acts while in RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN, then transitions the
 * container state to RUNNING.
 */
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (containerState != RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
return;
}
// Write the (experimental) filebeat config before resuming services.
experimentalWriteFile(nodeSpec);
addDebugMessage("Starting optional node program resume command");
logger.info("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
containerState = RUNNING;
}
/**
 * Clears the node's Docker image and Vespa version in the node repo (publishing only
 * if changed) and then marks the node as ready. Used after a dirty node's
 * application storage has been archived.
 */
private void updateNodeRepoAndMarkNodeAsReady(ContainerNodeSpec nodeSpec) {
    NodeAttributes clearedAttributes = new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(new DockerImage(""))
            .withVespaVersion("");
    publishStateToNodeRepoIfChanged(clearedAttributes);
    nodeRepository.markAsReady(nodeSpec.hostname);
}
/**
 * Publishes the node's current restart/reboot generations, Docker image and Vespa
 * version to the node repo, skipping the write when nothing changed.
 */
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
    publishStateToNodeRepoIfChanged(new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(nodeSpec.wantedDockerImage.orElse(new DockerImage("")))
            .withVespaVersion(vespaVersion.orElse("")));
}
/**
 * Writes the given node attributes to the node repo, but only when they differ from
 * the last set this agent published; remembers what was published last.
 */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (currentAttributes.equals(lastAttributesSet)) return;

    logger.info("Publishing new set of attributes to node repo: "
            + lastAttributesSet + " -> " + currentAttributes);
    addDebugMessage("Publishing new set of attributes to node repo: {" +
            lastAttributesSet + "} -> {" + currentAttributes + "}");
    nodeRepository.updateNodeAttributes(hostname, currentAttributes);
    lastAttributesSet = currentAttributes;
}
/**
 * Starts the node's container if it does not already exist: refreshes ACLs, starts
 * the container, resets per-container metrics/CPU accounting, captures the Vespa
 * version and installs the metrics schedule. Leaves container state at
 * RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN so the resume script will still be run.
 */
private void startContainerIfNeeded(final ContainerNodeSpec nodeSpec) {
if (! getContainer().isPresent()) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
// Old metrics belong to the previous container instance.
metricReceiver.unsetMetricsForContainer(hostname);
lastCpuMetric = new CpuUsageReporter(clock.instant());
vespaVersion = dockerOperations.getVespaVersion(containerName);
configureContainerMetrics(nodeSpec);
addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
}
}
/**
 * Removes the container if required; when it is kept, additionally restarts its
 * services if the node repo's wanted restart generation has been bumped.
 */
private void removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec) {
removeContainerIfNeeded(nodeSpec).ifPresent(existingContainer ->
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + existingContainer + ": " + restartReason);
restartServices(nodeSpec, existingContainer);
}));
}
/**
 * Returns the reason services should be restarted, or empty when no restart is
 * wanted. A restart is wanted when a wanted restart generation exists and the node
 * either has no current generation or lags behind the wanted one.
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if ( ! nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (! nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        // The previous code called currentRestartGeneration.get() in the message even
        // though this branch is explicitly reachable when it is absent, which threw
        // NoSuchElementException instead of producing the reason string.
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.map(String::valueOf).orElse("[absent]")
                + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
/**
 * Restarts Vespa services in the container — only when the container is running and
 * the node is active. Suspends the node via the Orchestrator first.
 */
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    if (!existingContainer.state.isRunning() || nodeSpec.nodeState != Node.State.active) return;

    ContainerName containerName = existingContainer.name;
    logger.info("Restarting services for " + containerName);
    // Restarting is disruptive, so get the Orchestrator's permission first.
    orchestratorSuspendNode();
    dockerOperations.restartVespaOnNode(containerName);
}
/**
 * Stops Vespa services in the container: first tries to suspend the node (best
 * effort), then stops the services.
 */
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
/**
 * Decides whether the existing container must be removed, returning the reason when
 * so. Checks, in order: node state forbids a container (dirty/provisioned), a
 * different Docker image is wanted, or the container is no longer running.
 */
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final Node.State nodeState = nodeSpec.nodeState;
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }

    boolean wantsDifferentImage = nodeSpec.wantedDockerImage.isPresent()
            && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image);
    if (wantsDifferentImage) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
    }

    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    return Optional.empty();
}
/**
 * Schedules an asynchronous pull of the wanted Docker image if it is not present
 * locally and not already being pulled; signals the converge loop when the pull
 * completes. Clears the in-flight marker once the image no longer needs downloading.
 *
 * NOTE(review): calls nodeSpec.wantedDockerImage.get() without an isPresent() check —
 * callers appear to reach this only for active nodes; confirm the wanted image is
 * always set in that state.
 */
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
// This image pull is already in flight; nothing to do.
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, nodeSpec, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
/** Wakes the converge loop so it runs immediately instead of waiting out its interval. */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (workToDoNow) return;  // already signaled

        workToDoNow = true;
        addDebugMessage("Signaling work to be done");
        monitor.notifyAll();
    }
}
/**
 * One iteration of the converge loop: waits until the converge interval has passed
 * or work is signaled, applies any pending freeze/unfreeze request, then converges
 * unless frozen. OrchestratorExceptions are expected and logged quietly; other
 * exceptions are counted and ignored; any non-Exception Throwable takes the process
 * down (exit 234) since the agent's state can no longer be trusted.
 */
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
// Wait out the remainder of the interval, waking early on signalWorkToBeDone().
while (! workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
// Apply a pending freeze/unfreeze request under the lock.
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
// Denied suspend/resume is a normal, transient condition.
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
/**
 * Drives the node towards the state the node repo wants: fetches the node spec and
 * acts on its state — removing/starting/resuming the container, restarting services,
 * cleaning storage, and reporting attributes back to the node repo as appropriate.
 *
 * @throws IllegalStateException if the node has disappeared from the node repo
 */
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
// Dimensions derived from the spec changed; drop stale metrics.
metricReceiver.unsetMetricsForContainer(hostname);
}
switch (nodeSpec.nodeState) {
// Non-running states: make sure no container runs, then report attributes.
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
// Wait for the wanted image before (re)creating the container.
scheduleDownLoadIfNeeded(nodeSpec);
if (imageBeingDownloaded != null) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
removeContainerIfNeededUpdateContainerState(nodeSpec);
startContainerIfNeeded(nodeSpec);
runLocalResumeScriptIfNeeded(nodeSpec);
// Report back before resuming, so the node repo sees the new image/version.
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
// Tear down, archive application data, then hand the node back as ready.
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoAndMarkNodeAsReady(nodeSpec);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if ( ! containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
long currentCpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
long currentCpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(currentCpuContainerTotalTime, currentCpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
long bytesInGB = 1 << 30;
nodeSpec.minDiskAvailableGb.ifPresent(diskGB -> metricReceiver
.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskGB * bytesInGB));
storageMaintainer.ifPresent(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName)
.forEach((metricName, metricValue) ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, metricName).sample(metricValue.doubleValue())));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
}
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
Map<String, Object> metricsMap = (Map<String, Object>) metrics;
if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
try {
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
.sample(((Number) metricsMap.get(metricName)).doubleValue());
} catch (Throwable e) {
logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
}
}
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
return dockerOperations.getContainer(containerName);
}
@Override
public String getHostname() {
return hostname;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
private void configureContainerMetrics(ContainerNodeSpec nodeSpec) {
if (! storageMaintainer.isPresent()) return;
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
nodeSpec.owner.ifPresent(owner ->
scheduleMaker
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
scheduleMaker
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
vespaVersion.ifPresent(version -> scheduleMaker.withTag("vespaVersion", version));
try {
scheduleMaker.writeTo(yamasAgentFolder);
final String[] restartYamasAgent = new String[] {"service" , "yamas-agent", "restart"};
dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
private Optional<String> vespaVersion = Optional.empty();
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
vespaVersion = dockerOperations.getVespaVersion(container.name);
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
});
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
@Override
public Map<String, Object> debugInfo() {
Map<String, Object> debug = new LinkedHashMap<>();
debug.put("Hostname", hostname);
debug.put("isFrozen", isFrozen);
debug.put("wantFrozen", wantFrozen);
debug.put("terminated", terminated);
debug.put("workToDoNow", workToDoNow);
synchronized (debugMessages) {
debug.put("History", new LinkedList<>(debugMessages));
}
debug.put("Node repo state", lastNodeSpec.nodeState.name());
return debug;
}
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (! terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
@Override
public void stop() {
addDebugMessage("Stopping");
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop host thread " + hostname);
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop host thread " + hostname);
}
}
private void experimentalWriteFile(final ContainerNodeSpec nodeSpec) {
try {
FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
if (! config.isPresent()) {
logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
return;
}
Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
Files.write(filebeatPath, config.get().getBytes());
logger.info("Wrote filebeat config.");
} catch (Throwable t) {
logger.error("Failed writing filebeat config; " + nodeSpec, t);
}
}
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (containerState != RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
return;
}
experimentalWriteFile(nodeSpec);
addDebugMessage("Starting optional node program resume command");
logger.info("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
containerState = RUNNING;
}
private void updateNodeRepoAndMarkNodeAsReady(ContainerNodeSpec nodeSpec) {
publishStateToNodeRepoIfChanged(
new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
nodeRepository.markAsReady(nodeSpec.hostname);
}
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.orElse(new DockerImage("")))
.withVespaVersion(vespaVersion.orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
lastAttributesSet = currentAttributes;
}
}
private void startContainerIfNeeded(final ContainerNodeSpec nodeSpec) {
if (! getContainer().isPresent()) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
metricReceiver.unsetMetricsForContainer(hostname);
lastCpuMetric = new CpuUsageReporter(clock.instant());
vespaVersion = dockerOperations.getVespaVersion(containerName);
configureContainerMetrics(nodeSpec);
addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
}
}
private void removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec) {
removeContainerIfNeeded(nodeSpec).ifPresent(existingContainer ->
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + existingContainer + ": " + restartReason);
restartServices(nodeSpec, existingContainer);
}));
}
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
if ( ! nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
if (! nodeSpec.currentRestartGeneration.isPresent() ||
nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
}
return Optional.empty();
}
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
return Optional.empty();
}
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, nodeSpec, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
private void signalWorkToBeDone() {
synchronized (monitor) {
if (! workToDoNow) {
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
}
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (! workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
metricReceiver.unsetMetricsForContainer(hostname);
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
if (imageBeingDownloaded != null) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
removeContainerIfNeededUpdateContainerState(nodeSpec);
startContainerIfNeeded(nodeSpec);
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoAndMarkNodeAsReady(nodeSpec);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if ( ! containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
long currentCpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
long currentCpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(currentCpuContainerTotalTime, currentCpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
long bytesInGB = 1 << 30;
nodeSpec.minDiskAvailableGb.ifPresent(diskGB -> metricReceiver
.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskGB * bytesInGB));
storageMaintainer.ifPresent(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName)
.forEach((metricName, metricValue) ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, metricName).sample(metricValue.doubleValue())));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
}
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
Map<String, Object> metricsMap = (Map<String, Object>) metrics;
if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
try {
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
.sample(((Number) metricsMap.get(metricName)).doubleValue());
} catch (Throwable e) {
logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
}
}
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
return dockerOperations.getContainer(containerName);
}
@Override
public String getHostname() {
return hostname;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
private void configureContainerMetrics(ContainerNodeSpec nodeSpec) {
if (! storageMaintainer.isPresent()) return;
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
nodeSpec.owner.ifPresent(owner ->
scheduleMaker
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
scheduleMaker
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
vespaVersion.ifPresent(version -> scheduleMaker.withTag("vespaVersion", version));
try {
scheduleMaker.writeTo(yamasAgentFolder);
final String[] restartYamasAgent = new String[] {"service" , "yamas-agent", "restart"};
dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} |
How about using version from docker image if it exists. Else we might end up setting wantedDockerImage to an image with the newest vespa version. | private static ClusterMembership readMembership(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembership).asString(),
object.field(hostSpecVespaVersion).valid() ?
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersion).asString()) :
Vtag.currentVersion);
} | Vtag.currentVersion); | private static ClusterMembership readMembership(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembership).asString(),
object.field(hostSpecVespaVersion).valid() ?
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersion).asString()) :
( object.field(dockerImage).valid() ? new DockerImage(object.field(dockerImage).asString()).tagAsVersion() : Vtag.currentVersion));
} | class ProvisionInfo {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostName = "hostName";
private static final String hostSpecMembership = "membership";
private static final String hostSpecFlavor = "flavor";
private static final String hostSpecVespaVersion = "vespaVersion";
private final Set<HostSpec> hosts = new LinkedHashSet<>();
private ProvisionInfo(Set<HostSpec> hosts) {
this.hosts.addAll(hosts);
}
public static ProvisionInfo withHosts(Set<HostSpec> hosts) {
return new ProvisionInfo(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts) {
Cursor object = array.addObject();
serializeHostSpec(object.setObject(hostSpecKey), host);
}
}
private void serializeHostSpec(Cursor cursor, HostSpec host) {
cursor.setString(hostSpecHostName, host.hostname());
if (host.membership().isPresent()) {
cursor.setString(hostSpecMembership, host.membership().get().stringValue());
cursor.setString(hostSpecVespaVersion, host.membership().get().cluster().vespaVersion().toString());
}
if (host.flavor().isPresent())
cursor.setString(hostSpecFlavor, host.flavor().get().name());
}
public Set<HostSpec> getHosts() {
return Collections.unmodifiableSet(hosts);
}
private static ProvisionInfo fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse(new ArrayTraverser() {
@Override
public void entry(int i, Inspector inspector) {
hosts.add(deserializeHostSpec(inspector.field(hostSpecKey), nodeFlavors));
}
});
return new ProvisionInfo(hosts);
}
private static HostSpec deserializeHostSpec(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembership).valid() ? Optional.of(readMembership(object)) : Optional.empty();
Optional<Flavor> flavor =
object.field(hostSpecFlavor).valid() ? readFlavor(object, nodeFlavors) : Optional.empty();
return new HostSpec(object.field(hostSpecHostName).asString(),Collections.emptyList(), flavor, membership);
}
private static Optional<Flavor> readFlavor(Inspector object, Optional<NodeFlavors> nodeFlavors) {
return nodeFlavors.map(flavorMapper -> flavorMapper.getFlavor(object.field(hostSpecFlavor).asString()))
.orElse(Optional.empty());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static ProvisionInfo fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
public ProvisionInfo merge(ProvisionInfo provisionInfo) {
Set<HostSpec> mergedSet = new LinkedHashSet<>();
mergedSet.addAll(this.hosts);
mergedSet.addAll(provisionInfo.getHosts());
return ProvisionInfo.withHosts(mergedSet);
}
} | class ProvisionInfo {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostName = "hostName";
private static final String hostSpecMembership = "membership";
private static final String hostSpecFlavor = "flavor";
private static final String hostSpecVespaVersion = "vespaVersion";
private static final String dockerImage = "dockerImage";
private final Set<HostSpec> hosts = new LinkedHashSet<>();
private ProvisionInfo(Set<HostSpec> hosts) {
this.hosts.addAll(hosts);
}
public static ProvisionInfo withHosts(Set<HostSpec> hosts) {
return new ProvisionInfo(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts) {
Cursor object = array.addObject();
serializeHostSpec(object.setObject(hostSpecKey), host);
}
}
private void serializeHostSpec(Cursor cursor, HostSpec host) {
cursor.setString(hostSpecHostName, host.hostname());
if (host.membership().isPresent()) {
cursor.setString(hostSpecMembership, host.membership().get().stringValue());
cursor.setString(hostSpecVespaVersion, host.membership().get().cluster().vespaVersion().toString());
}
if (host.flavor().isPresent())
cursor.setString(hostSpecFlavor, host.flavor().get().name());
}
public Set<HostSpec> getHosts() {
return Collections.unmodifiableSet(hosts);
}
private static ProvisionInfo fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse(new ArrayTraverser() {
@Override
public void entry(int i, Inspector inspector) {
hosts.add(deserializeHostSpec(inspector.field(hostSpecKey), nodeFlavors));
}
});
return new ProvisionInfo(hosts);
}
private static HostSpec deserializeHostSpec(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembership).valid() ? Optional.of(readMembership(object)) : Optional.empty();
Optional<Flavor> flavor =
object.field(hostSpecFlavor).valid() ? readFlavor(object, nodeFlavors) : Optional.empty();
return new HostSpec(object.field(hostSpecHostName).asString(),Collections.emptyList(), flavor, membership);
}
private static Optional<Flavor> readFlavor(Inspector object, Optional<NodeFlavors> nodeFlavors) {
return nodeFlavors.map(flavorMapper -> flavorMapper.getFlavor(object.field(hostSpecFlavor).asString()))
.orElse(Optional.empty());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static ProvisionInfo fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
public ProvisionInfo merge(ProvisionInfo provisionInfo) {
Set<HostSpec> mergedSet = new LinkedHashSet<>();
mergedSet.addAll(this.hosts);
mergedSet.addAll(provisionInfo.getHosts());
return ProvisionInfo.withHosts(mergedSet);
}
} |
Good idea, done. | private static ClusterMembership readMembership(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembership).asString(),
object.field(hostSpecVespaVersion).valid() ?
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersion).asString()) :
Vtag.currentVersion);
} | Vtag.currentVersion); | private static ClusterMembership readMembership(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembership).asString(),
object.field(hostSpecVespaVersion).valid() ?
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersion).asString()) :
( object.field(dockerImage).valid() ? new DockerImage(object.field(dockerImage).asString()).tagAsVersion() : Vtag.currentVersion));
} | class ProvisionInfo {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostName = "hostName";
private static final String hostSpecMembership = "membership";
private static final String hostSpecFlavor = "flavor";
private static final String hostSpecVespaVersion = "vespaVersion";
private final Set<HostSpec> hosts = new LinkedHashSet<>();
private ProvisionInfo(Set<HostSpec> hosts) {
this.hosts.addAll(hosts);
}
public static ProvisionInfo withHosts(Set<HostSpec> hosts) {
return new ProvisionInfo(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts) {
Cursor object = array.addObject();
serializeHostSpec(object.setObject(hostSpecKey), host);
}
}
private void serializeHostSpec(Cursor cursor, HostSpec host) {
cursor.setString(hostSpecHostName, host.hostname());
if (host.membership().isPresent()) {
cursor.setString(hostSpecMembership, host.membership().get().stringValue());
cursor.setString(hostSpecVespaVersion, host.membership().get().cluster().vespaVersion().toString());
}
if (host.flavor().isPresent())
cursor.setString(hostSpecFlavor, host.flavor().get().name());
}
public Set<HostSpec> getHosts() {
return Collections.unmodifiableSet(hosts);
}
private static ProvisionInfo fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse(new ArrayTraverser() {
@Override
public void entry(int i, Inspector inspector) {
hosts.add(deserializeHostSpec(inspector.field(hostSpecKey), nodeFlavors));
}
});
return new ProvisionInfo(hosts);
}
private static HostSpec deserializeHostSpec(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembership).valid() ? Optional.of(readMembership(object)) : Optional.empty();
Optional<Flavor> flavor =
object.field(hostSpecFlavor).valid() ? readFlavor(object, nodeFlavors) : Optional.empty();
return new HostSpec(object.field(hostSpecHostName).asString(),Collections.emptyList(), flavor, membership);
}
private static Optional<Flavor> readFlavor(Inspector object, Optional<NodeFlavors> nodeFlavors) {
return nodeFlavors.map(flavorMapper -> flavorMapper.getFlavor(object.field(hostSpecFlavor).asString()))
.orElse(Optional.empty());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static ProvisionInfo fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
public ProvisionInfo merge(ProvisionInfo provisionInfo) {
Set<HostSpec> mergedSet = new LinkedHashSet<>();
mergedSet.addAll(this.hosts);
mergedSet.addAll(provisionInfo.getHosts());
return ProvisionInfo.withHosts(mergedSet);
}
} | class ProvisionInfo {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostName = "hostName";
private static final String hostSpecMembership = "membership";
private static final String hostSpecFlavor = "flavor";
private static final String hostSpecVespaVersion = "vespaVersion";
private static final String dockerImage = "dockerImage";
private final Set<HostSpec> hosts = new LinkedHashSet<>();
private ProvisionInfo(Set<HostSpec> hosts) {
this.hosts.addAll(hosts);
}
public static ProvisionInfo withHosts(Set<HostSpec> hosts) {
return new ProvisionInfo(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts) {
Cursor object = array.addObject();
serializeHostSpec(object.setObject(hostSpecKey), host);
}
}
private void serializeHostSpec(Cursor cursor, HostSpec host) {
cursor.setString(hostSpecHostName, host.hostname());
if (host.membership().isPresent()) {
cursor.setString(hostSpecMembership, host.membership().get().stringValue());
cursor.setString(hostSpecVespaVersion, host.membership().get().cluster().vespaVersion().toString());
}
if (host.flavor().isPresent())
cursor.setString(hostSpecFlavor, host.flavor().get().name());
}
public Set<HostSpec> getHosts() {
return Collections.unmodifiableSet(hosts);
}
private static ProvisionInfo fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse(new ArrayTraverser() {
@Override
public void entry(int i, Inspector inspector) {
hosts.add(deserializeHostSpec(inspector.field(hostSpecKey), nodeFlavors));
}
});
return new ProvisionInfo(hosts);
}
private static HostSpec deserializeHostSpec(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembership).valid() ? Optional.of(readMembership(object)) : Optional.empty();
Optional<Flavor> flavor =
object.field(hostSpecFlavor).valid() ? readFlavor(object, nodeFlavors) : Optional.empty();
return new HostSpec(object.field(hostSpecHostName).asString(),Collections.emptyList(), flavor, membership);
}
private static Optional<Flavor> readFlavor(Inspector object, Optional<NodeFlavors> nodeFlavors) {
return nodeFlavors.map(flavorMapper -> flavorMapper.getFlavor(object.field(hostSpecFlavor).asString()))
.orElse(Optional.empty());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static ProvisionInfo fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
public ProvisionInfo merge(ProvisionInfo provisionInfo) {
Set<HostSpec> mergedSet = new LinkedHashSet<>();
mergedSet.addAll(this.hosts);
mergedSet.addAll(provisionInfo.getHosts());
return ProvisionInfo.withHosts(mergedSet);
}
} |
dockerImage is the full string e.g. "docker-registry.ops.yahoo.com:4443/vespa/ci:6.96.12", so you need to parse it | private static ClusterMembership readMembership(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembership).asString(),
object.field(hostSpecVespaVersion).valid() ?
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersion).asString()) :
( object.field(dockerImage).valid() ? com.yahoo.component.Version.fromString(object.field(dockerImage).asString()) : Vtag.currentVersion));
} | } | private static ClusterMembership readMembership(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembership).asString(),
object.field(hostSpecVespaVersion).valid() ?
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersion).asString()) :
( object.field(dockerImage).valid() ? new DockerImage(object.field(dockerImage).asString()).tagAsVersion() : Vtag.currentVersion));
} | class ProvisionInfo {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostName = "hostName";
private static final String hostSpecMembership = "membership";
private static final String hostSpecFlavor = "flavor";
private static final String hostSpecVespaVersion = "vespaVersion";
private static final String dockerImage = "dockerImage";
private final Set<HostSpec> hosts = new LinkedHashSet<>();
private ProvisionInfo(Set<HostSpec> hosts) {
this.hosts.addAll(hosts);
}
public static ProvisionInfo withHosts(Set<HostSpec> hosts) {
return new ProvisionInfo(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts) {
Cursor object = array.addObject();
serializeHostSpec(object.setObject(hostSpecKey), host);
}
}
private void serializeHostSpec(Cursor cursor, HostSpec host) {
cursor.setString(hostSpecHostName, host.hostname());
if (host.membership().isPresent()) {
cursor.setString(hostSpecMembership, host.membership().get().stringValue());
cursor.setString(hostSpecVespaVersion, host.membership().get().cluster().vespaVersion().toString());
}
if (host.flavor().isPresent())
cursor.setString(hostSpecFlavor, host.flavor().get().name());
}
public Set<HostSpec> getHosts() {
return Collections.unmodifiableSet(hosts);
}
private static ProvisionInfo fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse(new ArrayTraverser() {
@Override
public void entry(int i, Inspector inspector) {
hosts.add(deserializeHostSpec(inspector.field(hostSpecKey), nodeFlavors));
}
});
return new ProvisionInfo(hosts);
}
private static HostSpec deserializeHostSpec(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembership).valid() ? Optional.of(readMembership(object)) : Optional.empty();
Optional<Flavor> flavor =
object.field(hostSpecFlavor).valid() ? readFlavor(object, nodeFlavors) : Optional.empty();
return new HostSpec(object.field(hostSpecHostName).asString(),Collections.emptyList(), flavor, membership);
}
private static Optional<Flavor> readFlavor(Inspector object, Optional<NodeFlavors> nodeFlavors) {
return nodeFlavors.map(flavorMapper -> flavorMapper.getFlavor(object.field(hostSpecFlavor).asString()))
.orElse(Optional.empty());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static ProvisionInfo fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
public ProvisionInfo merge(ProvisionInfo provisionInfo) {
Set<HostSpec> mergedSet = new LinkedHashSet<>();
mergedSet.addAll(this.hosts);
mergedSet.addAll(provisionInfo.getHosts());
return ProvisionInfo.withHosts(mergedSet);
}
} | class ProvisionInfo {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostName = "hostName";
private static final String hostSpecMembership = "membership";
private static final String hostSpecFlavor = "flavor";
private static final String hostSpecVespaVersion = "vespaVersion";
private static final String dockerImage = "dockerImage";
private final Set<HostSpec> hosts = new LinkedHashSet<>();
private ProvisionInfo(Set<HostSpec> hosts) {
this.hosts.addAll(hosts);
}
public static ProvisionInfo withHosts(Set<HostSpec> hosts) {
return new ProvisionInfo(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts) {
Cursor object = array.addObject();
serializeHostSpec(object.setObject(hostSpecKey), host);
}
}
private void serializeHostSpec(Cursor cursor, HostSpec host) {
cursor.setString(hostSpecHostName, host.hostname());
if (host.membership().isPresent()) {
cursor.setString(hostSpecMembership, host.membership().get().stringValue());
cursor.setString(hostSpecVespaVersion, host.membership().get().cluster().vespaVersion().toString());
}
if (host.flavor().isPresent())
cursor.setString(hostSpecFlavor, host.flavor().get().name());
}
public Set<HostSpec> getHosts() {
return Collections.unmodifiableSet(hosts);
}
private static ProvisionInfo fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse(new ArrayTraverser() {
@Override
public void entry(int i, Inspector inspector) {
hosts.add(deserializeHostSpec(inspector.field(hostSpecKey), nodeFlavors));
}
});
return new ProvisionInfo(hosts);
}
private static HostSpec deserializeHostSpec(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembership).valid() ? Optional.of(readMembership(object)) : Optional.empty();
Optional<Flavor> flavor =
object.field(hostSpecFlavor).valid() ? readFlavor(object, nodeFlavors) : Optional.empty();
return new HostSpec(object.field(hostSpecHostName).asString(),Collections.emptyList(), flavor, membership);
}
private static Optional<Flavor> readFlavor(Inspector object, Optional<NodeFlavors> nodeFlavors) {
return nodeFlavors.map(flavorMapper -> flavorMapper.getFlavor(object.field(hostSpecFlavor).asString()))
.orElse(Optional.empty());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static ProvisionInfo fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
public ProvisionInfo merge(ProvisionInfo provisionInfo) {
Set<HostSpec> mergedSet = new LinkedHashSet<>();
mergedSet.addAll(this.hosts);
mergedSet.addAll(provisionInfo.getHosts());
return ProvisionInfo.withHosts(mergedSet);
}
} |
Yes, of course. Thanks! | private static ClusterMembership readMembership(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembership).asString(),
object.field(hostSpecVespaVersion).valid() ?
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersion).asString()) :
( object.field(dockerImage).valid() ? com.yahoo.component.Version.fromString(object.field(dockerImage).asString()) : Vtag.currentVersion));
} | } | private static ClusterMembership readMembership(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembership).asString(),
object.field(hostSpecVespaVersion).valid() ?
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersion).asString()) :
( object.field(dockerImage).valid() ? new DockerImage(object.field(dockerImage).asString()).tagAsVersion() : Vtag.currentVersion));
} | class ProvisionInfo {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostName = "hostName";
private static final String hostSpecMembership = "membership";
private static final String hostSpecFlavor = "flavor";
private static final String hostSpecVespaVersion = "vespaVersion";
private static final String dockerImage = "dockerImage";
private final Set<HostSpec> hosts = new LinkedHashSet<>();
private ProvisionInfo(Set<HostSpec> hosts) {
this.hosts.addAll(hosts);
}
public static ProvisionInfo withHosts(Set<HostSpec> hosts) {
return new ProvisionInfo(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts) {
Cursor object = array.addObject();
serializeHostSpec(object.setObject(hostSpecKey), host);
}
}
private void serializeHostSpec(Cursor cursor, HostSpec host) {
cursor.setString(hostSpecHostName, host.hostname());
if (host.membership().isPresent()) {
cursor.setString(hostSpecMembership, host.membership().get().stringValue());
cursor.setString(hostSpecVespaVersion, host.membership().get().cluster().vespaVersion().toString());
}
if (host.flavor().isPresent())
cursor.setString(hostSpecFlavor, host.flavor().get().name());
}
public Set<HostSpec> getHosts() {
return Collections.unmodifiableSet(hosts);
}
private static ProvisionInfo fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse(new ArrayTraverser() {
@Override
public void entry(int i, Inspector inspector) {
hosts.add(deserializeHostSpec(inspector.field(hostSpecKey), nodeFlavors));
}
});
return new ProvisionInfo(hosts);
}
private static HostSpec deserializeHostSpec(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembership).valid() ? Optional.of(readMembership(object)) : Optional.empty();
Optional<Flavor> flavor =
object.field(hostSpecFlavor).valid() ? readFlavor(object, nodeFlavors) : Optional.empty();
return new HostSpec(object.field(hostSpecHostName).asString(),Collections.emptyList(), flavor, membership);
}
private static Optional<Flavor> readFlavor(Inspector object, Optional<NodeFlavors> nodeFlavors) {
return nodeFlavors.map(flavorMapper -> flavorMapper.getFlavor(object.field(hostSpecFlavor).asString()))
.orElse(Optional.empty());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static ProvisionInfo fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
public ProvisionInfo merge(ProvisionInfo provisionInfo) {
Set<HostSpec> mergedSet = new LinkedHashSet<>();
mergedSet.addAll(this.hosts);
mergedSet.addAll(provisionInfo.getHosts());
return ProvisionInfo.withHosts(mergedSet);
}
} | class ProvisionInfo {
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostName = "hostName";
private static final String hostSpecMembership = "membership";
private static final String hostSpecFlavor = "flavor";
private static final String hostSpecVespaVersion = "vespaVersion";
private static final String dockerImage = "dockerImage";
private final Set<HostSpec> hosts = new LinkedHashSet<>();
private ProvisionInfo(Set<HostSpec> hosts) {
this.hosts.addAll(hosts);
}
public static ProvisionInfo withHosts(Set<HostSpec> hosts) {
return new ProvisionInfo(hosts);
}
private void toSlime(Cursor cursor) {
Cursor array = cursor.setArray(mappingKey);
for (HostSpec host : hosts) {
Cursor object = array.addObject();
serializeHostSpec(object.setObject(hostSpecKey), host);
}
}
private void serializeHostSpec(Cursor cursor, HostSpec host) {
cursor.setString(hostSpecHostName, host.hostname());
if (host.membership().isPresent()) {
cursor.setString(hostSpecMembership, host.membership().get().stringValue());
cursor.setString(hostSpecVespaVersion, host.membership().get().cluster().vespaVersion().toString());
}
if (host.flavor().isPresent())
cursor.setString(hostSpecFlavor, host.flavor().get().name());
}
public Set<HostSpec> getHosts() {
return Collections.unmodifiableSet(hosts);
}
private static ProvisionInfo fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
Inspector array = inspector.field(mappingKey);
Set<HostSpec> hosts = new LinkedHashSet<>();
array.traverse(new ArrayTraverser() {
@Override
public void entry(int i, Inspector inspector) {
hosts.add(deserializeHostSpec(inspector.field(hostSpecKey), nodeFlavors));
}
});
return new ProvisionInfo(hosts);
}
private static HostSpec deserializeHostSpec(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembership).valid() ? Optional.of(readMembership(object)) : Optional.empty();
Optional<Flavor> flavor =
object.field(hostSpecFlavor).valid() ? readFlavor(object, nodeFlavors) : Optional.empty();
return new HostSpec(object.field(hostSpecHostName).asString(),Collections.emptyList(), flavor, membership);
}
private static Optional<Flavor> readFlavor(Inspector object, Optional<NodeFlavors> nodeFlavors) {
return nodeFlavors.map(flavorMapper -> flavorMapper.getFlavor(object.field(hostSpecFlavor).asString()))
.orElse(Optional.empty());
}
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
public static ProvisionInfo fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
public ProvisionInfo merge(ProvisionInfo provisionInfo) {
Set<HostSpec> mergedSet = new LinkedHashSet<>();
mergedSet.addAll(this.hosts);
mergedSet.addAll(provisionInfo.getHosts());
return ProvisionInfo.withHosts(mergedSet);
}
} |
Great, I didn't notice this. | Set<HostSpec> getHostSpecs() {
return getHosts().stream()
.map(host -> new HostSpec(host.getHostName(), Collections.emptyList(),
host.getFlavor(), host.primaryClusterMembership()))
.collect(Collectors.toCollection(LinkedHashSet::new));
} | return getHosts().stream() | Set<HostSpec> getHostSpecs() {
return getHosts().stream()
.map(host -> new HostSpec(host.getHostName(), Collections.emptyList(),
host.getFlavor(), host.primaryClusterMembership()))
.collect(Collectors.toCollection(LinkedHashSet::new));
} | class HostSystem extends AbstractConfigProducer<Host> {
private static Logger log = Logger.getLogger(HostSystem.class.getName());
private Map<String,String> ipAddresses = new LinkedHashMap<>();
private Map<String,String> hostnames = new LinkedHashMap<>();
private final Map<String, HostResource> hostname2host = new LinkedHashMap<>();
private final HostProvisioner provisioner;
public HostSystem(AbstractConfigProducer parent, String name, HostProvisioner provisioner) {
super(parent, name);
this.provisioner = provisioner;
}
/**
* Returns the host with the given hostname.
*
* @param name the hostname of the host.
* @return the host with the given hostname.
*/
public HostResource getHostByHostname(String name) {
if ("localhost.fortestingpurposesonly".equals(name)) {
String localhost = "localhost";
if ( ! getChildren().containsKey(localhost)) {
new Host(this, localhost);
}
return new HostResource(getChildren().get(localhost));
}
return hostname2host.get(name);
}
/**
* Returns the canonical name of a given host. This will cache names for faster lookup.
*
* @param hostname the hostname to retrieve the canonical hostname for.
* @return The canonical hostname, or null if unable to resolve.
* @throws UnknownHostException if the hostname cannot be resolved
*/
public String getCanonicalHostname(String hostname) throws UnknownHostException {
if ( ! hostnames.containsKey(hostname)) {
hostnames.put(hostname, lookupCanonicalHostname(hostname));
}
return hostnames.get(hostname);
}
/**
* Static helper method that looks up the canonical name of a given host.
*
* @param hostname the hostname to retrieve the canonical hostname for.
* @return The canonical hostname, or null if unable to resolve.
* @throws UnknownHostException if the hostname cannot be resolved
*/
public static String lookupCanonicalHostname(String hostname) throws UnknownHostException {
return java.net.InetAddress.getByName(hostname).getCanonicalHostName();
}
/**
* Returns the if address of a host.
*
* @param hostname the hostname to retrieve the ip address for.
* @return The string representation of the ip-address.
*/
public String getIp(String hostname) {
if (ipAddresses.containsKey(hostname)) return ipAddresses.get(hostname);
String ipAddress;
if (hostname.startsWith(MockRoot.MOCKHOST)) {
ipAddress = "0.0.0.0";
} else {
try {
InetAddress address = InetAddress.getByName(hostname);
ipAddress = address.getHostAddress();
} catch (java.net.UnknownHostException e) {
log.warning("Unable to find valid IP address of host: " + hostname);
ipAddress = "0.0.0.0";
}
}
ipAddresses.put(hostname, ipAddress);
return ipAddress;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (HostResource host : hostname2host.values()) {
sb.append(host).append(",");
}
if (sb.length() > 0) sb.deleteCharAt(sb.length() - 1);
return sb.toString();
}
public HostResource getHost(String hostAlias) {
HostSpec hostSpec = provisioner.allocateHost(hostAlias);
for (HostResource resource : hostname2host.values()) {
if (resource.getHostName().equals(hostSpec.hostname())) {
hostSpec.membership().ifPresent(resource::addClusterMembership);
return resource;
}
}
return addNewHost(hostSpec);
}
private HostResource addNewHost(HostSpec hostSpec) {
Host host = new Host(this, hostSpec.hostname());
HostResource hostResource = new HostResource(host);
hostResource.setFlavor(hostSpec.flavor());
hostSpec.membership().ifPresent(hostResource::addClusterMembership);
hostname2host.put(host.getHostName(), hostResource);
log.log(DEBUG, () -> "Added new host resource for " + host.getHostName() + " with flavor " + hostResource.getFlavor());
return hostResource;
}
/** Returns the hosts owned by the application having this system - i.e all hosts except config servers */
public List<HostResource> getHosts() {
return hostname2host.values().stream()
.filter(host -> !host.getHost().runsConfigServer())
.collect(Collectors.toList());
}
public Map<HostResource, ClusterMembership> allocateHosts(ClusterSpec cluster, Capacity capacity, int groups, DeployLogger logger) {
List<HostSpec> allocatedHosts = provisioner.prepare(cluster, capacity, groups, new ProvisionDeployLogger(logger));
Map<HostResource, ClusterMembership> retAllocatedHosts = new LinkedHashMap<>();
for (HostSpec spec : allocatedHosts) {
HostResource host = getExistingHost(spec).orElseGet(() -> addNewHost(spec));
retAllocatedHosts.put(host, spec.membership().orElse(null));
if (! host.getFlavor().isPresent()) {
host.setFlavor(spec.flavor());
log.log(DEBUG, () -> "Host resource " + host.getHostName() + " had no flavor, setting to " + spec.flavor());
}
}
retAllocatedHosts.keySet().forEach(host -> log.log(DEBUG, () -> "Allocated host " + host.getHostName() + " with flavor " + host.getFlavor()));
return retAllocatedHosts;
}
private Optional<HostResource> getExistingHost(HostSpec key) {
List<HostResource> hosts = hostname2host.values().stream()
.filter(resource -> resource.getHostName().equals(key.hostname()))
.collect(Collectors.toList());
if (hosts.isEmpty()) {
return Optional.empty();
} else {
log.log(DEBUG, () -> "Found existing host resource for " + key.hostname() + " with flavor " + hosts.get(0).getFlavor());
return Optional.of(hosts.get(0));
}
}
public void addBoundHost(HostResource host) {
hostname2host.put(host.getHostName(), host);
}
/** A provision logger which forwards to a deploy logger */
private static class ProvisionDeployLogger implements ProvisionLogger {
private final DeployLogger deployLogger;
public ProvisionDeployLogger(DeployLogger deployLogger) {
this.deployLogger = deployLogger;
}
@Override
public void log(Level level, String message) {
deployLogger.log(level, message);
}
}
} | class HostSystem extends AbstractConfigProducer<Host> {
private static Logger log = Logger.getLogger(HostSystem.class.getName());
private Map<String,String> ipAddresses = new LinkedHashMap<>();
private Map<String,String> hostnames = new LinkedHashMap<>();
private final Map<String, HostResource> hostname2host = new LinkedHashMap<>();
private final HostProvisioner provisioner;
public HostSystem(AbstractConfigProducer parent, String name, HostProvisioner provisioner) {
super(parent, name);
this.provisioner = provisioner;
}
/**
* Returns the host with the given hostname.
*
* @param name the hostname of the host.
* @return the host with the given hostname.
*/
public HostResource getHostByHostname(String name) {
if ("localhost.fortestingpurposesonly".equals(name)) {
String localhost = "localhost";
if ( ! getChildren().containsKey(localhost)) {
new Host(this, localhost);
}
return new HostResource(getChildren().get(localhost));
}
return hostname2host.get(name);
}
/**
* Returns the canonical name of a given host. This will cache names for faster lookup.
*
* @param hostname the hostname to retrieve the canonical hostname for.
* @return The canonical hostname, or null if unable to resolve.
* @throws UnknownHostException if the hostname cannot be resolved
*/
public String getCanonicalHostname(String hostname) throws UnknownHostException {
if ( ! hostnames.containsKey(hostname)) {
hostnames.put(hostname, lookupCanonicalHostname(hostname));
}
return hostnames.get(hostname);
}
/**
* Static helper method that looks up the canonical name of a given host.
*
* @param hostname the hostname to retrieve the canonical hostname for.
* @return The canonical hostname, or null if unable to resolve.
* @throws UnknownHostException if the hostname cannot be resolved
*/
public static String lookupCanonicalHostname(String hostname) throws UnknownHostException {
return java.net.InetAddress.getByName(hostname).getCanonicalHostName();
}
/**
* Returns the if address of a host.
*
* @param hostname the hostname to retrieve the ip address for.
* @return The string representation of the ip-address.
*/
public String getIp(String hostname) {
if (ipAddresses.containsKey(hostname)) return ipAddresses.get(hostname);
String ipAddress;
if (hostname.startsWith(MockRoot.MOCKHOST)) {
ipAddress = "0.0.0.0";
} else {
try {
InetAddress address = InetAddress.getByName(hostname);
ipAddress = address.getHostAddress();
} catch (java.net.UnknownHostException e) {
log.warning("Unable to find valid IP address of host: " + hostname);
ipAddress = "0.0.0.0";
}
}
ipAddresses.put(hostname, ipAddress);
return ipAddress;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (HostResource host : hostname2host.values()) {
sb.append(host).append(",");
}
if (sb.length() > 0) sb.deleteCharAt(sb.length() - 1);
return sb.toString();
}
public HostResource getHost(String hostAlias) {
HostSpec hostSpec = provisioner.allocateHost(hostAlias);
for (HostResource resource : hostname2host.values()) {
if (resource.getHostName().equals(hostSpec.hostname())) {
hostSpec.membership().ifPresent(resource::addClusterMembership);
return resource;
}
}
return addNewHost(hostSpec);
}
private HostResource addNewHost(HostSpec hostSpec) {
Host host = new Host(this, hostSpec.hostname());
HostResource hostResource = new HostResource(host);
hostResource.setFlavor(hostSpec.flavor());
hostSpec.membership().ifPresent(hostResource::addClusterMembership);
hostname2host.put(host.getHostName(), hostResource);
log.log(DEBUG, () -> "Added new host resource for " + host.getHostName() + " with flavor " + hostResource.getFlavor());
return hostResource;
}
/** Returns the hosts owned by the application having this system - i.e all hosts except config servers */
public List<HostResource> getHosts() {
return hostname2host.values().stream()
.filter(host -> !host.getHost().runsConfigServer())
.collect(Collectors.toList());
}
public Map<HostResource, ClusterMembership> allocateHosts(ClusterSpec cluster, Capacity capacity, int groups, DeployLogger logger) {
List<HostSpec> allocatedHosts = provisioner.prepare(cluster, capacity, groups, new ProvisionDeployLogger(logger));
Map<HostResource, ClusterMembership> retAllocatedHosts = new LinkedHashMap<>();
for (HostSpec spec : allocatedHosts) {
HostResource host = getExistingHost(spec).orElseGet(() -> addNewHost(spec));
retAllocatedHosts.put(host, spec.membership().orElse(null));
if (! host.getFlavor().isPresent()) {
host.setFlavor(spec.flavor());
log.log(DEBUG, () -> "Host resource " + host.getHostName() + " had no flavor, setting to " + spec.flavor());
}
}
retAllocatedHosts.keySet().forEach(host -> log.log(DEBUG, () -> "Allocated host " + host.getHostName() + " with flavor " + host.getFlavor()));
return retAllocatedHosts;
}
private Optional<HostResource> getExistingHost(HostSpec key) {
List<HostResource> hosts = hostname2host.values().stream()
.filter(resource -> resource.getHostName().equals(key.hostname()))
.collect(Collectors.toList());
if (hosts.isEmpty()) {
return Optional.empty();
} else {
log.log(DEBUG, () -> "Found existing host resource for " + key.hostname() + " with flavor " + hosts.get(0).getFlavor());
return Optional.of(hosts.get(0));
}
}
public void addBoundHost(HostResource host) {
hostname2host.put(host.getHostName(), host);
}
/** A provision logger which forwards to a deploy logger */
private static class ProvisionDeployLogger implements ProvisionLogger {
private final DeployLogger deployLogger;
public ProvisionDeployLogger(DeployLogger deployLogger) {
this.deployLogger = deployLogger;
}
@Override
public void log(Level level, String message) {
deployLogger.log(level, message);
}
}
} |
Please collect to LinkedHashSet to preserve order. | public Set<HostInfo> getHosts() {
return root.getHostSystem().getHosts().stream().map(HostResource::getHostInfo).collect(Collectors.toSet());
} | return root.getHostSystem().getHosts().stream().map(HostResource::getHostInfo).collect(Collectors.toSet()); | public Set<HostInfo> getHosts() {
return root.getHostSystem().getHosts().stream()
.map(HostResource::getHostInfo)
.collect(Collectors.toCollection(LinkedHashSet::new));
} | class VespaModel extends AbstractConfigProducerRoot implements Serializable, Model {
private static final long serialVersionUID = 1L;
public static final Logger log = Logger.getLogger(VespaModel.class.getPackage().toString());
private ConfigModelRepo configModelRepo = new ConfigModelRepo();
private final Optional<ProvisionInfo> info;
/**
* The config id for the root config producer
*/
public static final String ROOT_CONFIGID = "";
private ApplicationConfigProducerRoot root = null;
/**
* Generic service instances - service clusters which have no specific model
*/
private List<ServiceCluster> serviceClusters = new ArrayList<>();
private DeployState deployState;
/** The validation overrides of this. This is never null. */
private final ValidationOverrides validationOverrides;
private final FileDistributor fileDistributor;
/** Creates a Vespa Model from internal model types only */
public VespaModel(ApplicationPackage app) throws IOException, SAXException {
this(app, new NullConfigModelRegistry());
}
/** Creates a Vespa Model from internal model types only */
public VespaModel(DeployState deployState) throws IOException, SAXException {
this(new NullConfigModelRegistry(), deployState);
}
/**
* Constructs vespa model using config given in app
*
* @param app the application to create a model from
* @param configModelRegistry a registry of config model "main" classes which may be used
* to instantiate config models
*/
public VespaModel(ApplicationPackage app, ConfigModelRegistry configModelRegistry) throws IOException, SAXException {
this(configModelRegistry, new DeployState.Builder().applicationPackage(app).build());
}
/**
* Constructs vespa model using config given in app
*
* @param configModelRegistry a registry of config model "main" classes which may be used
* to instantiate config models
* @param deployState the global deploy state to use for this model.
*/
public VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState) throws IOException, SAXException {
this(configModelRegistry, deployState, true, null);
}
private VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState, boolean complete, FileDistributor fileDistributor) throws IOException, SAXException {
super("vespamodel");
this.deployState = deployState;
this.validationOverrides = deployState.validationOverrides();
configModelRegistry = new VespaConfigModelRegistry(configModelRegistry);
VespaModelBuilder builder = new VespaDomBuilder();
root = builder.getRoot(VespaModel.ROOT_CONFIGID, deployState, this);
if (complete) {
configModelRepo.readConfigModels(deployState, builder, root, configModelRegistry);
addServiceClusters(deployState.getApplicationPackage(), builder);
this.info = Optional.of(createProvisionInfo());
setupRouting();
this.fileDistributor = root.getFileDistributionConfigProducer().getFileDistributor();
getAdmin().addPerHostServices(getHostSystem().getHosts(), deployState.getProperties());
freezeModelTopology();
root.prepare(configModelRepo);
configModelRepo.prepareConfigModels();
validateWrapExceptions();
this.deployState = null;
}
else {
this.info = Optional.of(createProvisionInfo());
this.fileDistributor = fileDistributor;
}
}
/** Creates a mutable model with no services instantiated */
public static VespaModel createIncomplete(DeployState deployState) throws IOException, SAXException {
return new VespaModel(new NullConfigModelRegistry(), deployState, false, new FileDistributor(deployState.getFileRegistry()));
}
private ProvisionInfo createProvisionInfo() {
return ProvisionInfo.withHosts(root.getHostSystem().getHostSpecs());
}
private void validateWrapExceptions() {
try {
validate();
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException("Error while validating model:", e);
}
}
/** Adds generic application specific clusters of services */
private void addServiceClusters(ApplicationPackage app, VespaModelBuilder builder) {
for (ServiceCluster sc : builder.getClusters(app, this))
serviceClusters.add(sc);
}
private void setupRouting() {
root.setupRouting(configModelRepo);
}
/** Returns the one and only HostSystem of this VespaModel */
public HostSystem getHostSystem() {
return root.getHostSystem();
}
/** Return a collection of all hostnames used in this application */
@Override
public FileDistributor getFileDistributor() {
return fileDistributor;
}
/** Returns this models Vespa instance */
public ApplicationConfigProducerRoot getVespa() { return root; }
@Override
public boolean allowModelVersionMismatch() {
return validationOverrides.allows(ValidationId.configModelVersionMismatch) ||
validationOverrides.allows(ValidationId.skipOldConfigModels);
}
@Override
public boolean skipOldConfigModels() {
return validationOverrides.allows(ValidationId.skipOldConfigModels);
}
/**
* Resolves config of the given type and config id, by first instantiating the correct {@link com.yahoo.config.ConfigInstance.Builder},
* calling {@link
* types in the model.
*
* @param clazz The type of config
* @param configId The config id
* @return A config instance of the given type
*/
public <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> clazz, String configId) {
try {
ConfigInstance.Builder builder = newBuilder(clazz);
getConfig(builder, configId);
return newConfigInstance(clazz, builder);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Populates an instance of configClass with config produced by configProducer.
*/
public static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> configClass, ConfigProducer configProducer) {
try {
Builder builder = newBuilder(configClass);
populateConfigBuilder(builder, configProducer);
return newConfigInstance(configClass, builder);
} catch (Exception e) {
throw new RuntimeException("Failed getting config for class " + configClass.getName(), e);
}
}
private static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE newConfigInstance(Class<CONFIGTYPE> configClass, Builder builder)
throws NoSuchMethodException, InstantiationException, IllegalAccessException, java.lang.reflect.InvocationTargetException {
Constructor<CONFIGTYPE> constructor = configClass.getConstructor(builder.getClass());
return constructor.newInstance(builder);
}
private static Builder newBuilder(Class<? extends ConfigInstance> configClass)
throws ClassNotFoundException, InstantiationException, IllegalAccessException {
Class builderClazz = configClass.getClassLoader().loadClass(configClass.getName() + "$Builder");
return (Builder)builderClazz.newInstance();
}
/**
* Throw if the config id does not exist in the model.
*
* @param configId a config id
*/
protected void checkId(String configId) {
if ( ! id2producer.containsKey(configId)) {
log.log(LogLevel.DEBUG, "Invalid config id: " + configId);
}
}
/**
* Resolves config for a given config id and populates the given builder with the config.
*
* @param builder a configinstance builder
* @param configId the config id for the config client
* @return the builder if a producer was found, and it did apply config, null otherwise
*/
@SuppressWarnings("unchecked")
@Override
public ConfigInstance.Builder getConfig(ConfigInstance.Builder builder, String configId) {
checkId(configId);
Optional<ConfigProducer> configProducer = getConfigProducer(configId);
if ( ! configProducer.isPresent()) return null;
populateConfigBuilder(builder, configProducer.get());
return builder;
}
private static void populateConfigBuilder(Builder builder, ConfigProducer configProducer) {
boolean found = configProducer.cascadeConfig(builder);
boolean foundOverride = configProducer.addUserConfig(builder);
if (logDebug()) {
log.log(LogLevel.DEBUG, "Trying to get config for " + builder.getClass().getDeclaringClass().getName() +
" for config id " + quote(configProducer.getConfigId()) +
", found=" + found + ", foundOverride=" + foundOverride);
}
}
/**
* Resolve config for a given key and config definition
*
* @param configKey The key to resolve.
* @param targetDef The config definition to use for the schema
* @return The payload as a list of strings
*/
@Override
public ConfigPayload getConfig(ConfigKey configKey, com.yahoo.vespa.config.buildergen.ConfigDefinition targetDef) {
ConfigBuilder builder = InstanceResolver.resolveToBuilder(configKey, this, targetDef);
if (builder != null) {
log.log(LogLevel.DEBUG, () -> "Found builder for " + configKey);
ConfigPayload payload;
InnerCNode innerCNode = targetDef != null ? targetDef.getCNode() : null;
if (builder instanceof GenericConfig.GenericConfigBuilder) {
payload = getConfigFromGenericBuilder(builder);
} else {
payload = getConfigFromBuilder(configKey, builder, innerCNode);
}
return (innerCNode != null) ? payload.applyDefaultsFromDef(innerCNode) : payload;
}
return null;
}
private ConfigPayload getConfigFromBuilder(ConfigKey configKey, ConfigBuilder builder, InnerCNode targetDef) {
try {
ConfigInstance instance = InstanceResolver.resolveToInstance(configKey, builder, targetDef);
log.log(LogLevel.DEBUG, () -> "getConfigFromBuilder for " + configKey + ",instance=" + instance);
return ConfigPayload.fromInstance(instance);
} catch (ConfigurationRuntimeException e) {
log.log(LogLevel.INFO, "Error resolving instance for key '" + configKey + "', returning empty config: " + Exceptions.toMessageString(e));
return ConfigPayload.fromBuilder(new ConfigPayloadBuilder());
}
}
private ConfigPayload getConfigFromGenericBuilder(ConfigBuilder builder) {
return ((GenericConfig.GenericConfigBuilder) builder).getPayload();
}
@Override
public Set<ConfigKey<?>> allConfigsProduced() {
Set<ConfigKey<?>> keySet = new LinkedHashSet<>();
for (ConfigProducer producer : id2producer().values()) {
keySet.addAll(configsProduced(producer));
}
return keySet;
}
public ConfigInstance.Builder createBuilder(ConfigDefinitionKey key, ConfigDefinition targetDef) {
String className = ConfigGenerator.createClassName(key.getName());
Class<?> clazz;
final String fullClassName = InstanceResolver.packageName(key) + "." + className;
final String builderName = fullClassName + "$Builder";
final String producerName = fullClassName + "$Producer";
ClassLoader classLoader = getConfigClassLoader(producerName);
if (classLoader == null) {
classLoader = getClass().getClassLoader();
if (logDebug()) {
log.log(LogLevel.DEBUG, "No producer found to get classloader from for " + fullClassName + ". Using default");
}
}
try {
clazz = classLoader.loadClass(builderName);
} catch (ClassNotFoundException e) {
if (logDebug()) {
log.log(LogLevel.DEBUG, "Tried to load " + builderName + ", not found, trying with generic builder");
}
return new GenericConfig.GenericConfigBuilder(key, new ConfigPayloadBuilder());
}
Object i;
try {
i = clazz.newInstance();
} catch (InstantiationException | IllegalAccessException e) {
throw new ConfigurationRuntimeException(e);
}
if (!(i instanceof ConfigInstance.Builder)) {
throw new ConfigurationRuntimeException(fullClassName + " is not a ConfigInstance.Builder, can not produce config for the name '" + key.getName() + "'.");
}
return (ConfigInstance.Builder) i;
}
private static boolean logDebug() {
return log.isLoggable(LogLevel.DEBUG);
}
/**
* The set of all config ids present
* @return set of config ids
*/
public Set<String> allConfigIds() {
return id2producer.keySet();
}
@Override
public void distributeFiles(FileDistribution fileDistribution) {
getFileDistributor().sendDeployedFiles(fileDistribution);
}
@Override
public void reloadDeployFileDistributor(FileDistribution fileDistribution) {
getFileDistributor().reloadDeployFileDistributor(fileDistribution);
}
@Override
public Optional<ProvisionInfo> getProvisionInfo() {
return info;
}
private static Set<ConfigKey<?>> configsProduced(ConfigProducer cp) {
Set<ConfigKey<?>> ret = ReflectionUtil.configsProducedByInterface(cp.getClass(), cp.getConfigId());
UserConfigRepo userConfigs = cp.getUserConfigs();
for (ConfigDefinitionKey userKey : userConfigs.configsProduced()) {
ret.add(new ConfigKey<>(userKey.getName(), cp.getConfigId(), userKey.getNamespace()));
}
return ret;
}
@Override
public DeployState getDeployState() {
if (deployState == null)
throw new IllegalStateException("Cannot call getDeployState() once model has been built");
return deployState;
}
/**
* @return an unmodifiable copy of the set of configIds in this VespaModel.
*/
public Set<String> getConfigIds() {
return Collections.unmodifiableSet(id2producer.keySet());
}
/**
* Returns the admin component of the vespamodel.
*
* @return Admin
*/
public Admin getAdmin() {
return root.getAdmin();
}
/**
* Adds the descendant (at any depth level), so it can be looked up
* on configId in the Map.
*
* @param configId the id to register with, not necessarily equal to descendant.getConfigId().
* @param descendant The configProducer descendant to add
*/
public void addDescendant(String configId, AbstractConfigProducer descendant) {
if (id2producer.containsKey(configId)) {
throw new RuntimeException
("Config ID '" + configId + "' cannot be reserved by an instance of class '" +
descendant.getClass().getName() +
"' since it is already used by an instance of class '" +
id2producer.get(configId).getClass().getName() +
"'. (This is commonly caused by service/node index " +
"collisions in the config.)");
}
id2producer.put(configId, descendant);
}
/**
* Writes MODEL.cfg files for all config producers.
*
* @param baseDirectory dir to write files to
*/
public void writeFiles(File baseDirectory) throws IOException {
super.writeFiles(baseDirectory);
for (ConfigProducer cp : id2producer.values()) {
try {
File destination = new File(baseDirectory, cp.getConfigId().replace("/", File.separator));
cp.writeFiles(destination);
} catch (IOException e) {
throw new IOException(cp.getConfigId() + ": " + e.getMessage());
}
}
}
public Clients getClients() {
return configModelRepo.getClients();
}
/** Returns all search clusters, both in Search and Content */
public List<AbstractSearchCluster> getSearchClusters() {
return Content.getSearchClusters(configModelRepo());
}
/** Returns a map of content clusters by ID */
public Map<String, ContentCluster> getContentClusters() {
Map<String, ContentCluster> clusters = new LinkedHashMap<>();
for (Content model : configModelRepo.getModels(Content.class)) {
clusters.put(model.getId(), model.getCluster());
}
return Collections.unmodifiableMap(clusters);
}
/** Returns a map of container clusters by ID */
public Map<String, ContainerCluster> getContainerClusters() {
Map<String, ContainerCluster> clusters = new LinkedHashMap<>();
for (ContainerModel model : configModelRepo.getModels(ContainerModel.class)) {
clusters.put(model.getId(), model.getCluster());
}
return Collections.unmodifiableMap(clusters);
}
/** Returns the routing config model. This might be null. */
public Routing getRouting() {
return configModelRepo.getRouting();
}
public FileDistributionConfigProducer getFileDistributionConfigProducer() {
return root.getFileDistributionConfigProducer();
}
/** The clusters of application specific generic services */
public List<ServiceCluster> serviceClusters() {
return serviceClusters;
}
/** Returns an unmodifiable view of the mapping of config id to {@link ConfigProducer} */
public Map<String, ConfigProducer> id2producer() {
return Collections.unmodifiableMap(id2producer);
}
/**
* @return this root's model repository
*/
public ConfigModelRepo configModelRepo() {
return configModelRepo;
}
@Override
public DeployLogger deployLogger() {
return getDeployState().getDeployLogger();
}
} | class VespaModel extends AbstractConfigProducerRoot implements Serializable, Model {
private static final long serialVersionUID = 1L;
public static final Logger log = Logger.getLogger(VespaModel.class.getPackage().toString());
private ConfigModelRepo configModelRepo = new ConfigModelRepo();
private final Optional<ProvisionInfo> info;
/**
* The config id for the root config producer
*/
public static final String ROOT_CONFIGID = "";
private ApplicationConfigProducerRoot root = null;
/**
* Generic service instances - service clusters which have no specific model
*/
private List<ServiceCluster> serviceClusters = new ArrayList<>();
private DeployState deployState;
/** The validation overrides of this. This is never null. */
private final ValidationOverrides validationOverrides;
private final FileDistributor fileDistributor;
/** Creates a Vespa Model from internal model types only */
public VespaModel(ApplicationPackage app) throws IOException, SAXException {
this(app, new NullConfigModelRegistry());
}
/** Creates a Vespa Model from internal model types only */
public VespaModel(DeployState deployState) throws IOException, SAXException {
this(new NullConfigModelRegistry(), deployState);
}
/**
* Constructs vespa model using config given in app
*
* @param app the application to create a model from
* @param configModelRegistry a registry of config model "main" classes which may be used
* to instantiate config models
*/
public VespaModel(ApplicationPackage app, ConfigModelRegistry configModelRegistry) throws IOException, SAXException {
this(configModelRegistry, new DeployState.Builder().applicationPackage(app).build());
}
/**
* Constructs vespa model using config given in app
*
* @param configModelRegistry a registry of config model "main" classes which may be used
* to instantiate config models
* @param deployState the global deploy state to use for this model.
*/
public VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState) throws IOException, SAXException {
this(configModelRegistry, deployState, true, null);
}
private VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState, boolean complete, FileDistributor fileDistributor) throws IOException, SAXException {
super("vespamodel");
this.deployState = deployState;
this.validationOverrides = deployState.validationOverrides();
configModelRegistry = new VespaConfigModelRegistry(configModelRegistry);
VespaModelBuilder builder = new VespaDomBuilder();
root = builder.getRoot(VespaModel.ROOT_CONFIGID, deployState, this);
if (complete) {
configModelRepo.readConfigModels(deployState, builder, root, configModelRegistry);
addServiceClusters(deployState.getApplicationPackage(), builder);
this.info = Optional.of(createProvisionInfo());
setupRouting();
this.fileDistributor = root.getFileDistributionConfigProducer().getFileDistributor();
getAdmin().addPerHostServices(getHostSystem().getHosts(), deployState.getProperties());
freezeModelTopology();
root.prepare(configModelRepo);
configModelRepo.prepareConfigModels();
validateWrapExceptions();
this.deployState = null;
}
else {
this.info = Optional.of(createProvisionInfo());
this.fileDistributor = fileDistributor;
}
}
/** Creates a mutable model with no services instantiated */
public static VespaModel createIncomplete(DeployState deployState) throws IOException, SAXException {
return new VespaModel(new NullConfigModelRegistry(), deployState, false, new FileDistributor(deployState.getFileRegistry()));
}
private ProvisionInfo createProvisionInfo() {
return ProvisionInfo.withHosts(root.getHostSystem().getHostSpecs());
}
private void validateWrapExceptions() {
try {
validate();
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException("Error while validating model:", e);
}
}
/** Adds generic application specific clusters of services */
private void addServiceClusters(ApplicationPackage app, VespaModelBuilder builder) {
for (ServiceCluster sc : builder.getClusters(app, this))
serviceClusters.add(sc);
}
private void setupRouting() {
root.setupRouting(configModelRepo);
}
/** Returns the one and only HostSystem of this VespaModel */
public HostSystem getHostSystem() {
return root.getHostSystem();
}
/** Return a collection of all hostnames used in this application */
@Override
public FileDistributor getFileDistributor() {
return fileDistributor;
}
/** Returns this models Vespa instance */
public ApplicationConfigProducerRoot getVespa() { return root; }
@Override
public boolean allowModelVersionMismatch() {
return validationOverrides.allows(ValidationId.configModelVersionMismatch) ||
validationOverrides.allows(ValidationId.skipOldConfigModels);
}
@Override
public boolean skipOldConfigModels() {
return validationOverrides.allows(ValidationId.skipOldConfigModels);
}
/**
* Resolves config of the given type and config id, by first instantiating the correct {@link com.yahoo.config.ConfigInstance.Builder},
* calling {@link
* types in the model.
*
* @param clazz The type of config
* @param configId The config id
* @return A config instance of the given type
*/
public <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> clazz, String configId) {
try {
ConfigInstance.Builder builder = newBuilder(clazz);
getConfig(builder, configId);
return newConfigInstance(clazz, builder);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Populates an instance of configClass with config produced by configProducer.
*/
public static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> configClass, ConfigProducer configProducer) {
try {
Builder builder = newBuilder(configClass);
populateConfigBuilder(builder, configProducer);
return newConfigInstance(configClass, builder);
} catch (Exception e) {
throw new RuntimeException("Failed getting config for class " + configClass.getName(), e);
}
}
private static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE newConfigInstance(Class<CONFIGTYPE> configClass, Builder builder)
throws NoSuchMethodException, InstantiationException, IllegalAccessException, java.lang.reflect.InvocationTargetException {
Constructor<CONFIGTYPE> constructor = configClass.getConstructor(builder.getClass());
return constructor.newInstance(builder);
}
private static Builder newBuilder(Class<? extends ConfigInstance> configClass)
throws ClassNotFoundException, InstantiationException, IllegalAccessException {
Class builderClazz = configClass.getClassLoader().loadClass(configClass.getName() + "$Builder");
return (Builder)builderClazz.newInstance();
}
/**
* Throw if the config id does not exist in the model.
*
* @param configId a config id
*/
protected void checkId(String configId) {
if ( ! id2producer.containsKey(configId)) {
log.log(LogLevel.DEBUG, "Invalid config id: " + configId);
}
}
/**
* Resolves config for a given config id and populates the given builder with the config.
*
* @param builder a configinstance builder
* @param configId the config id for the config client
* @return the builder if a producer was found, and it did apply config, null otherwise
*/
@SuppressWarnings("unchecked")
@Override
public ConfigInstance.Builder getConfig(ConfigInstance.Builder builder, String configId) {
checkId(configId);
Optional<ConfigProducer> configProducer = getConfigProducer(configId);
if ( ! configProducer.isPresent()) return null;
populateConfigBuilder(builder, configProducer.get());
return builder;
}
private static void populateConfigBuilder(Builder builder, ConfigProducer configProducer) {
boolean found = configProducer.cascadeConfig(builder);
boolean foundOverride = configProducer.addUserConfig(builder);
if (logDebug()) {
log.log(LogLevel.DEBUG, "Trying to get config for " + builder.getClass().getDeclaringClass().getName() +
" for config id " + quote(configProducer.getConfigId()) +
", found=" + found + ", foundOverride=" + foundOverride);
}
}
/**
* Resolve config for a given key and config definition
*
* @param configKey The key to resolve.
* @param targetDef The config definition to use for the schema
* @return The payload as a list of strings
*/
@Override
public ConfigPayload getConfig(ConfigKey configKey, com.yahoo.vespa.config.buildergen.ConfigDefinition targetDef) {
ConfigBuilder builder = InstanceResolver.resolveToBuilder(configKey, this, targetDef);
if (builder != null) {
log.log(LogLevel.DEBUG, () -> "Found builder for " + configKey);
ConfigPayload payload;
InnerCNode innerCNode = targetDef != null ? targetDef.getCNode() : null;
if (builder instanceof GenericConfig.GenericConfigBuilder) {
payload = getConfigFromGenericBuilder(builder);
} else {
payload = getConfigFromBuilder(configKey, builder, innerCNode);
}
return (innerCNode != null) ? payload.applyDefaultsFromDef(innerCNode) : payload;
}
return null;
}
private ConfigPayload getConfigFromBuilder(ConfigKey configKey, ConfigBuilder builder, InnerCNode targetDef) {
try {
ConfigInstance instance = InstanceResolver.resolveToInstance(configKey, builder, targetDef);
log.log(LogLevel.DEBUG, () -> "getConfigFromBuilder for " + configKey + ",instance=" + instance);
return ConfigPayload.fromInstance(instance);
} catch (ConfigurationRuntimeException e) {
log.log(LogLevel.INFO, "Error resolving instance for key '" + configKey + "', returning empty config: " + Exceptions.toMessageString(e));
return ConfigPayload.fromBuilder(new ConfigPayloadBuilder());
}
}
private ConfigPayload getConfigFromGenericBuilder(ConfigBuilder builder) {
return ((GenericConfig.GenericConfigBuilder) builder).getPayload();
}
@Override
public Set<ConfigKey<?>> allConfigsProduced() {
Set<ConfigKey<?>> keySet = new LinkedHashSet<>();
for (ConfigProducer producer : id2producer().values()) {
keySet.addAll(configsProduced(producer));
}
return keySet;
}
public ConfigInstance.Builder createBuilder(ConfigDefinitionKey key, ConfigDefinition targetDef) {
String className = ConfigGenerator.createClassName(key.getName());
Class<?> clazz;
final String fullClassName = InstanceResolver.packageName(key) + "." + className;
final String builderName = fullClassName + "$Builder";
final String producerName = fullClassName + "$Producer";
ClassLoader classLoader = getConfigClassLoader(producerName);
if (classLoader == null) {
classLoader = getClass().getClassLoader();
if (logDebug()) {
log.log(LogLevel.DEBUG, "No producer found to get classloader from for " + fullClassName + ". Using default");
}
}
try {
clazz = classLoader.loadClass(builderName);
} catch (ClassNotFoundException e) {
if (logDebug()) {
log.log(LogLevel.DEBUG, "Tried to load " + builderName + ", not found, trying with generic builder");
}
return new GenericConfig.GenericConfigBuilder(key, new ConfigPayloadBuilder());
}
Object i;
try {
i = clazz.newInstance();
} catch (InstantiationException | IllegalAccessException e) {
throw new ConfigurationRuntimeException(e);
}
if (!(i instanceof ConfigInstance.Builder)) {
throw new ConfigurationRuntimeException(fullClassName + " is not a ConfigInstance.Builder, can not produce config for the name '" + key.getName() + "'.");
}
return (ConfigInstance.Builder) i;
}
private static boolean logDebug() {
return log.isLoggable(LogLevel.DEBUG);
}
/**
* The set of all config ids present
* @return set of config ids
*/
public Set<String> allConfigIds() {
return id2producer.keySet();
}
@Override
public void distributeFiles(FileDistribution fileDistribution) {
getFileDistributor().sendDeployedFiles(fileDistribution);
}
@Override
public void reloadDeployFileDistributor(FileDistribution fileDistribution) {
getFileDistributor().reloadDeployFileDistributor(fileDistribution);
}
@Override
public Optional<ProvisionInfo> getProvisionInfo() {
return info;
}
private static Set<ConfigKey<?>> configsProduced(ConfigProducer cp) {
Set<ConfigKey<?>> ret = ReflectionUtil.configsProducedByInterface(cp.getClass(), cp.getConfigId());
UserConfigRepo userConfigs = cp.getUserConfigs();
for (ConfigDefinitionKey userKey : userConfigs.configsProduced()) {
ret.add(new ConfigKey<>(userKey.getName(), cp.getConfigId(), userKey.getNamespace()));
}
return ret;
}
@Override
public DeployState getDeployState() {
if (deployState == null)
throw new IllegalStateException("Cannot call getDeployState() once model has been built");
return deployState;
}
/**
* @return an unmodifiable copy of the set of configIds in this VespaModel.
*/
public Set<String> getConfigIds() {
return Collections.unmodifiableSet(id2producer.keySet());
}
/**
* Returns the admin component of the vespamodel.
*
* @return Admin
*/
public Admin getAdmin() {
return root.getAdmin();
}
/**
* Adds the descendant (at any depth level), so it can be looked up
* on configId in the Map.
*
* @param configId the id to register with, not necessarily equal to descendant.getConfigId().
* @param descendant The configProducer descendant to add
*/
public void addDescendant(String configId, AbstractConfigProducer descendant) {
if (id2producer.containsKey(configId)) {
throw new RuntimeException
("Config ID '" + configId + "' cannot be reserved by an instance of class '" +
descendant.getClass().getName() +
"' since it is already used by an instance of class '" +
id2producer.get(configId).getClass().getName() +
"'. (This is commonly caused by service/node index " +
"collisions in the config.)");
}
id2producer.put(configId, descendant);
}
/**
* Writes MODEL.cfg files for all config producers.
*
* @param baseDirectory dir to write files to
*/
public void writeFiles(File baseDirectory) throws IOException {
super.writeFiles(baseDirectory);
for (ConfigProducer cp : id2producer.values()) {
try {
File destination = new File(baseDirectory, cp.getConfigId().replace("/", File.separator));
cp.writeFiles(destination);
} catch (IOException e) {
throw new IOException(cp.getConfigId() + ": " + e.getMessage());
}
}
}
public Clients getClients() {
return configModelRepo.getClients();
}
/** Returns all search clusters, both in Search and Content */
public List<AbstractSearchCluster> getSearchClusters() {
return Content.getSearchClusters(configModelRepo());
}
/** Returns a map of content clusters by ID */
public Map<String, ContentCluster> getContentClusters() {
Map<String, ContentCluster> clusters = new LinkedHashMap<>();
for (Content model : configModelRepo.getModels(Content.class)) {
clusters.put(model.getId(), model.getCluster());
}
return Collections.unmodifiableMap(clusters);
}
/** Returns a map of container clusters by ID */
public Map<String, ContainerCluster> getContainerClusters() {
Map<String, ContainerCluster> clusters = new LinkedHashMap<>();
for (ContainerModel model : configModelRepo.getModels(ContainerModel.class)) {
clusters.put(model.getId(), model.getCluster());
}
return Collections.unmodifiableMap(clusters);
}
/** Returns the routing config model. This might be null. */
public Routing getRouting() {
return configModelRepo.getRouting();
}
public FileDistributionConfigProducer getFileDistributionConfigProducer() {
return root.getFileDistributionConfigProducer();
}
/** The clusters of application specific generic services */
public List<ServiceCluster> serviceClusters() {
return serviceClusters;
}
/** Returns an unmodifiable view of the mapping of config id to {@link ConfigProducer} */
public Map<String, ConfigProducer> id2producer() {
return Collections.unmodifiableMap(id2producer);
}
/**
* @return this root's model repository
*/
public ConfigModelRepo configModelRepo() {
return configModelRepo;
}
@Override
public DeployLogger deployLogger() {
return getDeployState().getDeployLogger();
}
} |
Yes, of course, thanks, will fix | public Set<HostInfo> getHosts() {
return root.getHostSystem().getHosts().stream().map(HostResource::getHostInfo).collect(Collectors.toSet());
} | return root.getHostSystem().getHosts().stream().map(HostResource::getHostInfo).collect(Collectors.toSet()); | public Set<HostInfo> getHosts() {
return root.getHostSystem().getHosts().stream()
.map(HostResource::getHostInfo)
.collect(Collectors.toCollection(LinkedHashSet::new));
} | class VespaModel extends AbstractConfigProducerRoot implements Serializable, Model {
private static final long serialVersionUID = 1L;
public static final Logger log = Logger.getLogger(VespaModel.class.getPackage().toString());
private ConfigModelRepo configModelRepo = new ConfigModelRepo();
private final Optional<ProvisionInfo> info;
/**
* The config id for the root config producer
*/
public static final String ROOT_CONFIGID = "";
private ApplicationConfigProducerRoot root = null;
/**
* Generic service instances - service clusters which have no specific model
*/
private List<ServiceCluster> serviceClusters = new ArrayList<>();
private DeployState deployState;
/** The validation overrides of this. This is never null. */
private final ValidationOverrides validationOverrides;
private final FileDistributor fileDistributor;
/** Creates a Vespa Model from internal model types only */
public VespaModel(ApplicationPackage app) throws IOException, SAXException {
this(app, new NullConfigModelRegistry());
}
/** Creates a Vespa Model from internal model types only */
public VespaModel(DeployState deployState) throws IOException, SAXException {
this(new NullConfigModelRegistry(), deployState);
}
/**
* Constructs vespa model using config given in app
*
* @param app the application to create a model from
* @param configModelRegistry a registry of config model "main" classes which may be used
* to instantiate config models
*/
public VespaModel(ApplicationPackage app, ConfigModelRegistry configModelRegistry) throws IOException, SAXException {
this(configModelRegistry, new DeployState.Builder().applicationPackage(app).build());
}
/**
* Constructs vespa model using config given in app
*
* @param configModelRegistry a registry of config model "main" classes which may be used
* to instantiate config models
* @param deployState the global deploy state to use for this model.
*/
public VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState) throws IOException, SAXException {
this(configModelRegistry, deployState, true, null);
}
private VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState, boolean complete, FileDistributor fileDistributor) throws IOException, SAXException {
super("vespamodel");
this.deployState = deployState;
this.validationOverrides = deployState.validationOverrides();
configModelRegistry = new VespaConfigModelRegistry(configModelRegistry);
VespaModelBuilder builder = new VespaDomBuilder();
root = builder.getRoot(VespaModel.ROOT_CONFIGID, deployState, this);
if (complete) {
configModelRepo.readConfigModels(deployState, builder, root, configModelRegistry);
addServiceClusters(deployState.getApplicationPackage(), builder);
this.info = Optional.of(createProvisionInfo());
setupRouting();
this.fileDistributor = root.getFileDistributionConfigProducer().getFileDistributor();
getAdmin().addPerHostServices(getHostSystem().getHosts(), deployState.getProperties());
freezeModelTopology();
root.prepare(configModelRepo);
configModelRepo.prepareConfigModels();
validateWrapExceptions();
this.deployState = null;
}
else {
this.info = Optional.of(createProvisionInfo());
this.fileDistributor = fileDistributor;
}
}
/** Creates a mutable model with no services instantiated */
public static VespaModel createIncomplete(DeployState deployState) throws IOException, SAXException {
return new VespaModel(new NullConfigModelRegistry(), deployState, false, new FileDistributor(deployState.getFileRegistry()));
}
private ProvisionInfo createProvisionInfo() {
return ProvisionInfo.withHosts(root.getHostSystem().getHostSpecs());
}
private void validateWrapExceptions() {
try {
validate();
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException("Error while validating model:", e);
}
}
/** Adds generic application specific clusters of services */
private void addServiceClusters(ApplicationPackage app, VespaModelBuilder builder) {
for (ServiceCluster sc : builder.getClusters(app, this))
serviceClusters.add(sc);
}
private void setupRouting() {
root.setupRouting(configModelRepo);
}
/** Returns the one and only HostSystem of this VespaModel */
public HostSystem getHostSystem() {
return root.getHostSystem();
}
/** Return a collection of all hostnames used in this application */
@Override
public FileDistributor getFileDistributor() {
return fileDistributor;
}
/** Returns this models Vespa instance */
public ApplicationConfigProducerRoot getVespa() { return root; }
@Override
public boolean allowModelVersionMismatch() {
return validationOverrides.allows(ValidationId.configModelVersionMismatch) ||
validationOverrides.allows(ValidationId.skipOldConfigModels);
}
@Override
public boolean skipOldConfigModels() {
return validationOverrides.allows(ValidationId.skipOldConfigModels);
}
/**
* Resolves config of the given type and config id, by first instantiating the correct {@link com.yahoo.config.ConfigInstance.Builder},
* calling {@link
* types in the model.
*
* @param clazz The type of config
* @param configId The config id
* @return A config instance of the given type
*/
public <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> clazz, String configId) {
try {
ConfigInstance.Builder builder = newBuilder(clazz);
getConfig(builder, configId);
return newConfigInstance(clazz, builder);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Populates an instance of configClass with config produced by configProducer.
*/
public static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> configClass, ConfigProducer configProducer) {
try {
Builder builder = newBuilder(configClass);
populateConfigBuilder(builder, configProducer);
return newConfigInstance(configClass, builder);
} catch (Exception e) {
throw new RuntimeException("Failed getting config for class " + configClass.getName(), e);
}
}
private static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE newConfigInstance(Class<CONFIGTYPE> configClass, Builder builder)
throws NoSuchMethodException, InstantiationException, IllegalAccessException, java.lang.reflect.InvocationTargetException {
Constructor<CONFIGTYPE> constructor = configClass.getConstructor(builder.getClass());
return constructor.newInstance(builder);
}
private static Builder newBuilder(Class<? extends ConfigInstance> configClass)
throws ClassNotFoundException, InstantiationException, IllegalAccessException {
Class builderClazz = configClass.getClassLoader().loadClass(configClass.getName() + "$Builder");
return (Builder)builderClazz.newInstance();
}
/**
* Throw if the config id does not exist in the model.
*
* @param configId a config id
*/
protected void checkId(String configId) {
if ( ! id2producer.containsKey(configId)) {
log.log(LogLevel.DEBUG, "Invalid config id: " + configId);
}
}
/**
* Resolves config for a given config id and populates the given builder with the config.
*
* @param builder a configinstance builder
* @param configId the config id for the config client
* @return the builder if a producer was found, and it did apply config, null otherwise
*/
@SuppressWarnings("unchecked")
@Override
public ConfigInstance.Builder getConfig(ConfigInstance.Builder builder, String configId) {
checkId(configId);
Optional<ConfigProducer> configProducer = getConfigProducer(configId);
if ( ! configProducer.isPresent()) return null;
populateConfigBuilder(builder, configProducer.get());
return builder;
}
private static void populateConfigBuilder(Builder builder, ConfigProducer configProducer) {
boolean found = configProducer.cascadeConfig(builder);
boolean foundOverride = configProducer.addUserConfig(builder);
if (logDebug()) {
log.log(LogLevel.DEBUG, "Trying to get config for " + builder.getClass().getDeclaringClass().getName() +
" for config id " + quote(configProducer.getConfigId()) +
", found=" + found + ", foundOverride=" + foundOverride);
}
}
/**
* Resolve config for a given key and config definition
*
* @param configKey The key to resolve.
* @param targetDef The config definition to use for the schema
* @return The payload as a list of strings
*/
@Override
public ConfigPayload getConfig(ConfigKey configKey, com.yahoo.vespa.config.buildergen.ConfigDefinition targetDef) {
ConfigBuilder builder = InstanceResolver.resolveToBuilder(configKey, this, targetDef);
if (builder != null) {
log.log(LogLevel.DEBUG, () -> "Found builder for " + configKey);
ConfigPayload payload;
InnerCNode innerCNode = targetDef != null ? targetDef.getCNode() : null;
if (builder instanceof GenericConfig.GenericConfigBuilder) {
payload = getConfigFromGenericBuilder(builder);
} else {
payload = getConfigFromBuilder(configKey, builder, innerCNode);
}
return (innerCNode != null) ? payload.applyDefaultsFromDef(innerCNode) : payload;
}
return null;
}
private ConfigPayload getConfigFromBuilder(ConfigKey configKey, ConfigBuilder builder, InnerCNode targetDef) {
try {
ConfigInstance instance = InstanceResolver.resolveToInstance(configKey, builder, targetDef);
log.log(LogLevel.DEBUG, () -> "getConfigFromBuilder for " + configKey + ",instance=" + instance);
return ConfigPayload.fromInstance(instance);
} catch (ConfigurationRuntimeException e) {
log.log(LogLevel.INFO, "Error resolving instance for key '" + configKey + "', returning empty config: " + Exceptions.toMessageString(e));
return ConfigPayload.fromBuilder(new ConfigPayloadBuilder());
}
}
private ConfigPayload getConfigFromGenericBuilder(ConfigBuilder builder) {
return ((GenericConfig.GenericConfigBuilder) builder).getPayload();
}
@Override
public Set<ConfigKey<?>> allConfigsProduced() {
Set<ConfigKey<?>> keySet = new LinkedHashSet<>();
for (ConfigProducer producer : id2producer().values()) {
keySet.addAll(configsProduced(producer));
}
return keySet;
}
public ConfigInstance.Builder createBuilder(ConfigDefinitionKey key, ConfigDefinition targetDef) {
String className = ConfigGenerator.createClassName(key.getName());
Class<?> clazz;
final String fullClassName = InstanceResolver.packageName(key) + "." + className;
final String builderName = fullClassName + "$Builder";
final String producerName = fullClassName + "$Producer";
ClassLoader classLoader = getConfigClassLoader(producerName);
if (classLoader == null) {
classLoader = getClass().getClassLoader();
if (logDebug()) {
log.log(LogLevel.DEBUG, "No producer found to get classloader from for " + fullClassName + ". Using default");
}
}
try {
clazz = classLoader.loadClass(builderName);
} catch (ClassNotFoundException e) {
if (logDebug()) {
log.log(LogLevel.DEBUG, "Tried to load " + builderName + ", not found, trying with generic builder");
}
return new GenericConfig.GenericConfigBuilder(key, new ConfigPayloadBuilder());
}
Object i;
try {
i = clazz.newInstance();
} catch (InstantiationException | IllegalAccessException e) {
throw new ConfigurationRuntimeException(e);
}
if (!(i instanceof ConfigInstance.Builder)) {
throw new ConfigurationRuntimeException(fullClassName + " is not a ConfigInstance.Builder, can not produce config for the name '" + key.getName() + "'.");
}
return (ConfigInstance.Builder) i;
}
private static boolean logDebug() {
return log.isLoggable(LogLevel.DEBUG);
}
/**
* The set of all config ids present
* @return set of config ids
*/
public Set<String> allConfigIds() {
return id2producer.keySet();
}
@Override
public void distributeFiles(FileDistribution fileDistribution) {
getFileDistributor().sendDeployedFiles(fileDistribution);
}
@Override
public void reloadDeployFileDistributor(FileDistribution fileDistribution) {
getFileDistributor().reloadDeployFileDistributor(fileDistribution);
}
@Override
public Optional<ProvisionInfo> getProvisionInfo() {
return info;
}
private static Set<ConfigKey<?>> configsProduced(ConfigProducer cp) {
Set<ConfigKey<?>> ret = ReflectionUtil.configsProducedByInterface(cp.getClass(), cp.getConfigId());
UserConfigRepo userConfigs = cp.getUserConfigs();
for (ConfigDefinitionKey userKey : userConfigs.configsProduced()) {
ret.add(new ConfigKey<>(userKey.getName(), cp.getConfigId(), userKey.getNamespace()));
}
return ret;
}
@Override
public DeployState getDeployState() {
if (deployState == null)
throw new IllegalStateException("Cannot call getDeployState() once model has been built");
return deployState;
}
/**
* @return an unmodifiable view of the set of configIds in this VespaModel.
*/
public Set<String> getConfigIds() {
return Collections.unmodifiableSet(id2producer.keySet());
}
/**
* Returns the admin component of the vespamodel.
*
* @return Admin
*/
public Admin getAdmin() {
return root.getAdmin();
}
/**
* Adds the descendant (at any depth level), so it can be looked up
* on configId in the Map.
*
* @param configId the id to register with, not necessarily equal to descendant.getConfigId().
* @param descendant The configProducer descendant to add
*/
public void addDescendant(String configId, AbstractConfigProducer descendant) {
if (id2producer.containsKey(configId)) {
throw new RuntimeException
("Config ID '" + configId + "' cannot be reserved by an instance of class '" +
descendant.getClass().getName() +
"' since it is already used by an instance of class '" +
id2producer.get(configId).getClass().getName() +
"'. (This is commonly caused by service/node index " +
"collisions in the config.)");
}
id2producer.put(configId, descendant);
}
/**
* Writes MODEL.cfg files for all config producers.
*
* @param baseDirectory dir to write files to
*/
public void writeFiles(File baseDirectory) throws IOException {
super.writeFiles(baseDirectory);
for (ConfigProducer cp : id2producer.values()) {
try {
File destination = new File(baseDirectory, cp.getConfigId().replace("/", File.separator));
cp.writeFiles(destination);
} catch (IOException e) {
throw new IOException(cp.getConfigId() + ": " + e.getMessage());
}
}
}
public Clients getClients() {
return configModelRepo.getClients();
}
/** Returns all search clusters, both in Search and Content */
public List<AbstractSearchCluster> getSearchClusters() {
return Content.getSearchClusters(configModelRepo());
}
/** Returns a map of content clusters by ID */
public Map<String, ContentCluster> getContentClusters() {
Map<String, ContentCluster> clusters = new LinkedHashMap<>();
for (Content model : configModelRepo.getModels(Content.class)) {
clusters.put(model.getId(), model.getCluster());
}
return Collections.unmodifiableMap(clusters);
}
/** Returns a map of container clusters by ID */
public Map<String, ContainerCluster> getContainerClusters() {
Map<String, ContainerCluster> clusters = new LinkedHashMap<>();
for (ContainerModel model : configModelRepo.getModels(ContainerModel.class)) {
clusters.put(model.getId(), model.getCluster());
}
return Collections.unmodifiableMap(clusters);
}
/** Returns the routing config model. This might be null. */
public Routing getRouting() {
return configModelRepo.getRouting();
}
public FileDistributionConfigProducer getFileDistributionConfigProducer() {
return root.getFileDistributionConfigProducer();
}
/** The clusters of application specific generic services */
public List<ServiceCluster> serviceClusters() {
return serviceClusters;
}
/** Returns an unmodifiable view of the mapping of config id to {@link ConfigProducer} */
public Map<String, ConfigProducer> id2producer() {
return Collections.unmodifiableMap(id2producer);
}
/**
* @return this root's model repository
*/
public ConfigModelRepo configModelRepo() {
return configModelRepo;
}
@Override
public DeployLogger deployLogger() {
return getDeployState().getDeployLogger();
}
} | class VespaModel extends AbstractConfigProducerRoot implements Serializable, Model {
private static final long serialVersionUID = 1L;
public static final Logger log = Logger.getLogger(VespaModel.class.getPackage().toString());
private ConfigModelRepo configModelRepo = new ConfigModelRepo();
private final Optional<ProvisionInfo> info;
/**
* The config id for the root config producer
*/
public static final String ROOT_CONFIGID = "";
private ApplicationConfigProducerRoot root = null;
/**
* Generic service instances - service clusters which have no specific model
*/
private List<ServiceCluster> serviceClusters = new ArrayList<>();
private DeployState deployState;
/** The validation overrides of this. This is never null. */
private final ValidationOverrides validationOverrides;
private final FileDistributor fileDistributor;
/** Creates a Vespa Model from internal model types only */
public VespaModel(ApplicationPackage app) throws IOException, SAXException {
this(app, new NullConfigModelRegistry());
}
/** Creates a Vespa Model from internal model types only */
public VespaModel(DeployState deployState) throws IOException, SAXException {
this(new NullConfigModelRegistry(), deployState);
}
/**
* Constructs vespa model using config given in app
*
* @param app the application to create a model from
* @param configModelRegistry a registry of config model "main" classes which may be used
* to instantiate config models
*/
public VespaModel(ApplicationPackage app, ConfigModelRegistry configModelRegistry) throws IOException, SAXException {
this(configModelRegistry, new DeployState.Builder().applicationPackage(app).build());
}
/**
* Constructs vespa model using config given in app
*
* @param configModelRegistry a registry of config model "main" classes which may be used
* to instantiate config models
* @param deployState the global deploy state to use for this model.
*/
public VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState) throws IOException, SAXException {
this(configModelRegistry, deployState, true, null);
}
private VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState, boolean complete, FileDistributor fileDistributor) throws IOException, SAXException {
super("vespamodel");
this.deployState = deployState;
this.validationOverrides = deployState.validationOverrides();
configModelRegistry = new VespaConfigModelRegistry(configModelRegistry);
VespaModelBuilder builder = new VespaDomBuilder();
root = builder.getRoot(VespaModel.ROOT_CONFIGID, deployState, this);
if (complete) {
configModelRepo.readConfigModels(deployState, builder, root, configModelRegistry);
addServiceClusters(deployState.getApplicationPackage(), builder);
this.info = Optional.of(createProvisionInfo());
setupRouting();
this.fileDistributor = root.getFileDistributionConfigProducer().getFileDistributor();
getAdmin().addPerHostServices(getHostSystem().getHosts(), deployState.getProperties());
freezeModelTopology();
root.prepare(configModelRepo);
configModelRepo.prepareConfigModels();
validateWrapExceptions();
this.deployState = null;
}
else {
this.info = Optional.of(createProvisionInfo());
this.fileDistributor = fileDistributor;
}
}
/** Creates a mutable model with no services instantiated */
public static VespaModel createIncomplete(DeployState deployState) throws IOException, SAXException {
return new VespaModel(new NullConfigModelRegistry(), deployState, false, new FileDistributor(deployState.getFileRegistry()));
}
private ProvisionInfo createProvisionInfo() {
return ProvisionInfo.withHosts(root.getHostSystem().getHostSpecs());
}
private void validateWrapExceptions() {
try {
validate();
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException("Error while validating model:", e);
}
}
/** Adds generic application specific clusters of services */
private void addServiceClusters(ApplicationPackage app, VespaModelBuilder builder) {
for (ServiceCluster sc : builder.getClusters(app, this))
serviceClusters.add(sc);
}
private void setupRouting() {
root.setupRouting(configModelRepo);
}
/** Returns the one and only HostSystem of this VespaModel */
public HostSystem getHostSystem() {
return root.getHostSystem();
}
/** Return a collection of all hostnames used in this application */
@Override
public FileDistributor getFileDistributor() {
return fileDistributor;
}
/** Returns this models Vespa instance */
public ApplicationConfigProducerRoot getVespa() { return root; }
@Override
public boolean allowModelVersionMismatch() {
return validationOverrides.allows(ValidationId.configModelVersionMismatch) ||
validationOverrides.allows(ValidationId.skipOldConfigModels);
}
@Override
public boolean skipOldConfigModels() {
return validationOverrides.allows(ValidationId.skipOldConfigModels);
}
/**
* Resolves config of the given type and config id, by first instantiating the correct {@link com.yahoo.config.ConfigInstance.Builder},
* calling {@link #getConfig(ConfigInstance.Builder, String)} to populate it, for any of the config
* types in the model.
*
* @param clazz The type of config
* @param configId The config id
* @return A config instance of the given type
*/
public <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> clazz, String configId) {
try {
ConfigInstance.Builder builder = newBuilder(clazz);
getConfig(builder, configId);
return newConfigInstance(clazz, builder);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Populates an instance of configClass with config produced by configProducer.
*/
public static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> configClass, ConfigProducer configProducer) {
try {
Builder builder = newBuilder(configClass);
populateConfigBuilder(builder, configProducer);
return newConfigInstance(configClass, builder);
} catch (Exception e) {
throw new RuntimeException("Failed getting config for class " + configClass.getName(), e);
}
}
private static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE newConfigInstance(Class<CONFIGTYPE> configClass, Builder builder)
throws NoSuchMethodException, InstantiationException, IllegalAccessException, java.lang.reflect.InvocationTargetException {
Constructor<CONFIGTYPE> constructor = configClass.getConstructor(builder.getClass());
return constructor.newInstance(builder);
}
private static Builder newBuilder(Class<? extends ConfigInstance> configClass)
throws ClassNotFoundException, InstantiationException, IllegalAccessException {
Class builderClazz = configClass.getClassLoader().loadClass(configClass.getName() + "$Builder");
return (Builder)builderClazz.newInstance();
}
/**
* Throw if the config id does not exist in the model.
*
* @param configId a config id
*/
protected void checkId(String configId) {
if ( ! id2producer.containsKey(configId)) {
log.log(LogLevel.DEBUG, "Invalid config id: " + configId);
}
}
/**
* Resolves config for a given config id and populates the given builder with the config.
*
* @param builder a configinstance builder
* @param configId the config id for the config client
* @return the builder if a producer was found, and it did apply config, null otherwise
*/
@SuppressWarnings("unchecked")
@Override
public ConfigInstance.Builder getConfig(ConfigInstance.Builder builder, String configId) {
checkId(configId);
Optional<ConfigProducer> configProducer = getConfigProducer(configId);
if ( ! configProducer.isPresent()) return null;
populateConfigBuilder(builder, configProducer.get());
return builder;
}
private static void populateConfigBuilder(Builder builder, ConfigProducer configProducer) {
boolean found = configProducer.cascadeConfig(builder);
boolean foundOverride = configProducer.addUserConfig(builder);
if (logDebug()) {
log.log(LogLevel.DEBUG, "Trying to get config for " + builder.getClass().getDeclaringClass().getName() +
" for config id " + quote(configProducer.getConfigId()) +
", found=" + found + ", foundOverride=" + foundOverride);
}
}
/**
* Resolve config for a given key and config definition
*
* @param configKey The key to resolve.
* @param targetDef The config definition to use for the schema
* @return the resolved config as a ConfigPayload, or null if no builder was found for the key
*/
@Override
public ConfigPayload getConfig(ConfigKey configKey, com.yahoo.vespa.config.buildergen.ConfigDefinition targetDef) {
ConfigBuilder builder = InstanceResolver.resolveToBuilder(configKey, this, targetDef);
if (builder != null) {
log.log(LogLevel.DEBUG, () -> "Found builder for " + configKey);
ConfigPayload payload;
InnerCNode innerCNode = targetDef != null ? targetDef.getCNode() : null;
if (builder instanceof GenericConfig.GenericConfigBuilder) {
payload = getConfigFromGenericBuilder(builder);
} else {
payload = getConfigFromBuilder(configKey, builder, innerCNode);
}
return (innerCNode != null) ? payload.applyDefaultsFromDef(innerCNode) : payload;
}
return null;
}
private ConfigPayload getConfigFromBuilder(ConfigKey configKey, ConfigBuilder builder, InnerCNode targetDef) {
try {
ConfigInstance instance = InstanceResolver.resolveToInstance(configKey, builder, targetDef);
log.log(LogLevel.DEBUG, () -> "getConfigFromBuilder for " + configKey + ",instance=" + instance);
return ConfigPayload.fromInstance(instance);
} catch (ConfigurationRuntimeException e) {
log.log(LogLevel.INFO, "Error resolving instance for key '" + configKey + "', returning empty config: " + Exceptions.toMessageString(e));
return ConfigPayload.fromBuilder(new ConfigPayloadBuilder());
}
}
private ConfigPayload getConfigFromGenericBuilder(ConfigBuilder builder) {
return ((GenericConfig.GenericConfigBuilder) builder).getPayload();
}
@Override
public Set<ConfigKey<?>> allConfigsProduced() {
Set<ConfigKey<?>> keySet = new LinkedHashSet<>();
for (ConfigProducer producer : id2producer().values()) {
keySet.addAll(configsProduced(producer));
}
return keySet;
}
public ConfigInstance.Builder createBuilder(ConfigDefinitionKey key, ConfigDefinition targetDef) {
String className = ConfigGenerator.createClassName(key.getName());
Class<?> clazz;
final String fullClassName = InstanceResolver.packageName(key) + "." + className;
final String builderName = fullClassName + "$Builder";
final String producerName = fullClassName + "$Producer";
ClassLoader classLoader = getConfigClassLoader(producerName);
if (classLoader == null) {
classLoader = getClass().getClassLoader();
if (logDebug()) {
log.log(LogLevel.DEBUG, "No producer found to get classloader from for " + fullClassName + ". Using default");
}
}
try {
clazz = classLoader.loadClass(builderName);
} catch (ClassNotFoundException e) {
if (logDebug()) {
log.log(LogLevel.DEBUG, "Tried to load " + builderName + ", not found, trying with generic builder");
}
return new GenericConfig.GenericConfigBuilder(key, new ConfigPayloadBuilder());
}
Object i;
try {
i = clazz.newInstance();
} catch (InstantiationException | IllegalAccessException e) {
throw new ConfigurationRuntimeException(e);
}
if (!(i instanceof ConfigInstance.Builder)) {
throw new ConfigurationRuntimeException(fullClassName + " is not a ConfigInstance.Builder, can not produce config for the name '" + key.getName() + "'.");
}
return (ConfigInstance.Builder) i;
}
private static boolean logDebug() {
return log.isLoggable(LogLevel.DEBUG);
}
/**
* The set of all config ids present
* @return set of config ids
*/
public Set<String> allConfigIds() {
return id2producer.keySet();
}
@Override
public void distributeFiles(FileDistribution fileDistribution) {
getFileDistributor().sendDeployedFiles(fileDistribution);
}
@Override
public void reloadDeployFileDistributor(FileDistribution fileDistribution) {
getFileDistributor().reloadDeployFileDistributor(fileDistribution);
}
@Override
public Optional<ProvisionInfo> getProvisionInfo() {
return info;
}
private static Set<ConfigKey<?>> configsProduced(ConfigProducer cp) {
Set<ConfigKey<?>> ret = ReflectionUtil.configsProducedByInterface(cp.getClass(), cp.getConfigId());
UserConfigRepo userConfigs = cp.getUserConfigs();
for (ConfigDefinitionKey userKey : userConfigs.configsProduced()) {
ret.add(new ConfigKey<>(userKey.getName(), cp.getConfigId(), userKey.getNamespace()));
}
return ret;
}
@Override
public DeployState getDeployState() {
if (deployState == null)
throw new IllegalStateException("Cannot call getDeployState() once model has been built");
return deployState;
}
/**
* @return an unmodifiable view of the set of configIds in this VespaModel.
*/
public Set<String> getConfigIds() {
return Collections.unmodifiableSet(id2producer.keySet());
}
/**
* Returns the admin component of the vespamodel.
*
* @return Admin
*/
public Admin getAdmin() {
return root.getAdmin();
}
/**
* Adds the descendant (at any depth level), so it can be looked up
* on configId in the Map.
*
* @param configId the id to register with, not necessarily equal to descendant.getConfigId().
* @param descendant The configProducer descendant to add
*/
public void addDescendant(String configId, AbstractConfigProducer descendant) {
if (id2producer.containsKey(configId)) {
throw new RuntimeException
("Config ID '" + configId + "' cannot be reserved by an instance of class '" +
descendant.getClass().getName() +
"' since it is already used by an instance of class '" +
id2producer.get(configId).getClass().getName() +
"'. (This is commonly caused by service/node index " +
"collisions in the config.)");
}
id2producer.put(configId, descendant);
}
/**
* Writes MODEL.cfg files for all config producers.
*
* @param baseDirectory dir to write files to
*/
public void writeFiles(File baseDirectory) throws IOException {
super.writeFiles(baseDirectory);
for (ConfigProducer cp : id2producer.values()) {
try {
File destination = new File(baseDirectory, cp.getConfigId().replace("/", File.separator));
cp.writeFiles(destination);
} catch (IOException e) {
throw new IOException(cp.getConfigId() + ": " + e.getMessage());
}
}
}
public Clients getClients() {
return configModelRepo.getClients();
}
/** Returns all search clusters, both in Search and Content */
public List<AbstractSearchCluster> getSearchClusters() {
return Content.getSearchClusters(configModelRepo());
}
/** Returns a map of content clusters by ID */
public Map<String, ContentCluster> getContentClusters() {
Map<String, ContentCluster> clusters = new LinkedHashMap<>();
for (Content model : configModelRepo.getModels(Content.class)) {
clusters.put(model.getId(), model.getCluster());
}
return Collections.unmodifiableMap(clusters);
}
/** Returns a map of container clusters by ID */
public Map<String, ContainerCluster> getContainerClusters() {
Map<String, ContainerCluster> clusters = new LinkedHashMap<>();
for (ContainerModel model : configModelRepo.getModels(ContainerModel.class)) {
clusters.put(model.getId(), model.getCluster());
}
return Collections.unmodifiableMap(clusters);
}
/** Returns the routing config model. This might be null. */
public Routing getRouting() {
return configModelRepo.getRouting();
}
public FileDistributionConfigProducer getFileDistributionConfigProducer() {
return root.getFileDistributionConfigProducer();
}
/** The clusters of application specific generic services */
public List<ServiceCluster> serviceClusters() {
return serviceClusters;
}
/** Returns an unmodifiable view of the mapping of config id to {@link ConfigProducer} */
public Map<String, ConfigProducer> id2producer() {
return Collections.unmodifiableMap(id2producer);
}
/**
* @return this root's model repository
*/
public ConfigModelRepo configModelRepo() {
return configModelRepo;
}
@Override
public DeployLogger deployLogger() {
return getDeployState().getDeployLogger();
}
} |
Easier to use ImmutableMap.of("type1", Arrays.asList("f2")), which returns a map. | public void testThatAttributesConfigIsProducedForIndexed() {
Map<String, List<String>> expectedAttributesMap = new HashMap<>();
expectedAttributesMap.put("type1", Arrays.asList("f2"));
assertAttributesConfigIndependentOfMode("index", Arrays.asList("type1"), Arrays.asList("test/search/cluster.test/type1"), expectedAttributesMap);
} | expectedAttributesMap.put("type1", Arrays.asList("f2")); | public void testThatAttributesConfigIsProducedForIndexed() {
assertAttributesConfigIndependentOfMode("index", Arrays.asList("type1"),
Arrays.asList("test/search/cluster.test/type1"),
ImmutableMap.of("type1", Arrays.asList("f2", "f2_nfa")));
} | class DocumentDatabaseTestCase {
private String vespaHosts = "<?xml version='1.0' encoding='utf-8' ?>" +
"<hosts> " +
" <host name='foo'>" +
" <alias>node0</alias>" +
" </host>" +
"</hosts>";
private String createVespaServices(List<String> sdNames, String mode) {
StringBuilder retval = new StringBuilder();
retval.append("" +
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
"<admin version='2.0'>\n" +
" <adminserver hostalias='node0' />\n" +
"</admin>\n" +
"<container version='1.0'>\n" +
" <nodes>\n" +
" <node hostalias='node0'/>\n" +
" </nodes>\n" +
" <search/>\n" +
"</container>\n" +
"<content version='1.0' id='test'>\n" +
" <redundancy>1</redundancy>\n");
retval.append(" <documents>\n");
for (String sdName : sdNames) {
retval.append("").append(" <document type='").append(sdName).append("' mode='").append(mode).append("'");
retval.append("/>\n");
}
retval.append(" </documents>\n");
retval.append("" +
" <nodes>\n" +
" <node hostalias='node0' distribution-key='0'/>\n" +
" </nodes>\n" +
"</content>\n" +
"</services>\n");
return retval.toString();
}
private ProtonConfig getProtonCfg(ContentSearchCluster cluster) {
ProtonConfig.Builder pb = new ProtonConfig.Builder();
cluster.getConfig(pb);
return new ProtonConfig(pb);
}
private void assertSingleSD(String mode) {
final List<String> sds = Arrays.asList("type1");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
assertEquals(1, indexedSearchCluster.getDocumentDbs().size());
String type1Id = "test/search/cluster.test/type1";
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(1, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
}
@Test
public void requireThatWeCanHaveOneSDForIndexedMode() throws IOException, SAXException, ParseException {
assertSingleSD("index");
}
private void assertDocTypeConfig(VespaModel model, String configId, String indexField, String attributeField) {
IndexschemaConfig icfg = model.getConfig(IndexschemaConfig.class, configId);
assertEquals(1, icfg.indexfield().size());
assertEquals(indexField, icfg.indexfield(0).name());
AttributesConfig acfg = model.getConfig(AttributesConfig.class, configId);
assertEquals(1, acfg.attribute().size());
assertEquals(attributeField, acfg.attribute(0).name());
RankProfilesConfig rcfg = model.getConfig(RankProfilesConfig.class, configId);
assertEquals(6, rcfg.rankprofile().size());
}
@Test
public void requireThatWeCanHaveMultipleSearchDefinitions() throws IOException, SAXException, ParseException {
final List<String> sds = Arrays.asList("type1", "type2", "type3");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
String type1Id = "test/search/cluster.test/type1";
String type2Id = "test/search/cluster.test/type2";
String type3Id = "test/search/cluster.test/type3";
{
assertEquals(3, indexedSearchCluster.getDocumentDbs().size());
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(3, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
assertEquals("type2", proton.documentdb(1).inputdoctypename());
assertEquals(type2Id, proton.documentdb(1).configid());
assertEquals("type3", proton.documentdb(2).inputdoctypename());
assertEquals(type3Id, proton.documentdb(2).configid());
}
assertDocTypeConfig(model, type1Id, "f1", "f2");
assertDocTypeConfig(model, type2Id, "f3", "f4");
assertDocTypeConfig(model, type3Id, "f5", "f6");
{
IndexInfoConfig iicfg = model.getConfig(IndexInfoConfig.class, "test/search/cluster.test");
assertEquals(3, iicfg.indexinfo().size());
assertEquals("type1", iicfg.indexinfo().get(0).name());
assertEquals("type2", iicfg.indexinfo().get(1).name());
assertEquals("type3", iicfg.indexinfo().get(2).name());
}
{
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, type1Id);
assertEquals(1, rac1.attribute().size());
assertEquals("f2", rac1.attribute(0).name());
AttributesConfig rac2 = model.getConfig(AttributesConfig.class, type2Id);
assertEquals(1, rac2.attribute().size());
assertEquals("f4", rac2.attribute(0).name());
}
{
IlscriptsConfig icfg = model.getConfig(IlscriptsConfig.class, "test/search/cluster.test");
assertEquals(3, icfg.ilscript().size());
assertEquals("type1", icfg.ilscript(0).doctype());
assertEquals("type2", icfg.ilscript(1).doctype());
assertEquals("type3", icfg.ilscript(2).doctype());
}
}
@Test
public void requireThatRelevantConfigIsAvailableForClusterSearcher() throws ParseException, IOException, SAXException {
final List<String> sds = Arrays.asList("type1", "type2");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
String searcherId = "container/searchchains/chain/test/component/com.yahoo.prelude.cluster.ClusterSearcher";
{
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, searcherId);
assertEquals(2, dcfg.documentdb().size());
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type1", db.name());
assertEquals(6, db.rankprofile().size());
assertRankProfile(db, 0, "default", false, false);
assertRankProfile(db, 1, "unranked", false, false);
assertRankProfile(db, 2, "staticrank", false, false);
assertRankProfile(db, 3, "summaryfeatures", true, false);
assertRankProfile(db, 4, "inheritedsummaryfeatures", true, false);
assertRankProfile(db, 5, "rankfeatures", false, true);
assertEquals(2, db.summaryclass().size());
assertEquals("default", db.summaryclass(0).name());
assertEquals("attributeprefetch", db.summaryclass(1).name());
assertSummaryField(db, 0, 0, "f1", "longstring", true);
assertSummaryField(db, 0, 1, "f2", "integer", false);
}
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(1);
assertEquals("type2", db.name());
}
}
{
AttributesConfig acfg = model.getConfig(AttributesConfig.class, searcherId);
assertEquals(2, acfg.attribute().size());
assertEquals("f2", acfg.attribute(0).name());
assertEquals("f4", acfg.attribute(1).name());
assertEquals("f4", acfg.attribute(1).name());
}
}
private void assertRankProfile(DocumentdbInfoConfig.Documentdb db, int index, String name,
boolean hasSummaryFeatures, boolean hasRankFeatures) {
DocumentdbInfoConfig.Documentdb.Rankprofile rankProfile0 = db.rankprofile(index);
assertEquals(name, rankProfile0.name());
assertEquals(hasSummaryFeatures, rankProfile0.hasSummaryFeatures());
assertEquals(hasRankFeatures, rankProfile0.hasRankFeatures());
}
private void assertSummaryField(DocumentdbInfoConfig.Documentdb db, int summaryClassIndex, int fieldIndex,
String name, String type, boolean dynamic) {
DocumentdbInfoConfig.Documentdb.Summaryclass.Fields field = db.summaryclass(summaryClassIndex).fields(fieldIndex);
assertEquals(name, field.name());
assertEquals(type, field.type());
assertEquals(dynamic, field.dynamic());
}
private void assertDocumentDBConfigAvailableForStreaming(String mode) {
final List<String> sds = Arrays.asList("type");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, "test/search/cluster.test.type");
assertEquals(1, dcfg.documentdb().size());
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type", db.name());
}
@Test
public void requireThatDocumentDBConfigIsAvailableForStreaming() throws ParseException, IOException, SAXException {
assertDocumentDBConfigAvailableForStreaming("streaming");
}
private void assertAttributesConfigIndependentOfMode(String mode, List<String> sds,
List<String> documentDBConfigIds,
Map<String, List<String>> expectedAttributesMap) {
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(sds.size(), proton.documentdb().size());
for (int i = 0; i < sds.size(); i++) {
assertEquals(sds.get(i), proton.documentdb(i).inputdoctypename());
assertEquals(documentDBConfigIds.get(i), proton.documentdb(i).configid());
List<String> expectedAttributes = expectedAttributesMap.get(sds.get(i));
if (expectedAttributes != null) {
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, proton.documentdb(i).configid());
assertEquals(expectedAttributes.size(), rac1.attribute().size());
for (int j = 0; j < expectedAttributes.size(); j++) {
assertEquals(expectedAttributes.get(j), rac1.attribute(j).name());
}
}
}
}
@Test
@Test
public void testThatAttributesConfigIsProducedForStreamingForFastAccessFields() {
Map<String, List<String>> expectedAttributesMap = new HashMap<>();
expectedAttributesMap.put("type1", Arrays.asList("f2"));
assertAttributesConfigIndependentOfMode("streaming", Arrays.asList("type1"), Arrays.asList("test/search/cluster.test.type1/type1"), expectedAttributesMap);
}
@Test
public void testThatAttributesConfigIsNotProducedForStoreOnlyEvenForFastAccessFields() {
assertAttributesConfigIndependentOfMode("store-only", Arrays.asList("type1"), Arrays.asList("test/search"), Collections.emptyMap());
}
} | class DocumentDatabaseTestCase {
private String vespaHosts = "<?xml version='1.0' encoding='utf-8' ?>" +
"<hosts> " +
" <host name='foo'>" +
" <alias>node0</alias>" +
" </host>" +
"</hosts>";
private String createVespaServices(List<String> sdNames, String mode) {
StringBuilder retval = new StringBuilder();
retval.append("" +
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
"<admin version='2.0'>\n" +
" <adminserver hostalias='node0' />\n" +
"</admin>\n" +
"<container version='1.0'>\n" +
" <nodes>\n" +
" <node hostalias='node0'/>\n" +
" </nodes>\n" +
" <search/>\n" +
"</container>\n" +
"<content version='1.0' id='test'>\n" +
" <redundancy>1</redundancy>\n");
retval.append(" <documents>\n");
for (String sdName : sdNames) {
retval.append("").append(" <document type='").append(sdName).append("' mode='").append(mode).append("'");
retval.append("/>\n");
}
retval.append(" </documents>\n");
retval.append("" +
" <nodes>\n" +
" <node hostalias='node0' distribution-key='0'/>\n" +
" </nodes>\n" +
"</content>\n" +
"</services>\n");
return retval.toString();
}
private ProtonConfig getProtonCfg(ContentSearchCluster cluster) {
ProtonConfig.Builder pb = new ProtonConfig.Builder();
cluster.getConfig(pb);
return new ProtonConfig(pb);
}
private void assertSingleSD(String mode) {
final List<String> sds = Arrays.asList("type1");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
assertEquals(1, indexedSearchCluster.getDocumentDbs().size());
String type1Id = "test/search/cluster.test/type1";
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(1, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
}
@Test
public void requireThatWeCanHaveOneSDForIndexedMode() throws IOException, SAXException, ParseException {
assertSingleSD("index");
}
private void assertDocTypeConfig(VespaModel model, String configId, String indexField, String attributeField) {
IndexschemaConfig icfg = model.getConfig(IndexschemaConfig.class, configId);
assertEquals(1, icfg.indexfield().size());
assertEquals(indexField, icfg.indexfield(0).name());
AttributesConfig acfg = model.getConfig(AttributesConfig.class, configId);
assertEquals(2, acfg.attribute().size());
assertEquals(attributeField, acfg.attribute(0).name());
assertEquals(attributeField+"_nfa", acfg.attribute(1).name());
RankProfilesConfig rcfg = model.getConfig(RankProfilesConfig.class, configId);
assertEquals(6, rcfg.rankprofile().size());
}
@Test
public void requireThatWeCanHaveMultipleSearchDefinitions() throws IOException, SAXException, ParseException {
final List<String> sds = Arrays.asList("type1", "type2", "type3");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
String type1Id = "test/search/cluster.test/type1";
String type2Id = "test/search/cluster.test/type2";
String type3Id = "test/search/cluster.test/type3";
{
assertEquals(3, indexedSearchCluster.getDocumentDbs().size());
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(3, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
assertEquals("type2", proton.documentdb(1).inputdoctypename());
assertEquals(type2Id, proton.documentdb(1).configid());
assertEquals("type3", proton.documentdb(2).inputdoctypename());
assertEquals(type3Id, proton.documentdb(2).configid());
}
assertDocTypeConfig(model, type1Id, "f1", "f2");
assertDocTypeConfig(model, type2Id, "f3", "f4");
assertDocTypeConfig(model, type3Id, "f5", "f6");
{
IndexInfoConfig iicfg = model.getConfig(IndexInfoConfig.class, "test/search/cluster.test");
assertEquals(3, iicfg.indexinfo().size());
assertEquals("type1", iicfg.indexinfo().get(0).name());
assertEquals("type2", iicfg.indexinfo().get(1).name());
assertEquals("type3", iicfg.indexinfo().get(2).name());
}
{
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, type1Id);
assertEquals(2, rac1.attribute().size());
assertEquals("f2", rac1.attribute(0).name());
assertEquals("f2_nfa", rac1.attribute(1).name());
AttributesConfig rac2 = model.getConfig(AttributesConfig.class, type2Id);
assertEquals(2, rac2.attribute().size());
assertEquals("f4", rac2.attribute(0).name());
assertEquals("f4_nfa", rac2.attribute(1).name());
}
{
IlscriptsConfig icfg = model.getConfig(IlscriptsConfig.class, "test/search/cluster.test");
assertEquals(3, icfg.ilscript().size());
assertEquals("type1", icfg.ilscript(0).doctype());
assertEquals("type2", icfg.ilscript(1).doctype());
assertEquals("type3", icfg.ilscript(2).doctype());
}
}
@Test
public void requireThatRelevantConfigIsAvailableForClusterSearcher() throws ParseException, IOException, SAXException {
final List<String> sds = Arrays.asList("type1", "type2");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
String searcherId = "container/searchchains/chain/test/component/com.yahoo.prelude.cluster.ClusterSearcher";
{
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, searcherId);
assertEquals(2, dcfg.documentdb().size());
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type1", db.name());
assertEquals(6, db.rankprofile().size());
assertRankProfile(db, 0, "default", false, false);
assertRankProfile(db, 1, "unranked", false, false);
assertRankProfile(db, 2, "staticrank", false, false);
assertRankProfile(db, 3, "summaryfeatures", true, false);
assertRankProfile(db, 4, "inheritedsummaryfeatures", true, false);
assertRankProfile(db, 5, "rankfeatures", false, true);
assertEquals(2, db.summaryclass().size());
assertEquals("default", db.summaryclass(0).name());
assertEquals("attributeprefetch", db.summaryclass(1).name());
assertSummaryField(db, 0, 0, "f1", "longstring", true);
assertSummaryField(db, 0, 1, "f2", "integer", false);
}
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(1);
assertEquals("type2", db.name());
}
}
{
AttributesConfig acfg = model.getConfig(AttributesConfig.class, searcherId);
assertEquals(4, acfg.attribute().size());
assertEquals("f2", acfg.attribute(0).name());
assertEquals("f2_nfa", acfg.attribute(1).name());
assertEquals("f4", acfg.attribute(2).name());
assertEquals("f4_nfa", acfg.attribute(3).name());
}
}
private void assertRankProfile(DocumentdbInfoConfig.Documentdb db, int index, String name,
boolean hasSummaryFeatures, boolean hasRankFeatures) {
DocumentdbInfoConfig.Documentdb.Rankprofile rankProfile0 = db.rankprofile(index);
assertEquals(name, rankProfile0.name());
assertEquals(hasSummaryFeatures, rankProfile0.hasSummaryFeatures());
assertEquals(hasRankFeatures, rankProfile0.hasRankFeatures());
}
private void assertSummaryField(DocumentdbInfoConfig.Documentdb db, int summaryClassIndex, int fieldIndex,
String name, String type, boolean dynamic) {
DocumentdbInfoConfig.Documentdb.Summaryclass.Fields field = db.summaryclass(summaryClassIndex).fields(fieldIndex);
assertEquals(name, field.name());
assertEquals(type, field.type());
assertEquals(dynamic, field.dynamic());
}
private void assertDocumentDBConfigAvailableForStreaming(String mode) {
final List<String> sds = Arrays.asList("type");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, "test/search/cluster.test.type");
assertEquals(1, dcfg.documentdb().size());
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type", db.name());
}
@Test
public void requireThatDocumentDBConfigIsAvailableForStreaming() throws ParseException, IOException, SAXException {
assertDocumentDBConfigAvailableForStreaming("streaming");
}
private void assertAttributesConfigIndependentOfMode(String mode, List<String> sds,
List<String> documentDBConfigIds,
Map<String, List<String>> expectedAttributesMap) {
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(sds.size(), proton.documentdb().size());
for (int i = 0; i < sds.size(); i++) {
assertEquals(sds.get(i), proton.documentdb(i).inputdoctypename());
assertEquals(documentDBConfigIds.get(i), proton.documentdb(i).configid());
List<String> expectedAttributes = expectedAttributesMap.get(sds.get(i));
if (expectedAttributes != null) {
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, proton.documentdb(i).configid());
assertEquals(expectedAttributes.size(), rac1.attribute().size());
for (int j = 0; j < expectedAttributes.size(); j++) {
assertEquals(expectedAttributes.get(j), rac1.attribute(j).name());
}
}
}
}
@Test
@Test
public void testThatAttributesConfigIsProducedForStreamingForFastAccessFields() {
assertAttributesConfigIndependentOfMode("streaming", Arrays.asList("type1"),
Arrays.asList("test/search/cluster.test.type1/type1"),
ImmutableMap.of("type1", Arrays.asList("f2")));
}
@Test
public void testThatAttributesConfigIsNotProducedForStoreOnlyEvenForFastAccessFields() {
assertAttributesConfigIndependentOfMode("store-only", Arrays.asList("type1"),
Arrays.asList("test/search"), Collections.emptyMap());
}
} |
Easier to use ImmutableMap.of("type1", Arrays.asList("f2") that returns a map. | public void testThatAttributesConfigIsProducedForStreamingForFastAccessFields() {
Map<String, List<String>> expectedAttributesMap = new HashMap<>();
expectedAttributesMap.put("type1", Arrays.asList("f2"));
assertAttributesConfigIndependentOfMode("streaming", Arrays.asList("type1"), Arrays.asList("test/search/cluster.test.type1/type1"), expectedAttributesMap);
} | expectedAttributesMap.put("type1", Arrays.asList("f2")); | public void testThatAttributesConfigIsProducedForStreamingForFastAccessFields() {
assertAttributesConfigIndependentOfMode("streaming", Arrays.asList("type1"),
Arrays.asList("test/search/cluster.test.type1/type1"),
ImmutableMap.of("type1", Arrays.asList("f2")));
} | class DocumentDatabaseTestCase {
private String vespaHosts = "<?xml version='1.0' encoding='utf-8' ?>" +
"<hosts> " +
" <host name='foo'>" +
" <alias>node0</alias>" +
" </host>" +
"</hosts>";
private String createVespaServices(List<String> sdNames, String mode) {
StringBuilder retval = new StringBuilder();
retval.append("" +
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
"<admin version='2.0'>\n" +
" <adminserver hostalias='node0' />\n" +
"</admin>\n" +
"<container version='1.0'>\n" +
" <nodes>\n" +
" <node hostalias='node0'/>\n" +
" </nodes>\n" +
" <search/>\n" +
"</container>\n" +
"<content version='1.0' id='test'>\n" +
" <redundancy>1</redundancy>\n");
retval.append(" <documents>\n");
for (String sdName : sdNames) {
retval.append("").append(" <document type='").append(sdName).append("' mode='").append(mode).append("'");
retval.append("/>\n");
}
retval.append(" </documents>\n");
retval.append("" +
" <nodes>\n" +
" <node hostalias='node0' distribution-key='0'/>\n" +
" </nodes>\n" +
"</content>\n" +
"</services>\n");
return retval.toString();
}
private ProtonConfig getProtonCfg(ContentSearchCluster cluster) {
ProtonConfig.Builder pb = new ProtonConfig.Builder();
cluster.getConfig(pb);
return new ProtonConfig(pb);
}
private void assertSingleSD(String mode) {
final List<String> sds = Arrays.asList("type1");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
assertEquals(1, indexedSearchCluster.getDocumentDbs().size());
String type1Id = "test/search/cluster.test/type1";
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(1, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
}
@Test
public void requireThatWeCanHaveOneSDForIndexedMode() throws IOException, SAXException, ParseException {
assertSingleSD("index");
}
private void assertDocTypeConfig(VespaModel model, String configId, String indexField, String attributeField) {
IndexschemaConfig icfg = model.getConfig(IndexschemaConfig.class, configId);
assertEquals(1, icfg.indexfield().size());
assertEquals(indexField, icfg.indexfield(0).name());
AttributesConfig acfg = model.getConfig(AttributesConfig.class, configId);
assertEquals(1, acfg.attribute().size());
assertEquals(attributeField, acfg.attribute(0).name());
RankProfilesConfig rcfg = model.getConfig(RankProfilesConfig.class, configId);
assertEquals(6, rcfg.rankprofile().size());
}
@Test
public void requireThatWeCanHaveMultipleSearchDefinitions() throws IOException, SAXException, ParseException {
final List<String> sds = Arrays.asList("type1", "type2", "type3");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
String type1Id = "test/search/cluster.test/type1";
String type2Id = "test/search/cluster.test/type2";
String type3Id = "test/search/cluster.test/type3";
{
assertEquals(3, indexedSearchCluster.getDocumentDbs().size());
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(3, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
assertEquals("type2", proton.documentdb(1).inputdoctypename());
assertEquals(type2Id, proton.documentdb(1).configid());
assertEquals("type3", proton.documentdb(2).inputdoctypename());
assertEquals(type3Id, proton.documentdb(2).configid());
}
assertDocTypeConfig(model, type1Id, "f1", "f2");
assertDocTypeConfig(model, type2Id, "f3", "f4");
assertDocTypeConfig(model, type3Id, "f5", "f6");
{
IndexInfoConfig iicfg = model.getConfig(IndexInfoConfig.class, "test/search/cluster.test");
assertEquals(3, iicfg.indexinfo().size());
assertEquals("type1", iicfg.indexinfo().get(0).name());
assertEquals("type2", iicfg.indexinfo().get(1).name());
assertEquals("type3", iicfg.indexinfo().get(2).name());
}
{
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, type1Id);
assertEquals(1, rac1.attribute().size());
assertEquals("f2", rac1.attribute(0).name());
AttributesConfig rac2 = model.getConfig(AttributesConfig.class, type2Id);
assertEquals(1, rac2.attribute().size());
assertEquals("f4", rac2.attribute(0).name());
}
{
IlscriptsConfig icfg = model.getConfig(IlscriptsConfig.class, "test/search/cluster.test");
assertEquals(3, icfg.ilscript().size());
assertEquals("type1", icfg.ilscript(0).doctype());
assertEquals("type2", icfg.ilscript(1).doctype());
assertEquals("type3", icfg.ilscript(2).doctype());
}
}
@Test
public void requireThatRelevantConfigIsAvailableForClusterSearcher() throws ParseException, IOException, SAXException {
final List<String> sds = Arrays.asList("type1", "type2");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
String searcherId = "container/searchchains/chain/test/component/com.yahoo.prelude.cluster.ClusterSearcher";
{
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, searcherId);
assertEquals(2, dcfg.documentdb().size());
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type1", db.name());
assertEquals(6, db.rankprofile().size());
assertRankProfile(db, 0, "default", false, false);
assertRankProfile(db, 1, "unranked", false, false);
assertRankProfile(db, 2, "staticrank", false, false);
assertRankProfile(db, 3, "summaryfeatures", true, false);
assertRankProfile(db, 4, "inheritedsummaryfeatures", true, false);
assertRankProfile(db, 5, "rankfeatures", false, true);
assertEquals(2, db.summaryclass().size());
assertEquals("default", db.summaryclass(0).name());
assertEquals("attributeprefetch", db.summaryclass(1).name());
assertSummaryField(db, 0, 0, "f1", "longstring", true);
assertSummaryField(db, 0, 1, "f2", "integer", false);
}
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(1);
assertEquals("type2", db.name());
}
}
{
AttributesConfig acfg = model.getConfig(AttributesConfig.class, searcherId);
assertEquals(2, acfg.attribute().size());
assertEquals("f2", acfg.attribute(0).name());
assertEquals("f4", acfg.attribute(1).name());
assertEquals("f4", acfg.attribute(1).name());
}
}
private void assertRankProfile(DocumentdbInfoConfig.Documentdb db, int index, String name,
boolean hasSummaryFeatures, boolean hasRankFeatures) {
DocumentdbInfoConfig.Documentdb.Rankprofile rankProfile0 = db.rankprofile(index);
assertEquals(name, rankProfile0.name());
assertEquals(hasSummaryFeatures, rankProfile0.hasSummaryFeatures());
assertEquals(hasRankFeatures, rankProfile0.hasRankFeatures());
}
private void assertSummaryField(DocumentdbInfoConfig.Documentdb db, int summaryClassIndex, int fieldIndex,
String name, String type, boolean dynamic) {
DocumentdbInfoConfig.Documentdb.Summaryclass.Fields field = db.summaryclass(summaryClassIndex).fields(fieldIndex);
assertEquals(name, field.name());
assertEquals(type, field.type());
assertEquals(dynamic, field.dynamic());
}
private void assertDocumentDBConfigAvailableForStreaming(String mode) {
final List<String> sds = Arrays.asList("type");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, "test/search/cluster.test.type");
assertEquals(1, dcfg.documentdb().size());
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type", db.name());
}
@Test
public void requireThatDocumentDBConfigIsAvailableForStreaming() throws ParseException, IOException, SAXException {
assertDocumentDBConfigAvailableForStreaming("streaming");
}
private void assertAttributesConfigIndependentOfMode(String mode, List<String> sds,
List<String> documentDBConfigIds,
Map<String, List<String>> expectedAttributesMap) {
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(sds.size(), proton.documentdb().size());
for (int i = 0; i < sds.size(); i++) {
assertEquals(sds.get(i), proton.documentdb(i).inputdoctypename());
assertEquals(documentDBConfigIds.get(i), proton.documentdb(i).configid());
List<String> expectedAttributes = expectedAttributesMap.get(sds.get(i));
if (expectedAttributes != null) {
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, proton.documentdb(i).configid());
assertEquals(expectedAttributes.size(), rac1.attribute().size());
for (int j = 0; j < expectedAttributes.size(); j++) {
assertEquals(expectedAttributes.get(j), rac1.attribute(j).name());
}
}
}
}
@Test
public void testThatAttributesConfigIsProducedForIndexed() {
Map<String, List<String>> expectedAttributesMap = new HashMap<>();
expectedAttributesMap.put("type1", Arrays.asList("f2"));
assertAttributesConfigIndependentOfMode("index", Arrays.asList("type1"), Arrays.asList("test/search/cluster.test/type1"), expectedAttributesMap);
}
@Test
@Test
public void testThatAttributesConfigIsNotProducedForStoreOnlyEvenForFastAccessFields() {
assertAttributesConfigIndependentOfMode("store-only", Arrays.asList("type1"), Arrays.asList("test/search"), Collections.emptyMap());
}
} | class DocumentDatabaseTestCase {
private String vespaHosts = "<?xml version='1.0' encoding='utf-8' ?>" +
"<hosts> " +
" <host name='foo'>" +
" <alias>node0</alias>" +
" </host>" +
"</hosts>";
private String createVespaServices(List<String> sdNames, String mode) {
StringBuilder retval = new StringBuilder();
retval.append("" +
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
"<admin version='2.0'>\n" +
" <adminserver hostalias='node0' />\n" +
"</admin>\n" +
"<container version='1.0'>\n" +
" <nodes>\n" +
" <node hostalias='node0'/>\n" +
" </nodes>\n" +
" <search/>\n" +
"</container>\n" +
"<content version='1.0' id='test'>\n" +
" <redundancy>1</redundancy>\n");
retval.append(" <documents>\n");
for (String sdName : sdNames) {
retval.append("").append(" <document type='").append(sdName).append("' mode='").append(mode).append("'");
retval.append("/>\n");
}
retval.append(" </documents>\n");
retval.append("" +
" <nodes>\n" +
" <node hostalias='node0' distribution-key='0'/>\n" +
" </nodes>\n" +
"</content>\n" +
"</services>\n");
return retval.toString();
}
private ProtonConfig getProtonCfg(ContentSearchCluster cluster) {
ProtonConfig.Builder pb = new ProtonConfig.Builder();
cluster.getConfig(pb);
return new ProtonConfig(pb);
}
private void assertSingleSD(String mode) {
final List<String> sds = Arrays.asList("type1");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
assertEquals(1, indexedSearchCluster.getDocumentDbs().size());
String type1Id = "test/search/cluster.test/type1";
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(1, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
}
@Test
public void requireThatWeCanHaveOneSDForIndexedMode() throws IOException, SAXException, ParseException {
assertSingleSD("index");
}
private void assertDocTypeConfig(VespaModel model, String configId, String indexField, String attributeField) {
IndexschemaConfig icfg = model.getConfig(IndexschemaConfig.class, configId);
assertEquals(1, icfg.indexfield().size());
assertEquals(indexField, icfg.indexfield(0).name());
AttributesConfig acfg = model.getConfig(AttributesConfig.class, configId);
assertEquals(2, acfg.attribute().size());
assertEquals(attributeField, acfg.attribute(0).name());
assertEquals(attributeField+"_nfa", acfg.attribute(1).name());
RankProfilesConfig rcfg = model.getConfig(RankProfilesConfig.class, configId);
assertEquals(6, rcfg.rankprofile().size());
}
@Test
public void requireThatWeCanHaveMultipleSearchDefinitions() throws IOException, SAXException, ParseException {
final List<String> sds = Arrays.asList("type1", "type2", "type3");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
String type1Id = "test/search/cluster.test/type1";
String type2Id = "test/search/cluster.test/type2";
String type3Id = "test/search/cluster.test/type3";
{
assertEquals(3, indexedSearchCluster.getDocumentDbs().size());
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(3, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
assertEquals("type2", proton.documentdb(1).inputdoctypename());
assertEquals(type2Id, proton.documentdb(1).configid());
assertEquals("type3", proton.documentdb(2).inputdoctypename());
assertEquals(type3Id, proton.documentdb(2).configid());
}
assertDocTypeConfig(model, type1Id, "f1", "f2");
assertDocTypeConfig(model, type2Id, "f3", "f4");
assertDocTypeConfig(model, type3Id, "f5", "f6");
{
IndexInfoConfig iicfg = model.getConfig(IndexInfoConfig.class, "test/search/cluster.test");
assertEquals(3, iicfg.indexinfo().size());
assertEquals("type1", iicfg.indexinfo().get(0).name());
assertEquals("type2", iicfg.indexinfo().get(1).name());
assertEquals("type3", iicfg.indexinfo().get(2).name());
}
{
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, type1Id);
assertEquals(2, rac1.attribute().size());
assertEquals("f2", rac1.attribute(0).name());
assertEquals("f2_nfa", rac1.attribute(1).name());
AttributesConfig rac2 = model.getConfig(AttributesConfig.class, type2Id);
assertEquals(2, rac2.attribute().size());
assertEquals("f4", rac2.attribute(0).name());
assertEquals("f4_nfa", rac2.attribute(1).name());
}
{
IlscriptsConfig icfg = model.getConfig(IlscriptsConfig.class, "test/search/cluster.test");
assertEquals(3, icfg.ilscript().size());
assertEquals("type1", icfg.ilscript(0).doctype());
assertEquals("type2", icfg.ilscript(1).doctype());
assertEquals("type3", icfg.ilscript(2).doctype());
}
}
@Test
public void requireThatRelevantConfigIsAvailableForClusterSearcher() throws ParseException, IOException, SAXException {
final List<String> sds = Arrays.asList("type1", "type2");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
String searcherId = "container/searchchains/chain/test/component/com.yahoo.prelude.cluster.ClusterSearcher";
{
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, searcherId);
assertEquals(2, dcfg.documentdb().size());
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type1", db.name());
assertEquals(6, db.rankprofile().size());
assertRankProfile(db, 0, "default", false, false);
assertRankProfile(db, 1, "unranked", false, false);
assertRankProfile(db, 2, "staticrank", false, false);
assertRankProfile(db, 3, "summaryfeatures", true, false);
assertRankProfile(db, 4, "inheritedsummaryfeatures", true, false);
assertRankProfile(db, 5, "rankfeatures", false, true);
assertEquals(2, db.summaryclass().size());
assertEquals("default", db.summaryclass(0).name());
assertEquals("attributeprefetch", db.summaryclass(1).name());
assertSummaryField(db, 0, 0, "f1", "longstring", true);
assertSummaryField(db, 0, 1, "f2", "integer", false);
}
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(1);
assertEquals("type2", db.name());
}
}
{
AttributesConfig acfg = model.getConfig(AttributesConfig.class, searcherId);
assertEquals(4, acfg.attribute().size());
assertEquals("f2", acfg.attribute(0).name());
assertEquals("f2_nfa", acfg.attribute(1).name());
assertEquals("f4", acfg.attribute(2).name());
assertEquals("f4_nfa", acfg.attribute(3).name());
}
}
private void assertRankProfile(DocumentdbInfoConfig.Documentdb db, int index, String name,
boolean hasSummaryFeatures, boolean hasRankFeatures) {
DocumentdbInfoConfig.Documentdb.Rankprofile rankProfile0 = db.rankprofile(index);
assertEquals(name, rankProfile0.name());
assertEquals(hasSummaryFeatures, rankProfile0.hasSummaryFeatures());
assertEquals(hasRankFeatures, rankProfile0.hasRankFeatures());
}
private void assertSummaryField(DocumentdbInfoConfig.Documentdb db, int summaryClassIndex, int fieldIndex,
String name, String type, boolean dynamic) {
DocumentdbInfoConfig.Documentdb.Summaryclass.Fields field = db.summaryclass(summaryClassIndex).fields(fieldIndex);
assertEquals(name, field.name());
assertEquals(type, field.type());
assertEquals(dynamic, field.dynamic());
}
private void assertDocumentDBConfigAvailableForStreaming(String mode) {
final List<String> sds = Arrays.asList("type");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, "test/search/cluster.test.type");
assertEquals(1, dcfg.documentdb().size());
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type", db.name());
}
@Test
public void requireThatDocumentDBConfigIsAvailableForStreaming() throws ParseException, IOException, SAXException {
assertDocumentDBConfigAvailableForStreaming("streaming");
}
private void assertAttributesConfigIndependentOfMode(String mode, List<String> sds,
List<String> documentDBConfigIds,
Map<String, List<String>> expectedAttributesMap) {
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(sds.size(), proton.documentdb().size());
for (int i = 0; i < sds.size(); i++) {
assertEquals(sds.get(i), proton.documentdb(i).inputdoctypename());
assertEquals(documentDBConfigIds.get(i), proton.documentdb(i).configid());
List<String> expectedAttributes = expectedAttributesMap.get(sds.get(i));
if (expectedAttributes != null) {
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, proton.documentdb(i).configid());
assertEquals(expectedAttributes.size(), rac1.attribute().size());
for (int j = 0; j < expectedAttributes.size(); j++) {
assertEquals(expectedAttributes.get(j), rac1.attribute(j).name());
}
}
}
}
@Test
public void testThatAttributesConfigIsProducedForIndexed() {
assertAttributesConfigIndependentOfMode("index", Arrays.asList("type1"),
Arrays.asList("test/search/cluster.test/type1"),
ImmutableMap.of("type1", Arrays.asList("f2", "f2_nfa")));
}
@Test
@Test
public void testThatAttributesConfigIsNotProducedForStoreOnlyEvenForFastAccessFields() {
assertAttributesConfigIndependentOfMode("store-only", Arrays.asList("type1"),
Arrays.asList("test/search"), Collections.emptyMap());
}
} |
We should also test that attribute without fast-access is not present in streaming mode. | public void testThatAttributesConfigIsProducedForStreamingForFastAccessFields() {
Map<String, List<String>> expectedAttributesMap = new HashMap<>();
expectedAttributesMap.put("type1", Arrays.asList("f2"));
assertAttributesConfigIndependentOfMode("streaming", Arrays.asList("type1"), Arrays.asList("test/search/cluster.test.type1/type1"), expectedAttributesMap);
} | expectedAttributesMap.put("type1", Arrays.asList("f2")); | public void testThatAttributesConfigIsProducedForStreamingForFastAccessFields() {
assertAttributesConfigIndependentOfMode("streaming", Arrays.asList("type1"),
Arrays.asList("test/search/cluster.test.type1/type1"),
ImmutableMap.of("type1", Arrays.asList("f2")));
} | class DocumentDatabaseTestCase {
private String vespaHosts = "<?xml version='1.0' encoding='utf-8' ?>" +
"<hosts> " +
" <host name='foo'>" +
" <alias>node0</alias>" +
" </host>" +
"</hosts>";
private String createVespaServices(List<String> sdNames, String mode) {
StringBuilder retval = new StringBuilder();
retval.append("" +
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
"<admin version='2.0'>\n" +
" <adminserver hostalias='node0' />\n" +
"</admin>\n" +
"<container version='1.0'>\n" +
" <nodes>\n" +
" <node hostalias='node0'/>\n" +
" </nodes>\n" +
" <search/>\n" +
"</container>\n" +
"<content version='1.0' id='test'>\n" +
" <redundancy>1</redundancy>\n");
retval.append(" <documents>\n");
for (String sdName : sdNames) {
retval.append("").append(" <document type='").append(sdName).append("' mode='").append(mode).append("'");
retval.append("/>\n");
}
retval.append(" </documents>\n");
retval.append("" +
" <nodes>\n" +
" <node hostalias='node0' distribution-key='0'/>\n" +
" </nodes>\n" +
"</content>\n" +
"</services>\n");
return retval.toString();
}
private ProtonConfig getProtonCfg(ContentSearchCluster cluster) {
ProtonConfig.Builder pb = new ProtonConfig.Builder();
cluster.getConfig(pb);
return new ProtonConfig(pb);
}
private void assertSingleSD(String mode) {
final List<String> sds = Arrays.asList("type1");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
assertEquals(1, indexedSearchCluster.getDocumentDbs().size());
String type1Id = "test/search/cluster.test/type1";
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(1, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
}
@Test
public void requireThatWeCanHaveOneSDForIndexedMode() throws IOException, SAXException, ParseException {
assertSingleSD("index");
}
private void assertDocTypeConfig(VespaModel model, String configId, String indexField, String attributeField) {
IndexschemaConfig icfg = model.getConfig(IndexschemaConfig.class, configId);
assertEquals(1, icfg.indexfield().size());
assertEquals(indexField, icfg.indexfield(0).name());
AttributesConfig acfg = model.getConfig(AttributesConfig.class, configId);
assertEquals(1, acfg.attribute().size());
assertEquals(attributeField, acfg.attribute(0).name());
RankProfilesConfig rcfg = model.getConfig(RankProfilesConfig.class, configId);
assertEquals(6, rcfg.rankprofile().size());
}
@Test
public void requireThatWeCanHaveMultipleSearchDefinitions() throws IOException, SAXException, ParseException {
final List<String> sds = Arrays.asList("type1", "type2", "type3");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
String type1Id = "test/search/cluster.test/type1";
String type2Id = "test/search/cluster.test/type2";
String type3Id = "test/search/cluster.test/type3";
{
assertEquals(3, indexedSearchCluster.getDocumentDbs().size());
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(3, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
assertEquals("type2", proton.documentdb(1).inputdoctypename());
assertEquals(type2Id, proton.documentdb(1).configid());
assertEquals("type3", proton.documentdb(2).inputdoctypename());
assertEquals(type3Id, proton.documentdb(2).configid());
}
assertDocTypeConfig(model, type1Id, "f1", "f2");
assertDocTypeConfig(model, type2Id, "f3", "f4");
assertDocTypeConfig(model, type3Id, "f5", "f6");
{
IndexInfoConfig iicfg = model.getConfig(IndexInfoConfig.class, "test/search/cluster.test");
assertEquals(3, iicfg.indexinfo().size());
assertEquals("type1", iicfg.indexinfo().get(0).name());
assertEquals("type2", iicfg.indexinfo().get(1).name());
assertEquals("type3", iicfg.indexinfo().get(2).name());
}
{
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, type1Id);
assertEquals(1, rac1.attribute().size());
assertEquals("f2", rac1.attribute(0).name());
AttributesConfig rac2 = model.getConfig(AttributesConfig.class, type2Id);
assertEquals(1, rac2.attribute().size());
assertEquals("f4", rac2.attribute(0).name());
}
{
IlscriptsConfig icfg = model.getConfig(IlscriptsConfig.class, "test/search/cluster.test");
assertEquals(3, icfg.ilscript().size());
assertEquals("type1", icfg.ilscript(0).doctype());
assertEquals("type2", icfg.ilscript(1).doctype());
assertEquals("type3", icfg.ilscript(2).doctype());
}
}
@Test
public void requireThatRelevantConfigIsAvailableForClusterSearcher() throws ParseException, IOException, SAXException {
final List<String> sds = Arrays.asList("type1", "type2");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
String searcherId = "container/searchchains/chain/test/component/com.yahoo.prelude.cluster.ClusterSearcher";
{
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, searcherId);
assertEquals(2, dcfg.documentdb().size());
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type1", db.name());
assertEquals(6, db.rankprofile().size());
assertRankProfile(db, 0, "default", false, false);
assertRankProfile(db, 1, "unranked", false, false);
assertRankProfile(db, 2, "staticrank", false, false);
assertRankProfile(db, 3, "summaryfeatures", true, false);
assertRankProfile(db, 4, "inheritedsummaryfeatures", true, false);
assertRankProfile(db, 5, "rankfeatures", false, true);
assertEquals(2, db.summaryclass().size());
assertEquals("default", db.summaryclass(0).name());
assertEquals("attributeprefetch", db.summaryclass(1).name());
assertSummaryField(db, 0, 0, "f1", "longstring", true);
assertSummaryField(db, 0, 1, "f2", "integer", false);
}
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(1);
assertEquals("type2", db.name());
}
}
{
AttributesConfig acfg = model.getConfig(AttributesConfig.class, searcherId);
assertEquals(2, acfg.attribute().size());
assertEquals("f2", acfg.attribute(0).name());
assertEquals("f4", acfg.attribute(1).name());
assertEquals("f4", acfg.attribute(1).name());
}
}
private void assertRankProfile(DocumentdbInfoConfig.Documentdb db, int index, String name,
boolean hasSummaryFeatures, boolean hasRankFeatures) {
DocumentdbInfoConfig.Documentdb.Rankprofile rankProfile0 = db.rankprofile(index);
assertEquals(name, rankProfile0.name());
assertEquals(hasSummaryFeatures, rankProfile0.hasSummaryFeatures());
assertEquals(hasRankFeatures, rankProfile0.hasRankFeatures());
}
private void assertSummaryField(DocumentdbInfoConfig.Documentdb db, int summaryClassIndex, int fieldIndex,
String name, String type, boolean dynamic) {
DocumentdbInfoConfig.Documentdb.Summaryclass.Fields field = db.summaryclass(summaryClassIndex).fields(fieldIndex);
assertEquals(name, field.name());
assertEquals(type, field.type());
assertEquals(dynamic, field.dynamic());
}
private void assertDocumentDBConfigAvailableForStreaming(String mode) {
final List<String> sds = Arrays.asList("type");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, "test/search/cluster.test.type");
assertEquals(1, dcfg.documentdb().size());
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type", db.name());
}
@Test
public void requireThatDocumentDBConfigIsAvailableForStreaming() throws ParseException, IOException, SAXException {
assertDocumentDBConfigAvailableForStreaming("streaming");
}
private void assertAttributesConfigIndependentOfMode(String mode, List<String> sds,
List<String> documentDBConfigIds,
Map<String, List<String>> expectedAttributesMap) {
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(sds.size(), proton.documentdb().size());
for (int i = 0; i < sds.size(); i++) {
assertEquals(sds.get(i), proton.documentdb(i).inputdoctypename());
assertEquals(documentDBConfigIds.get(i), proton.documentdb(i).configid());
List<String> expectedAttributes = expectedAttributesMap.get(sds.get(i));
if (expectedAttributes != null) {
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, proton.documentdb(i).configid());
assertEquals(expectedAttributes.size(), rac1.attribute().size());
for (int j = 0; j < expectedAttributes.size(); j++) {
assertEquals(expectedAttributes.get(j), rac1.attribute(j).name());
}
}
}
}
@Test
public void testThatAttributesConfigIsProducedForIndexed() {
Map<String, List<String>> expectedAttributesMap = new HashMap<>();
expectedAttributesMap.put("type1", Arrays.asList("f2"));
assertAttributesConfigIndependentOfMode("index", Arrays.asList("type1"), Arrays.asList("test/search/cluster.test/type1"), expectedAttributesMap);
}
@Test
@Test
public void testThatAttributesConfigIsNotProducedForStoreOnlyEvenForFastAccessFields() {
assertAttributesConfigIndependentOfMode("store-only", Arrays.asList("type1"), Arrays.asList("test/search"), Collections.emptyMap());
}
} | class DocumentDatabaseTestCase {
private String vespaHosts = "<?xml version='1.0' encoding='utf-8' ?>" +
"<hosts> " +
" <host name='foo'>" +
" <alias>node0</alias>" +
" </host>" +
"</hosts>";
private String createVespaServices(List<String> sdNames, String mode) {
StringBuilder retval = new StringBuilder();
retval.append("" +
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
"<admin version='2.0'>\n" +
" <adminserver hostalias='node0' />\n" +
"</admin>\n" +
"<container version='1.0'>\n" +
" <nodes>\n" +
" <node hostalias='node0'/>\n" +
" </nodes>\n" +
" <search/>\n" +
"</container>\n" +
"<content version='1.0' id='test'>\n" +
" <redundancy>1</redundancy>\n");
retval.append(" <documents>\n");
for (String sdName : sdNames) {
retval.append("").append(" <document type='").append(sdName).append("' mode='").append(mode).append("'");
retval.append("/>\n");
}
retval.append(" </documents>\n");
retval.append("" +
" <nodes>\n" +
" <node hostalias='node0' distribution-key='0'/>\n" +
" </nodes>\n" +
"</content>\n" +
"</services>\n");
return retval.toString();
}
private ProtonConfig getProtonCfg(ContentSearchCluster cluster) {
ProtonConfig.Builder pb = new ProtonConfig.Builder();
cluster.getConfig(pb);
return new ProtonConfig(pb);
}
private void assertSingleSD(String mode) {
final List<String> sds = Arrays.asList("type1");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
assertEquals(1, indexedSearchCluster.getDocumentDbs().size());
String type1Id = "test/search/cluster.test/type1";
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(1, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
}
@Test
public void requireThatWeCanHaveOneSDForIndexedMode() throws IOException, SAXException, ParseException {
assertSingleSD("index");
}
private void assertDocTypeConfig(VespaModel model, String configId, String indexField, String attributeField) {
IndexschemaConfig icfg = model.getConfig(IndexschemaConfig.class, configId);
assertEquals(1, icfg.indexfield().size());
assertEquals(indexField, icfg.indexfield(0).name());
AttributesConfig acfg = model.getConfig(AttributesConfig.class, configId);
assertEquals(2, acfg.attribute().size());
assertEquals(attributeField, acfg.attribute(0).name());
assertEquals(attributeField+"_nfa", acfg.attribute(1).name());
RankProfilesConfig rcfg = model.getConfig(RankProfilesConfig.class, configId);
assertEquals(6, rcfg.rankprofile().size());
}
@Test
public void requireThatWeCanHaveMultipleSearchDefinitions() throws IOException, SAXException, ParseException {
final List<String> sds = Arrays.asList("type1", "type2", "type3");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
String type1Id = "test/search/cluster.test/type1";
String type2Id = "test/search/cluster.test/type2";
String type3Id = "test/search/cluster.test/type3";
{
assertEquals(3, indexedSearchCluster.getDocumentDbs().size());
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(3, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
assertEquals("type2", proton.documentdb(1).inputdoctypename());
assertEquals(type2Id, proton.documentdb(1).configid());
assertEquals("type3", proton.documentdb(2).inputdoctypename());
assertEquals(type3Id, proton.documentdb(2).configid());
}
assertDocTypeConfig(model, type1Id, "f1", "f2");
assertDocTypeConfig(model, type2Id, "f3", "f4");
assertDocTypeConfig(model, type3Id, "f5", "f6");
{
IndexInfoConfig iicfg = model.getConfig(IndexInfoConfig.class, "test/search/cluster.test");
assertEquals(3, iicfg.indexinfo().size());
assertEquals("type1", iicfg.indexinfo().get(0).name());
assertEquals("type2", iicfg.indexinfo().get(1).name());
assertEquals("type3", iicfg.indexinfo().get(2).name());
}
{
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, type1Id);
assertEquals(2, rac1.attribute().size());
assertEquals("f2", rac1.attribute(0).name());
assertEquals("f2_nfa", rac1.attribute(1).name());
AttributesConfig rac2 = model.getConfig(AttributesConfig.class, type2Id);
assertEquals(2, rac2.attribute().size());
assertEquals("f4", rac2.attribute(0).name());
assertEquals("f4_nfa", rac2.attribute(1).name());
}
{
IlscriptsConfig icfg = model.getConfig(IlscriptsConfig.class, "test/search/cluster.test");
assertEquals(3, icfg.ilscript().size());
assertEquals("type1", icfg.ilscript(0).doctype());
assertEquals("type2", icfg.ilscript(1).doctype());
assertEquals("type3", icfg.ilscript(2).doctype());
}
}
@Test
public void requireThatRelevantConfigIsAvailableForClusterSearcher() throws ParseException, IOException, SAXException {
final List<String> sds = Arrays.asList("type1", "type2");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
String searcherId = "container/searchchains/chain/test/component/com.yahoo.prelude.cluster.ClusterSearcher";
{
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, searcherId);
assertEquals(2, dcfg.documentdb().size());
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type1", db.name());
assertEquals(6, db.rankprofile().size());
assertRankProfile(db, 0, "default", false, false);
assertRankProfile(db, 1, "unranked", false, false);
assertRankProfile(db, 2, "staticrank", false, false);
assertRankProfile(db, 3, "summaryfeatures", true, false);
assertRankProfile(db, 4, "inheritedsummaryfeatures", true, false);
assertRankProfile(db, 5, "rankfeatures", false, true);
assertEquals(2, db.summaryclass().size());
assertEquals("default", db.summaryclass(0).name());
assertEquals("attributeprefetch", db.summaryclass(1).name());
assertSummaryField(db, 0, 0, "f1", "longstring", true);
assertSummaryField(db, 0, 1, "f2", "integer", false);
}
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(1);
assertEquals("type2", db.name());
}
}
{
AttributesConfig acfg = model.getConfig(AttributesConfig.class, searcherId);
assertEquals(4, acfg.attribute().size());
assertEquals("f2", acfg.attribute(0).name());
assertEquals("f2_nfa", acfg.attribute(1).name());
assertEquals("f4", acfg.attribute(2).name());
assertEquals("f4_nfa", acfg.attribute(3).name());
}
}
private void assertRankProfile(DocumentdbInfoConfig.Documentdb db, int index, String name,
boolean hasSummaryFeatures, boolean hasRankFeatures) {
DocumentdbInfoConfig.Documentdb.Rankprofile rankProfile0 = db.rankprofile(index);
assertEquals(name, rankProfile0.name());
assertEquals(hasSummaryFeatures, rankProfile0.hasSummaryFeatures());
assertEquals(hasRankFeatures, rankProfile0.hasRankFeatures());
}
private void assertSummaryField(DocumentdbInfoConfig.Documentdb db, int summaryClassIndex, int fieldIndex,
String name, String type, boolean dynamic) {
DocumentdbInfoConfig.Documentdb.Summaryclass.Fields field = db.summaryclass(summaryClassIndex).fields(fieldIndex);
assertEquals(name, field.name());
assertEquals(type, field.type());
assertEquals(dynamic, field.dynamic());
}
private void assertDocumentDBConfigAvailableForStreaming(String mode) {
final List<String> sds = Arrays.asList("type");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, "test/search/cluster.test.type");
assertEquals(1, dcfg.documentdb().size());
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type", db.name());
}
@Test
public void requireThatDocumentDBConfigIsAvailableForStreaming() throws ParseException, IOException, SAXException {
assertDocumentDBConfigAvailableForStreaming("streaming");
}
private void assertAttributesConfigIndependentOfMode(String mode, List<String> sds,
List<String> documentDBConfigIds,
Map<String, List<String>> expectedAttributesMap) {
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(sds.size(), proton.documentdb().size());
for (int i = 0; i < sds.size(); i++) {
assertEquals(sds.get(i), proton.documentdb(i).inputdoctypename());
assertEquals(documentDBConfigIds.get(i), proton.documentdb(i).configid());
List<String> expectedAttributes = expectedAttributesMap.get(sds.get(i));
if (expectedAttributes != null) {
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, proton.documentdb(i).configid());
assertEquals(expectedAttributes.size(), rac1.attribute().size());
for (int j = 0; j < expectedAttributes.size(); j++) {
assertEquals(expectedAttributes.get(j), rac1.attribute(j).name());
}
}
}
}
@Test
public void testThatAttributesConfigIsProducedForIndexed() {
assertAttributesConfigIndependentOfMode("index", Arrays.asList("type1"),
Arrays.asList("test/search/cluster.test/type1"),
ImmutableMap.of("type1", Arrays.asList("f2", "f2_nfa")));
}
@Test
@Test
public void testThatAttributesConfigIsNotProducedForStoreOnlyEvenForFastAccessFields() {
assertAttributesConfigIndependentOfMode("store-only", Arrays.asList("type1"),
Arrays.asList("test/search"), Collections.emptyMap());
}
} |
Changed. | boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
} | .forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName())); | boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
Fixed | public void testThatAttributesConfigIsProducedForIndexed() {
Map<String, List<String>> expectedAttributesMap = new HashMap<>();
expectedAttributesMap.put("type1", Arrays.asList("f2"));
assertAttributesConfigIndependentOfMode("index", Arrays.asList("type1"), Arrays.asList("test/search/cluster.test/type1"), expectedAttributesMap);
} | expectedAttributesMap.put("type1", Arrays.asList("f2")); | public void testThatAttributesConfigIsProducedForIndexed() {
assertAttributesConfigIndependentOfMode("index", Arrays.asList("type1"),
Arrays.asList("test/search/cluster.test/type1"),
ImmutableMap.of("type1", Arrays.asList("f2", "f2_nfa")));
} | class DocumentDatabaseTestCase {
private String vespaHosts = "<?xml version='1.0' encoding='utf-8' ?>" +
"<hosts> " +
" <host name='foo'>" +
" <alias>node0</alias>" +
" </host>" +
"</hosts>";
private String createVespaServices(List<String> sdNames, String mode) {
StringBuilder retval = new StringBuilder();
retval.append("" +
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
"<admin version='2.0'>\n" +
" <adminserver hostalias='node0' />\n" +
"</admin>\n" +
"<container version='1.0'>\n" +
" <nodes>\n" +
" <node hostalias='node0'/>\n" +
" </nodes>\n" +
" <search/>\n" +
"</container>\n" +
"<content version='1.0' id='test'>\n" +
" <redundancy>1</redundancy>\n");
retval.append(" <documents>\n");
for (String sdName : sdNames) {
retval.append("").append(" <document type='").append(sdName).append("' mode='").append(mode).append("'");
retval.append("/>\n");
}
retval.append(" </documents>\n");
retval.append("" +
" <nodes>\n" +
" <node hostalias='node0' distribution-key='0'/>\n" +
" </nodes>\n" +
"</content>\n" +
"</services>\n");
return retval.toString();
}
private ProtonConfig getProtonCfg(ContentSearchCluster cluster) {
ProtonConfig.Builder pb = new ProtonConfig.Builder();
cluster.getConfig(pb);
return new ProtonConfig(pb);
}
private void assertSingleSD(String mode) {
final List<String> sds = Arrays.asList("type1");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
assertEquals(1, indexedSearchCluster.getDocumentDbs().size());
String type1Id = "test/search/cluster.test/type1";
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(1, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
}
@Test
public void requireThatWeCanHaveOneSDForIndexedMode() throws IOException, SAXException, ParseException {
assertSingleSD("index");
}
private void assertDocTypeConfig(VespaModel model, String configId, String indexField, String attributeField) {
IndexschemaConfig icfg = model.getConfig(IndexschemaConfig.class, configId);
assertEquals(1, icfg.indexfield().size());
assertEquals(indexField, icfg.indexfield(0).name());
AttributesConfig acfg = model.getConfig(AttributesConfig.class, configId);
assertEquals(1, acfg.attribute().size());
assertEquals(attributeField, acfg.attribute(0).name());
RankProfilesConfig rcfg = model.getConfig(RankProfilesConfig.class, configId);
assertEquals(6, rcfg.rankprofile().size());
}
@Test
public void requireThatWeCanHaveMultipleSearchDefinitions() throws IOException, SAXException, ParseException {
final List<String> sds = Arrays.asList("type1", "type2", "type3");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
String type1Id = "test/search/cluster.test/type1";
String type2Id = "test/search/cluster.test/type2";
String type3Id = "test/search/cluster.test/type3";
{
assertEquals(3, indexedSearchCluster.getDocumentDbs().size());
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(3, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
assertEquals("type2", proton.documentdb(1).inputdoctypename());
assertEquals(type2Id, proton.documentdb(1).configid());
assertEquals("type3", proton.documentdb(2).inputdoctypename());
assertEquals(type3Id, proton.documentdb(2).configid());
}
assertDocTypeConfig(model, type1Id, "f1", "f2");
assertDocTypeConfig(model, type2Id, "f3", "f4");
assertDocTypeConfig(model, type3Id, "f5", "f6");
{
IndexInfoConfig iicfg = model.getConfig(IndexInfoConfig.class, "test/search/cluster.test");
assertEquals(3, iicfg.indexinfo().size());
assertEquals("type1", iicfg.indexinfo().get(0).name());
assertEquals("type2", iicfg.indexinfo().get(1).name());
assertEquals("type3", iicfg.indexinfo().get(2).name());
}
{
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, type1Id);
assertEquals(1, rac1.attribute().size());
assertEquals("f2", rac1.attribute(0).name());
AttributesConfig rac2 = model.getConfig(AttributesConfig.class, type2Id);
assertEquals(1, rac2.attribute().size());
assertEquals("f4", rac2.attribute(0).name());
}
{
IlscriptsConfig icfg = model.getConfig(IlscriptsConfig.class, "test/search/cluster.test");
assertEquals(3, icfg.ilscript().size());
assertEquals("type1", icfg.ilscript(0).doctype());
assertEquals("type2", icfg.ilscript(1).doctype());
assertEquals("type3", icfg.ilscript(2).doctype());
}
}
@Test
public void requireThatRelevantConfigIsAvailableForClusterSearcher() throws ParseException, IOException, SAXException {
final List<String> sds = Arrays.asList("type1", "type2");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
String searcherId = "container/searchchains/chain/test/component/com.yahoo.prelude.cluster.ClusterSearcher";
{
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, searcherId);
assertEquals(2, dcfg.documentdb().size());
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type1", db.name());
assertEquals(6, db.rankprofile().size());
assertRankProfile(db, 0, "default", false, false);
assertRankProfile(db, 1, "unranked", false, false);
assertRankProfile(db, 2, "staticrank", false, false);
assertRankProfile(db, 3, "summaryfeatures", true, false);
assertRankProfile(db, 4, "inheritedsummaryfeatures", true, false);
assertRankProfile(db, 5, "rankfeatures", false, true);
assertEquals(2, db.summaryclass().size());
assertEquals("default", db.summaryclass(0).name());
assertEquals("attributeprefetch", db.summaryclass(1).name());
assertSummaryField(db, 0, 0, "f1", "longstring", true);
assertSummaryField(db, 0, 1, "f2", "integer", false);
}
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(1);
assertEquals("type2", db.name());
}
}
{
AttributesConfig acfg = model.getConfig(AttributesConfig.class, searcherId);
assertEquals(2, acfg.attribute().size());
assertEquals("f2", acfg.attribute(0).name());
assertEquals("f4", acfg.attribute(1).name());
assertEquals("f4", acfg.attribute(1).name());
}
}
private void assertRankProfile(DocumentdbInfoConfig.Documentdb db, int index, String name,
boolean hasSummaryFeatures, boolean hasRankFeatures) {
DocumentdbInfoConfig.Documentdb.Rankprofile rankProfile0 = db.rankprofile(index);
assertEquals(name, rankProfile0.name());
assertEquals(hasSummaryFeatures, rankProfile0.hasSummaryFeatures());
assertEquals(hasRankFeatures, rankProfile0.hasRankFeatures());
}
private void assertSummaryField(DocumentdbInfoConfig.Documentdb db, int summaryClassIndex, int fieldIndex,
String name, String type, boolean dynamic) {
DocumentdbInfoConfig.Documentdb.Summaryclass.Fields field = db.summaryclass(summaryClassIndex).fields(fieldIndex);
assertEquals(name, field.name());
assertEquals(type, field.type());
assertEquals(dynamic, field.dynamic());
}
private void assertDocumentDBConfigAvailableForStreaming(String mode) {
final List<String> sds = Arrays.asList("type");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, "test/search/cluster.test.type");
assertEquals(1, dcfg.documentdb().size());
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type", db.name());
}
@Test
public void requireThatDocumentDBConfigIsAvailableForStreaming() throws ParseException, IOException, SAXException {
assertDocumentDBConfigAvailableForStreaming("streaming");
}
private void assertAttributesConfigIndependentOfMode(String mode, List<String> sds,
List<String> documentDBConfigIds,
Map<String, List<String>> expectedAttributesMap) {
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(sds.size(), proton.documentdb().size());
for (int i = 0; i < sds.size(); i++) {
assertEquals(sds.get(i), proton.documentdb(i).inputdoctypename());
assertEquals(documentDBConfigIds.get(i), proton.documentdb(i).configid());
List<String> expectedAttributes = expectedAttributesMap.get(sds.get(i));
if (expectedAttributes != null) {
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, proton.documentdb(i).configid());
assertEquals(expectedAttributes.size(), rac1.attribute().size());
for (int j = 0; j < expectedAttributes.size(); j++) {
assertEquals(expectedAttributes.get(j), rac1.attribute(j).name());
}
}
}
}
@Test
@Test
public void testThatAttributesConfigIsProducedForStreamingForFastAccessFields() {
Map<String, List<String>> expectedAttributesMap = new HashMap<>();
expectedAttributesMap.put("type1", Arrays.asList("f2"));
assertAttributesConfigIndependentOfMode("streaming", Arrays.asList("type1"), Arrays.asList("test/search/cluster.test.type1/type1"), expectedAttributesMap);
}
@Test
public void testThatAttributesConfigIsNotProducedForStoreOnlyEvenForFastAccessFields() {
assertAttributesConfigIndependentOfMode("store-only", Arrays.asList("type1"), Arrays.asList("test/search"), Collections.emptyMap());
}
} | class DocumentDatabaseTestCase {
private String vespaHosts = "<?xml version='1.0' encoding='utf-8' ?>" +
"<hosts> " +
" <host name='foo'>" +
" <alias>node0</alias>" +
" </host>" +
"</hosts>";
private String createVespaServices(List<String> sdNames, String mode) {
StringBuilder retval = new StringBuilder();
retval.append("" +
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
"<admin version='2.0'>\n" +
" <adminserver hostalias='node0' />\n" +
"</admin>\n" +
"<container version='1.0'>\n" +
" <nodes>\n" +
" <node hostalias='node0'/>\n" +
" </nodes>\n" +
" <search/>\n" +
"</container>\n" +
"<content version='1.0' id='test'>\n" +
" <redundancy>1</redundancy>\n");
retval.append(" <documents>\n");
for (String sdName : sdNames) {
retval.append("").append(" <document type='").append(sdName).append("' mode='").append(mode).append("'");
retval.append("/>\n");
}
retval.append(" </documents>\n");
retval.append("" +
" <nodes>\n" +
" <node hostalias='node0' distribution-key='0'/>\n" +
" </nodes>\n" +
"</content>\n" +
"</services>\n");
return retval.toString();
}
private ProtonConfig getProtonCfg(ContentSearchCluster cluster) {
ProtonConfig.Builder pb = new ProtonConfig.Builder();
cluster.getConfig(pb);
return new ProtonConfig(pb);
}
private void assertSingleSD(String mode) {
final List<String> sds = Arrays.asList("type1");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
assertEquals(1, indexedSearchCluster.getDocumentDbs().size());
String type1Id = "test/search/cluster.test/type1";
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(1, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
}
@Test
public void requireThatWeCanHaveOneSDForIndexedMode() throws IOException, SAXException, ParseException {
assertSingleSD("index");
}
private void assertDocTypeConfig(VespaModel model, String configId, String indexField, String attributeField) {
IndexschemaConfig icfg = model.getConfig(IndexschemaConfig.class, configId);
assertEquals(1, icfg.indexfield().size());
assertEquals(indexField, icfg.indexfield(0).name());
AttributesConfig acfg = model.getConfig(AttributesConfig.class, configId);
assertEquals(2, acfg.attribute().size());
assertEquals(attributeField, acfg.attribute(0).name());
assertEquals(attributeField+"_nfa", acfg.attribute(1).name());
RankProfilesConfig rcfg = model.getConfig(RankProfilesConfig.class, configId);
assertEquals(6, rcfg.rankprofile().size());
}
@Test
public void requireThatWeCanHaveMultipleSearchDefinitions() throws IOException, SAXException, ParseException {
final List<String> sds = Arrays.asList("type1", "type2", "type3");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
String type1Id = "test/search/cluster.test/type1";
String type2Id = "test/search/cluster.test/type2";
String type3Id = "test/search/cluster.test/type3";
{
assertEquals(3, indexedSearchCluster.getDocumentDbs().size());
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(3, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
assertEquals("type2", proton.documentdb(1).inputdoctypename());
assertEquals(type2Id, proton.documentdb(1).configid());
assertEquals("type3", proton.documentdb(2).inputdoctypename());
assertEquals(type3Id, proton.documentdb(2).configid());
}
assertDocTypeConfig(model, type1Id, "f1", "f2");
assertDocTypeConfig(model, type2Id, "f3", "f4");
assertDocTypeConfig(model, type3Id, "f5", "f6");
{
IndexInfoConfig iicfg = model.getConfig(IndexInfoConfig.class, "test/search/cluster.test");
assertEquals(3, iicfg.indexinfo().size());
assertEquals("type1", iicfg.indexinfo().get(0).name());
assertEquals("type2", iicfg.indexinfo().get(1).name());
assertEquals("type3", iicfg.indexinfo().get(2).name());
}
{
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, type1Id);
assertEquals(2, rac1.attribute().size());
assertEquals("f2", rac1.attribute(0).name());
assertEquals("f2_nfa", rac1.attribute(1).name());
AttributesConfig rac2 = model.getConfig(AttributesConfig.class, type2Id);
assertEquals(2, rac2.attribute().size());
assertEquals("f4", rac2.attribute(0).name());
assertEquals("f4_nfa", rac2.attribute(1).name());
}
{
IlscriptsConfig icfg = model.getConfig(IlscriptsConfig.class, "test/search/cluster.test");
assertEquals(3, icfg.ilscript().size());
assertEquals("type1", icfg.ilscript(0).doctype());
assertEquals("type2", icfg.ilscript(1).doctype());
assertEquals("type3", icfg.ilscript(2).doctype());
}
}
@Test
public void requireThatRelevantConfigIsAvailableForClusterSearcher() throws ParseException, IOException, SAXException {
final List<String> sds = Arrays.asList("type1", "type2");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
String searcherId = "container/searchchains/chain/test/component/com.yahoo.prelude.cluster.ClusterSearcher";
{
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, searcherId);
assertEquals(2, dcfg.documentdb().size());
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type1", db.name());
assertEquals(6, db.rankprofile().size());
assertRankProfile(db, 0, "default", false, false);
assertRankProfile(db, 1, "unranked", false, false);
assertRankProfile(db, 2, "staticrank", false, false);
assertRankProfile(db, 3, "summaryfeatures", true, false);
assertRankProfile(db, 4, "inheritedsummaryfeatures", true, false);
assertRankProfile(db, 5, "rankfeatures", false, true);
assertEquals(2, db.summaryclass().size());
assertEquals("default", db.summaryclass(0).name());
assertEquals("attributeprefetch", db.summaryclass(1).name());
assertSummaryField(db, 0, 0, "f1", "longstring", true);
assertSummaryField(db, 0, 1, "f2", "integer", false);
}
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(1);
assertEquals("type2", db.name());
}
}
{
AttributesConfig acfg = model.getConfig(AttributesConfig.class, searcherId);
assertEquals(4, acfg.attribute().size());
assertEquals("f2", acfg.attribute(0).name());
assertEquals("f2_nfa", acfg.attribute(1).name());
assertEquals("f4", acfg.attribute(2).name());
assertEquals("f4_nfa", acfg.attribute(3).name());
}
}
private void assertRankProfile(DocumentdbInfoConfig.Documentdb db, int index, String name,
boolean hasSummaryFeatures, boolean hasRankFeatures) {
DocumentdbInfoConfig.Documentdb.Rankprofile rankProfile0 = db.rankprofile(index);
assertEquals(name, rankProfile0.name());
assertEquals(hasSummaryFeatures, rankProfile0.hasSummaryFeatures());
assertEquals(hasRankFeatures, rankProfile0.hasRankFeatures());
}
private void assertSummaryField(DocumentdbInfoConfig.Documentdb db, int summaryClassIndex, int fieldIndex,
String name, String type, boolean dynamic) {
DocumentdbInfoConfig.Documentdb.Summaryclass.Fields field = db.summaryclass(summaryClassIndex).fields(fieldIndex);
assertEquals(name, field.name());
assertEquals(type, field.type());
assertEquals(dynamic, field.dynamic());
}
private void assertDocumentDBConfigAvailableForStreaming(String mode) {
final List<String> sds = Arrays.asList("type");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, "test/search/cluster.test.type");
assertEquals(1, dcfg.documentdb().size());
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type", db.name());
}
@Test
public void requireThatDocumentDBConfigIsAvailableForStreaming() throws ParseException, IOException, SAXException {
assertDocumentDBConfigAvailableForStreaming("streaming");
}
private void assertAttributesConfigIndependentOfMode(String mode, List<String> sds,
List<String> documentDBConfigIds,
Map<String, List<String>> expectedAttributesMap) {
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(sds.size(), proton.documentdb().size());
for (int i = 0; i < sds.size(); i++) {
assertEquals(sds.get(i), proton.documentdb(i).inputdoctypename());
assertEquals(documentDBConfigIds.get(i), proton.documentdb(i).configid());
List<String> expectedAttributes = expectedAttributesMap.get(sds.get(i));
if (expectedAttributes != null) {
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, proton.documentdb(i).configid());
assertEquals(expectedAttributes.size(), rac1.attribute().size());
for (int j = 0; j < expectedAttributes.size(); j++) {
assertEquals(expectedAttributes.get(j), rac1.attribute(j).name());
}
}
}
}
@Test
@Test
public void testThatAttributesConfigIsProducedForStreamingForFastAccessFields() {
assertAttributesConfigIndependentOfMode("streaming", Arrays.asList("type1"),
Arrays.asList("test/search/cluster.test.type1/type1"),
ImmutableMap.of("type1", Arrays.asList("f2")));
}
@Test
public void testThatAttributesConfigIsNotProducedForStoreOnlyEvenForFastAccessFields() {
assertAttributesConfigIndependentOfMode("store-only", Arrays.asList("type1"),
Arrays.asList("test/search"), Collections.emptyMap());
}
} |
Very good point, forgotten both to implement and test.. | public void testThatAttributesConfigIsProducedForStreamingForFastAccessFields() {
Map<String, List<String>> expectedAttributesMap = new HashMap<>();
expectedAttributesMap.put("type1", Arrays.asList("f2"));
assertAttributesConfigIndependentOfMode("streaming", Arrays.asList("type1"), Arrays.asList("test/search/cluster.test.type1/type1"), expectedAttributesMap);
} | expectedAttributesMap.put("type1", Arrays.asList("f2")); | public void testThatAttributesConfigIsProducedForStreamingForFastAccessFields() {
assertAttributesConfigIndependentOfMode("streaming", Arrays.asList("type1"),
Arrays.asList("test/search/cluster.test.type1/type1"),
ImmutableMap.of("type1", Arrays.asList("f2")));
} | class DocumentDatabaseTestCase {
private String vespaHosts = "<?xml version='1.0' encoding='utf-8' ?>" +
"<hosts> " +
" <host name='foo'>" +
" <alias>node0</alias>" +
" </host>" +
"</hosts>";
private String createVespaServices(List<String> sdNames, String mode) {
StringBuilder retval = new StringBuilder();
retval.append("" +
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
"<admin version='2.0'>\n" +
" <adminserver hostalias='node0' />\n" +
"</admin>\n" +
"<container version='1.0'>\n" +
" <nodes>\n" +
" <node hostalias='node0'/>\n" +
" </nodes>\n" +
" <search/>\n" +
"</container>\n" +
"<content version='1.0' id='test'>\n" +
" <redundancy>1</redundancy>\n");
retval.append(" <documents>\n");
for (String sdName : sdNames) {
retval.append("").append(" <document type='").append(sdName).append("' mode='").append(mode).append("'");
retval.append("/>\n");
}
retval.append(" </documents>\n");
retval.append("" +
" <nodes>\n" +
" <node hostalias='node0' distribution-key='0'/>\n" +
" </nodes>\n" +
"</content>\n" +
"</services>\n");
return retval.toString();
}
private ProtonConfig getProtonCfg(ContentSearchCluster cluster) {
ProtonConfig.Builder pb = new ProtonConfig.Builder();
cluster.getConfig(pb);
return new ProtonConfig(pb);
}
private void assertSingleSD(String mode) {
final List<String> sds = Arrays.asList("type1");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
assertEquals(1, indexedSearchCluster.getDocumentDbs().size());
String type1Id = "test/search/cluster.test/type1";
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(1, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
}
@Test
public void requireThatWeCanHaveOneSDForIndexedMode() throws IOException, SAXException, ParseException {
assertSingleSD("index");
}
private void assertDocTypeConfig(VespaModel model, String configId, String indexField, String attributeField) {
IndexschemaConfig icfg = model.getConfig(IndexschemaConfig.class, configId);
assertEquals(1, icfg.indexfield().size());
assertEquals(indexField, icfg.indexfield(0).name());
AttributesConfig acfg = model.getConfig(AttributesConfig.class, configId);
assertEquals(1, acfg.attribute().size());
assertEquals(attributeField, acfg.attribute(0).name());
RankProfilesConfig rcfg = model.getConfig(RankProfilesConfig.class, configId);
assertEquals(6, rcfg.rankprofile().size());
}
@Test
public void requireThatWeCanHaveMultipleSearchDefinitions() throws IOException, SAXException, ParseException {
final List<String> sds = Arrays.asList("type1", "type2", "type3");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
String type1Id = "test/search/cluster.test/type1";
String type2Id = "test/search/cluster.test/type2";
String type3Id = "test/search/cluster.test/type3";
{
assertEquals(3, indexedSearchCluster.getDocumentDbs().size());
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(3, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
assertEquals("type2", proton.documentdb(1).inputdoctypename());
assertEquals(type2Id, proton.documentdb(1).configid());
assertEquals("type3", proton.documentdb(2).inputdoctypename());
assertEquals(type3Id, proton.documentdb(2).configid());
}
assertDocTypeConfig(model, type1Id, "f1", "f2");
assertDocTypeConfig(model, type2Id, "f3", "f4");
assertDocTypeConfig(model, type3Id, "f5", "f6");
{
IndexInfoConfig iicfg = model.getConfig(IndexInfoConfig.class, "test/search/cluster.test");
assertEquals(3, iicfg.indexinfo().size());
assertEquals("type1", iicfg.indexinfo().get(0).name());
assertEquals("type2", iicfg.indexinfo().get(1).name());
assertEquals("type3", iicfg.indexinfo().get(2).name());
}
{
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, type1Id);
assertEquals(1, rac1.attribute().size());
assertEquals("f2", rac1.attribute(0).name());
AttributesConfig rac2 = model.getConfig(AttributesConfig.class, type2Id);
assertEquals(1, rac2.attribute().size());
assertEquals("f4", rac2.attribute(0).name());
}
{
IlscriptsConfig icfg = model.getConfig(IlscriptsConfig.class, "test/search/cluster.test");
assertEquals(3, icfg.ilscript().size());
assertEquals("type1", icfg.ilscript(0).doctype());
assertEquals("type2", icfg.ilscript(1).doctype());
assertEquals("type3", icfg.ilscript(2).doctype());
}
}
@Test
public void requireThatRelevantConfigIsAvailableForClusterSearcher() throws ParseException, IOException, SAXException {
final List<String> sds = Arrays.asList("type1", "type2");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
String searcherId = "container/searchchains/chain/test/component/com.yahoo.prelude.cluster.ClusterSearcher";
{
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, searcherId);
assertEquals(2, dcfg.documentdb().size());
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type1", db.name());
assertEquals(6, db.rankprofile().size());
assertRankProfile(db, 0, "default", false, false);
assertRankProfile(db, 1, "unranked", false, false);
assertRankProfile(db, 2, "staticrank", false, false);
assertRankProfile(db, 3, "summaryfeatures", true, false);
assertRankProfile(db, 4, "inheritedsummaryfeatures", true, false);
assertRankProfile(db, 5, "rankfeatures", false, true);
assertEquals(2, db.summaryclass().size());
assertEquals("default", db.summaryclass(0).name());
assertEquals("attributeprefetch", db.summaryclass(1).name());
assertSummaryField(db, 0, 0, "f1", "longstring", true);
assertSummaryField(db, 0, 1, "f2", "integer", false);
}
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(1);
assertEquals("type2", db.name());
}
}
{
AttributesConfig acfg = model.getConfig(AttributesConfig.class, searcherId);
assertEquals(2, acfg.attribute().size());
assertEquals("f2", acfg.attribute(0).name());
assertEquals("f4", acfg.attribute(1).name());
assertEquals("f4", acfg.attribute(1).name());
}
}
private void assertRankProfile(DocumentdbInfoConfig.Documentdb db, int index, String name,
boolean hasSummaryFeatures, boolean hasRankFeatures) {
DocumentdbInfoConfig.Documentdb.Rankprofile rankProfile0 = db.rankprofile(index);
assertEquals(name, rankProfile0.name());
assertEquals(hasSummaryFeatures, rankProfile0.hasSummaryFeatures());
assertEquals(hasRankFeatures, rankProfile0.hasRankFeatures());
}
private void assertSummaryField(DocumentdbInfoConfig.Documentdb db, int summaryClassIndex, int fieldIndex,
String name, String type, boolean dynamic) {
DocumentdbInfoConfig.Documentdb.Summaryclass.Fields field = db.summaryclass(summaryClassIndex).fields(fieldIndex);
assertEquals(name, field.name());
assertEquals(type, field.type());
assertEquals(dynamic, field.dynamic());
}
private void assertDocumentDBConfigAvailableForStreaming(String mode) {
final List<String> sds = Arrays.asList("type");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, "test/search/cluster.test.type");
assertEquals(1, dcfg.documentdb().size());
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type", db.name());
}
@Test
public void requireThatDocumentDBConfigIsAvailableForStreaming() throws ParseException, IOException, SAXException {
assertDocumentDBConfigAvailableForStreaming("streaming");
}
private void assertAttributesConfigIndependentOfMode(String mode, List<String> sds,
List<String> documentDBConfigIds,
Map<String, List<String>> expectedAttributesMap) {
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(sds.size(), proton.documentdb().size());
for (int i = 0; i < sds.size(); i++) {
assertEquals(sds.get(i), proton.documentdb(i).inputdoctypename());
assertEquals(documentDBConfigIds.get(i), proton.documentdb(i).configid());
List<String> expectedAttributes = expectedAttributesMap.get(sds.get(i));
if (expectedAttributes != null) {
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, proton.documentdb(i).configid());
assertEquals(expectedAttributes.size(), rac1.attribute().size());
for (int j = 0; j < expectedAttributes.size(); j++) {
assertEquals(expectedAttributes.get(j), rac1.attribute(j).name());
}
}
}
}
@Test
public void testThatAttributesConfigIsProducedForIndexed() {
Map<String, List<String>> expectedAttributesMap = new HashMap<>();
expectedAttributesMap.put("type1", Arrays.asList("f2"));
assertAttributesConfigIndependentOfMode("index", Arrays.asList("type1"), Arrays.asList("test/search/cluster.test/type1"), expectedAttributesMap);
}
@Test
@Test
public void testThatAttributesConfigIsNotProducedForStoreOnlyEvenForFastAccessFields() {
assertAttributesConfigIndependentOfMode("store-only", Arrays.asList("type1"), Arrays.asList("test/search"), Collections.emptyMap());
}
} | class DocumentDatabaseTestCase {
private String vespaHosts = "<?xml version='1.0' encoding='utf-8' ?>" +
"<hosts> " +
" <host name='foo'>" +
" <alias>node0</alias>" +
" </host>" +
"</hosts>";
private String createVespaServices(List<String> sdNames, String mode) {
StringBuilder retval = new StringBuilder();
retval.append("" +
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
"<admin version='2.0'>\n" +
" <adminserver hostalias='node0' />\n" +
"</admin>\n" +
"<container version='1.0'>\n" +
" <nodes>\n" +
" <node hostalias='node0'/>\n" +
" </nodes>\n" +
" <search/>\n" +
"</container>\n" +
"<content version='1.0' id='test'>\n" +
" <redundancy>1</redundancy>\n");
retval.append(" <documents>\n");
for (String sdName : sdNames) {
retval.append("").append(" <document type='").append(sdName).append("' mode='").append(mode).append("'");
retval.append("/>\n");
}
retval.append(" </documents>\n");
retval.append("" +
" <nodes>\n" +
" <node hostalias='node0' distribution-key='0'/>\n" +
" </nodes>\n" +
"</content>\n" +
"</services>\n");
return retval.toString();
}
private ProtonConfig getProtonCfg(ContentSearchCluster cluster) {
ProtonConfig.Builder pb = new ProtonConfig.Builder();
cluster.getConfig(pb);
return new ProtonConfig(pb);
}
private void assertSingleSD(String mode) {
final List<String> sds = Arrays.asList("type1");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
assertEquals(1, indexedSearchCluster.getDocumentDbs().size());
String type1Id = "test/search/cluster.test/type1";
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(1, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
}
@Test
public void requireThatWeCanHaveOneSDForIndexedMode() throws IOException, SAXException, ParseException {
assertSingleSD("index");
}
private void assertDocTypeConfig(VespaModel model, String configId, String indexField, String attributeField) {
IndexschemaConfig icfg = model.getConfig(IndexschemaConfig.class, configId);
assertEquals(1, icfg.indexfield().size());
assertEquals(indexField, icfg.indexfield(0).name());
AttributesConfig acfg = model.getConfig(AttributesConfig.class, configId);
assertEquals(2, acfg.attribute().size());
assertEquals(attributeField, acfg.attribute(0).name());
assertEquals(attributeField+"_nfa", acfg.attribute(1).name());
RankProfilesConfig rcfg = model.getConfig(RankProfilesConfig.class, configId);
assertEquals(6, rcfg.rankprofile().size());
}
@Test
public void requireThatWeCanHaveMultipleSearchDefinitions() throws IOException, SAXException, ParseException {
final List<String> sds = Arrays.asList("type1", "type2", "type3");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
IndexedSearchCluster indexedSearchCluster = (IndexedSearchCluster)model.getSearchClusters().get(0);
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
String type1Id = "test/search/cluster.test/type1";
String type2Id = "test/search/cluster.test/type2";
String type3Id = "test/search/cluster.test/type3";
{
assertEquals(3, indexedSearchCluster.getDocumentDbs().size());
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(3, proton.documentdb().size());
assertEquals("type1", proton.documentdb(0).inputdoctypename());
assertEquals(type1Id, proton.documentdb(0).configid());
assertEquals("type2", proton.documentdb(1).inputdoctypename());
assertEquals(type2Id, proton.documentdb(1).configid());
assertEquals("type3", proton.documentdb(2).inputdoctypename());
assertEquals(type3Id, proton.documentdb(2).configid());
}
assertDocTypeConfig(model, type1Id, "f1", "f2");
assertDocTypeConfig(model, type2Id, "f3", "f4");
assertDocTypeConfig(model, type3Id, "f5", "f6");
{
IndexInfoConfig iicfg = model.getConfig(IndexInfoConfig.class, "test/search/cluster.test");
assertEquals(3, iicfg.indexinfo().size());
assertEquals("type1", iicfg.indexinfo().get(0).name());
assertEquals("type2", iicfg.indexinfo().get(1).name());
assertEquals("type3", iicfg.indexinfo().get(2).name());
}
{
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, type1Id);
assertEquals(2, rac1.attribute().size());
assertEquals("f2", rac1.attribute(0).name());
assertEquals("f2_nfa", rac1.attribute(1).name());
AttributesConfig rac2 = model.getConfig(AttributesConfig.class, type2Id);
assertEquals(2, rac2.attribute().size());
assertEquals("f4", rac2.attribute(0).name());
assertEquals("f4_nfa", rac2.attribute(1).name());
}
{
IlscriptsConfig icfg = model.getConfig(IlscriptsConfig.class, "test/search/cluster.test");
assertEquals(3, icfg.ilscript().size());
assertEquals("type1", icfg.ilscript(0).doctype());
assertEquals("type2", icfg.ilscript(1).doctype());
assertEquals("type3", icfg.ilscript(2).doctype());
}
}
// Verifies that the ClusterSearcher component is served the document-db, rank-profile,
// summary-class and attributes config derived from two search definitions in "index" mode.
@Test
public void requireThatRelevantConfigIsAvailableForClusterSearcher() throws ParseException, IOException, SAXException {
final List<String> sds = Arrays.asList("type1", "type2");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, "index"),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
String searcherId = "container/searchchains/chain/test/component/com.yahoo.prelude.cluster.ClusterSearcher";
// Document-db info: one entry per search definition, in declaration order.
{
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, searcherId);
assertEquals(2, dcfg.documentdb().size());
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type1", db.name());
assertEquals(6, db.rankprofile().size());
assertRankProfile(db, 0, "default", false, false);
assertRankProfile(db, 1, "unranked", false, false);
assertRankProfile(db, 2, "staticrank", false, false);
assertRankProfile(db, 3, "summaryfeatures", true, false);
assertRankProfile(db, 4, "inheritedsummaryfeatures", true, false);
assertRankProfile(db, 5, "rankfeatures", false, true);
assertEquals(2, db.summaryclass().size());
assertEquals("default", db.summaryclass(0).name());
assertEquals("attributeprefetch", db.summaryclass(1).name());
assertSummaryField(db, 0, 0, "f1", "longstring", true);
assertSummaryField(db, 0, 1, "f2", "integer", false);
}
{
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(1);
assertEquals("type2", db.name());
}
}
// Attributes config: the searcher sees the union of attributes from both types.
{
AttributesConfig acfg = model.getConfig(AttributesConfig.class, searcherId);
assertEquals(4, acfg.attribute().size());
assertEquals("f2", acfg.attribute(0).name());
assertEquals("f2_nfa", acfg.attribute(1).name());
assertEquals("f4", acfg.attribute(2).name());
assertEquals("f4_nfa", acfg.attribute(3).name());
}
}
// Asserts the name and the summary/rank feature flags of the rank profile at the given index.
private void assertRankProfile(DocumentdbInfoConfig.Documentdb db, int index, String name,
                               boolean hasSummaryFeatures, boolean hasRankFeatures) {
    DocumentdbInfoConfig.Documentdb.Rankprofile profile = db.rankprofile(index);
    assertEquals(name, profile.name());
    assertEquals(hasSummaryFeatures, profile.hasSummaryFeatures());
    assertEquals(hasRankFeatures, profile.hasRankFeatures());
}
// Asserts one summary field (name, type, dynamic flag) inside the given summary class.
private void assertSummaryField(DocumentdbInfoConfig.Documentdb db, int summaryClassIndex, int fieldIndex,
                                String name, String type, boolean dynamic) {
    DocumentdbInfoConfig.Documentdb.Summaryclass summaryClass = db.summaryclass(summaryClassIndex);
    DocumentdbInfoConfig.Documentdb.Summaryclass.Fields field = summaryClass.fields(fieldIndex);
    assertEquals(name, field.name());
    assertEquals(type, field.type());
    assertEquals(dynamic, field.dynamic());
}
// Builds a one-type model in the given indexing mode and checks that document-db info
// config is produced under the per-type config id "test/search/cluster.test.type".
private void assertDocumentDBConfigAvailableForStreaming(String mode) {
final List<String> sds = Arrays.asList("type");
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
DocumentdbInfoConfig dcfg = model.getConfig(DocumentdbInfoConfig.class, "test/search/cluster.test.type");
assertEquals(1, dcfg.documentdb().size());
DocumentdbInfoConfig.Documentdb db = dcfg.documentdb(0);
assertEquals("type", db.name());
}
// Streaming mode must still expose document-db info config (see helper above).
@Test
public void requireThatDocumentDBConfigIsAvailableForStreaming() throws ParseException, IOException, SAXException {
assertDocumentDBConfigAvailableForStreaming("streaming");
}
// Builds a model in the given mode and verifies, per document db, the proton config id
// and (when expectations are given) the exact attribute names produced.
// expectedAttributesMap: doc type name -> expected attribute names; types absent from
// the map have their attributes config left unchecked.
private void assertAttributesConfigIndependentOfMode(String mode, List<String> sds,
List<String> documentDBConfigIds,
Map<String, List<String>> expectedAttributesMap) {
VespaModel model = new VespaModelCreatorWithMockPkg(vespaHosts, createVespaServices(sds, mode),
ApplicationPackageUtils.generateSearchDefinitions(sds)).create();
ContentSearchCluster contentSearchCluster = model.getContentClusters().get("test").getSearch();
ProtonConfig proton = getProtonCfg(contentSearchCluster);
assertEquals(sds.size(), proton.documentdb().size());
for (int i = 0; i < sds.size(); i++) {
assertEquals(sds.get(i), proton.documentdb(i).inputdoctypename());
assertEquals(documentDBConfigIds.get(i), proton.documentdb(i).configid());
List<String> expectedAttributes = expectedAttributesMap.get(sds.get(i));
if (expectedAttributes != null) {
AttributesConfig rac1 = model.getConfig(AttributesConfig.class, proton.documentdb(i).configid());
assertEquals(expectedAttributes.size(), rac1.attribute().size());
for (int j = 0; j < expectedAttributes.size(); j++) {
assertEquals(expectedAttributes.get(j), rac1.attribute(j).name());
}
}
}
}
// "index" mode must produce attributes config (f2 plus its non-fast-access companion).
@Test
public void testThatAttributesConfigIsProducedForIndexed() {
assertAttributesConfigIndependentOfMode("index", Arrays.asList("type1"),
Arrays.asList("test/search/cluster.test/type1"),
ImmutableMap.of("type1", Arrays.asList("f2", "f2_nfa")));
}
// Fix: the method carried a duplicate @Test annotation; @Test is not @Repeatable,
// so annotating the method twice does not compile. One annotation kept.
// "store-only" mode must not emit any attributes config, even for fast-access fields.
@Test
public void testThatAttributesConfigIsNotProducedForStoreOnlyEvenForFastAccessFields() {
    assertAttributesConfigIndependentOfMode("store-only", Arrays.asList("type1"),
            Arrays.asList("test/search"), Collections.emptyMap());
}
} |
I think we should throw a timeout exception if we time out. It's up to the caller to ignore timeouts or not. | private ProcessResult executeInContainerAsUser(ContainerName containerName, String user, Optional<Long> timeoutSeconds, String... command) {
assert command.length >= 1;
try {
    // Register the exec instance with stdout/stderr attached, then start it and
    // collect both streams into in-memory buffers.
    final ExecCreateCmdResponse response = dockerClient.execCreateCmd(containerName.asString())
            .withCmd(command)
            .withAttachStdout(true)
            .withAttachStderr(true)
            .withUser(user)
            .exec();
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    ByteArrayOutputStream errors = new ByteArrayOutputStream();
    ExecStartCmd execStartCmd = dockerClient.execStartCmd(response.getId());
    ExecStartResultCallback callback = execStartCmd.exec(new ExecStartResultCallback(output, errors));
    if (timeoutSeconds.isPresent()) {
        // Fix: awaitCompletion(timeout, unit) returns false when the wait timed out.
        // The result was previously ignored, silently treating a timed-out command as
        // completed. Throw instead — it is up to the caller to ignore timeouts or not.
        if (!callback.awaitCompletion(timeoutSeconds.get(), TimeUnit.SECONDS)) {
            throw new DockerExecTimeoutException(String.format(
                    "Command '%s' did not finish within %d seconds.", command[0], timeoutSeconds.get()));
        }
    } else {
        // No timeout requested: wait indefinitely for the command to finish.
        callback.awaitCompletion();
    }
    final InspectExecResponse state = dockerClient.inspectExecCmd(execStartCmd.getExecId()).exec();
    assert !state.isRunning();
    Integer exitCode = state.getExitCode();
    assert exitCode != null;
    return new ProcessResult(exitCode, new String(output.toByteArray()), new String(errors.toByteArray()));
} catch (DockerException | InterruptedException e) {
    numberOfDockerDaemonFails.add();
    throw new RuntimeException("Container '" + containerName.asString()
            + "' failed to execute " + Arrays.toString(command), e);
}
} | callback.awaitCompletion(timeoutSeconds.get(), TimeUnit.SECONDS); | private ProcessResult executeInContainerAsUser(ContainerName containerName, String user, Optional<Long> timeoutSeconds, String... command) {
// Runs the exec with stdout/stderr captured; a bounded wait that times out now raises
// DockerExecTimeoutException instead of being ignored.
try {
final ExecCreateCmdResponse response = dockerClient.execCreateCmd(containerName.asString())
.withCmd(command)
.withAttachStdout(true)
.withAttachStderr(true)
.withUser(user)
.exec();
ByteArrayOutputStream output = new ByteArrayOutputStream();
ByteArrayOutputStream errors = new ByteArrayOutputStream();
ExecStartCmd execStartCmd = dockerClient.execStartCmd(response.getId());
ExecStartResultCallback callback = execStartCmd.exec(new ExecStartResultCallback(output, errors));
if (timeoutSeconds.isPresent()) {
// awaitCompletion returns false on timeout; surface that to the caller.
if (!callback.awaitCompletion(timeoutSeconds.get(), TimeUnit.SECONDS)) {
// NOTE(review): %s is given the Optional itself, so the message renders as
// "Optional[N] seconds" — consider timeoutSeconds.get(); confirm intended.
throw new DockerExecTimeoutException(String.format("Command '%s' did not finish within %s seconds.", command[0], timeoutSeconds));
}
} else {
// No timeout requested: wait indefinitely.
callback.awaitCompletion();
}
final InspectExecResponse state = dockerClient.inspectExecCmd(execStartCmd.getExecId()).exec();
assert !state.isRunning();
Integer exitCode = state.getExitCode();
assert exitCode != null;
return new ProcessResult(exitCode, new String(output.toByteArray()), new String(errors.toByteArray()));
} catch (DockerException | InterruptedException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Container '" + containerName.asString()
+ "' failed to execute " + Arrays.toString(command), e);
}
} | class DockerImpl implements Docker {
private static final Logger logger = Logger.getLogger(DockerImpl.class.getName());
public static final String DOCKER_CUSTOM_MACVLAN_NETWORK_NAME = "vespa-macvlan";
static final String LABEL_NAME_MANAGEDBY = "com.yahoo.vespa.managedby";
private final int SECONDS_TO_WAIT_BEFORE_KILLING;
private static final String FRAMEWORK_CONTAINER_PREFIX = "/";
private Optional<DockerImageGarbageCollector> dockerImageGC = Optional.empty();
private CounterWrapper numberOfDockerDaemonFails;
private final Object monitor = new Object();
@GuardedBy("monitor")
private final Map<DockerImage, CompletableFuture<DockerImage>> scheduledPulls = new HashMap<>();
final DockerClient dockerClient;
// Package-private constructor wrapping an existing client (used by tests);
// uses the default 10s grace period before a stopped container is killed.
DockerImpl(final DockerClient dockerClient) {
this.dockerClient = dockerClient;
this.SECONDS_TO_WAIT_BEFORE_KILLING = 10;
}
// Builds a client from config: stop-kill grace period, connection setup
// (with optional fallback to remote API 1.23 on version-probe errors) and metrics.
DockerImpl(
final DockerConfig config,
boolean fallbackTo123OnErrors,
MetricReceiverWrapper metricReceiverWrapper) {
SECONDS_TO_WAIT_BEFORE_KILLING = config.secondsToWaitBeforeKillingContainer();
dockerClient = initDockerConnection(config, fallbackTo123OnErrors);
setMetrics(metricReceiverWrapper);
}
// Injected production constructor. When not running locally it additionally enables
// image garbage collection and creates the macvlan network if missing.
@Inject
public DockerImpl(final DockerConfig config, MetricReceiverWrapper metricReceiver) {
this(
config,
true, /* fallback to 1.23 on errors */
metricReceiver);
if (! config.isRunningLocally()) {
Duration minAgeToDelete = Duration.ofMinutes(config.imageGCMinTimeToLiveMinutes());
dockerImageGC = Optional.of(new DockerImageGarbageCollector(minAgeToDelete));
try {
setupDockerNetworkIfNeeded();
} catch (Exception e) {
throw new RuntimeException("Could not setup docker network", e);
}
}
}
// Builds the docker-java client config for the configured daemon URI.
// Custom SSL is applied only for tcp:// endpoints that also configure a CA cert path.
static DefaultDockerClientConfig.Builder buildDockerClientConfig(DockerConfig config) {
DefaultDockerClientConfig.Builder dockerConfigBuilder = new DefaultDockerClientConfig.Builder()
.withDockerHost(config.uri());
if (URI.create(config.uri()).getScheme().equals("tcp") && !config.caCertPath().isEmpty()) {
dockerConfigBuilder.withCustomSslConfig(new VespaSSLConfig(config));
}
return dockerConfigBuilder;
}
// Creates the vespa macvlan network once, keyed to this host's address.
// No-op if a network with that name already exists.
private void setupDockerNetworkIfNeeded() throws IOException {
if (! dockerClient.listNetworksCmd().withNameFilter(DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).exec().isEmpty()) return;
// Pick the host address with the longest byte representation, i.e. prefer IPv6
// (16 bytes) over IPv4 (4 bytes) when both are available.
List<InetAddress> hostAddresses = Arrays.asList(InetAddress.getAllByName(com.yahoo.net.HostName.getLocalhost()));
InetAddress hostAddress = Collections.max(hostAddresses,
(o1, o2) -> o1.getAddress().length - o2.getAddress().length);
NetworkAddressInterface networkAddressInterface = DockerNetworkCreator.getInterfaceForAddress(hostAddress);
boolean isIPv6 = networkAddressInterface.interfaceAddress.getAddress() instanceof Inet6Address;
// IPAM: containers share the host's subnet, routed via the host's default gateway.
Network.Ipam ipam = new Network.Ipam().withConfig(new Network.Ipam.Config()
.withSubnet(hostAddress.getHostAddress() + "/" + networkAddressInterface.interfaceAddress.getNetworkPrefixLength())
.withGateway(DockerNetworkCreator.getDefaultGatewayLinux(isIPv6).getHostAddress()));
Map<String, String> dockerNetworkOptions = new HashMap<>();
dockerNetworkOptions.put("parent", networkAddressInterface.networkInterface.getDisplayName());
dockerNetworkOptions.put("macvlan_mode", "bridge");
dockerClient.createNetworkCmd()
.withName(DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withDriver("macvlan")
.withEnableIpv6(isIPv6)
.withIpam(ipam)
.withOptions(dockerNetworkOptions)
.exec();
}
// Copies a host-side resource into the given container path via the daemon's
// archive API; daemon failures are counted and rethrown as RuntimeException.
@Override
public void copyArchiveToContainer(String sourcePath, ContainerName destinationContainer, String destinationPath) {
try {
dockerClient.copyArchiveToContainerCmd(destinationContainer.asString())
.withHostResource(sourcePath).withRemotePath(destinationPath).exec();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to copy container " + sourcePath + " to " +
destinationContainer + ":" + destinationPath, e);
}
}
/**
 * Starts an asynchronous pull of the given image, deduplicating concurrent requests:
 * if a pull of the same image is already in flight, its future is returned instead.
 * The returned future is completed (or failed) by {@link ImagePullCallback}.
 */
@Override
public CompletableFuture<DockerImage> pullImageAsync(final DockerImage image) {
    final CompletableFuture<DockerImage> completionListener;
    synchronized (monitor) {
        if (scheduledPulls.containsKey(image)) {
            return scheduledPulls.get(image);
        }
        completionListener = new CompletableFuture<>();
        scheduledPulls.put(image, completionListener);
    }
    try {
        dockerClient.pullImageCmd(image.asString()).exec(new ImagePullCallback(image));
    } catch (DockerException e) {
        numberOfDockerDaemonFails.add();
        // Fix: previously the registered future was left in scheduledPulls when exec()
        // threw, so every later pull of this image got a future that could never
        // complete. Unregister it and fail it for anyone already holding a reference.
        removeScheduledPoll(image).completeExceptionally(e);
        throw new RuntimeException("Failed to pull image '" + image.asString() + "'", e);
    }
    return completionListener;
}
// Unregisters the in-flight pull for the image and returns its future
// (null if no pull was registered). Synchronized on the same monitor as pullImageAsync.
private CompletableFuture<DockerImage> removeScheduledPoll(final DockerImage image) {
synchronized (monitor) {
return scheduledPulls.remove(image);
}
}
/**
 * Returns whether the given image already exists in the local Docker registry.
 */
@Override
public boolean imageIsDownloaded(final DockerImage dockerImage) {
    Optional<Image> localImage = inspectImage(dockerImage);
    return localImage.isPresent();
}
// Looks the image up among all local images (including intermediates); empty if absent.
private Optional<Image> inspectImage(DockerImage dockerImage) {
try {
return dockerClient.listImagesCmd().withShowAll(true)
.withImageNameFilter(dockerImage.asString()).exec().stream().findFirst();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to inspect image '" + dockerImage.asString() + "'", e);
}
}
// Returns a builder for creating a container from the image; nothing is executed here.
@Override
public CreateContainerCommand createContainerCommand(DockerImage image, ContainerName name, String hostName) {
return new CreateContainerCommandImpl(dockerClient, image, name, hostName);
}
// Attaches an existing container to the named docker network.
@Override
public void connectContainerToNetwork(ContainerName containerName, String networkName) {
try {
dockerClient.connectToNetworkCmd()
.withContainerId(containerName.asString())
.withNetworkId(networkName).exec();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to connect container '" + containerName.asString() +
"' to network '" + networkName + "'", e);
}
}
// Convenience wrapper: run the command inside the container as the "yahoo" user.
@Override
public ProcessResult executeInContainer(ContainerName containerName, Optional<Long> timeoutSeconds, String... args) {
return executeInContainerAsUser(containerName, "yahoo", timeoutSeconds, args);
}
// Convenience wrapper: run the command inside the container as root.
@Override
public ProcessResult executeInContainerAsRoot(ContainerName containerName, Optional<Long> timeoutSeconds, String... args) {
return executeInContainerAsUser(containerName,"root", timeoutSeconds, args);
}
/**
* Execute command in container as user, "user" can be "username", "username:group", "uid" or "uid:gid"
*/
// NOTE(review): the Javadoc above describes executeInContainerAsUser, not the method
// below — it appears detached from its method here; confirm intended placement.
// Inspects a container by name or id; empty if the container does not exist.
private Optional<InspectContainerResponse> inspectContainerCmd(String container) {
try {
return Optional.of(dockerClient.inspectContainerCmd(container).exec());
} catch (NotFoundException ignored) {
// A missing container is an expected outcome for callers, not an error.
return Optional.empty();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to get info for container '" + container + "'", e);
}
}
// Fetches one statistics sample for the container; empty if the container is unknown.
@Override
public Optional<ContainerStats> getContainerStats(ContainerName containerName) {
try {
DockerStatsCallback statsCallback = dockerClient.statsCmd(containerName.asString()).exec(new DockerStatsCallback());
// Bounded wait; the boolean result is ignored, so if no sample arrives within
// 5 seconds `stats` stays empty and Optional.empty() is returned below.
statsCallback.awaitCompletion(5, TimeUnit.SECONDS);
return statsCallback.stats.map(stats -> new ContainerStatsImpl(
stats.getNetworks(), stats.getCpuStats(), stats.getMemoryStats(), stats.getBlkioStats()));
} catch (NotFoundException ignored) {
return Optional.empty();
} catch (DockerException | InterruptedException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to get stats for container '" + containerName.asString() + "'", e);
}
}
// Starts the container; an already-running container (NotModified) is silently accepted.
@Override
public void startContainer(ContainerName containerName) {
try {
dockerClient.startContainerCmd(containerName.asString()).exec();
} catch (NotModifiedException ignored) {
// Already started — idempotent by design.
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to start container '" + containerName.asString() + "'", e);
}
}
// Stops the container, giving it SECONDS_TO_WAIT_BEFORE_KILLING to exit before a kill;
// an already-stopped container (NotModified) is silently accepted.
@Override
public void stopContainer(final ContainerName containerName) {
try {
dockerClient.stopContainerCmd(containerName.asString()).withTimeout(SECONDS_TO_WAIT_BEFORE_KILLING).exec();
} catch (NotModifiedException ignored) {
// Already stopped — idempotent by design.
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to stop container '" + containerName.asString() + "'", e);
}
}
// Removes the container. Before deleting, records the container's image as recently
// used so the image GC does not immediately collect it. Missing containers are ignored.
@Override
public void deleteContainer(ContainerName containerName) {
try {
dockerImageGC.ifPresent(imageGC -> {
Optional<InspectContainerResponse> inspectResponse = inspectContainerCmd(containerName.asString());
inspectResponse.ifPresent(response -> imageGC.updateLastUsedTimeFor(response.getImageId()));
});
dockerClient.removeContainerCmd(containerName.asString()).exec();
} catch (NotFoundException ignored) {
// Already gone — idempotent by design.
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to delete container '" + containerName.asString() + "'", e);
}
}
// Lists every container whose management label matches the given manager, resolving
// each id to a full Container via inspect; containers that disappear in between are dropped.
@Override
public List<Container> getAllContainersManagedBy(String manager) {
    List<Container> managed = new ArrayList<>();
    for (com.github.dockerjava.api.model.Container candidate : listAllContainers()) {
        if (isManagedBy(candidate, manager)) {
            asContainer(candidate.getId()).forEach(managed::add);
        }
    }
    return managed;
}
// Returns the container with the given name, or empty if it does not exist.
@Override
public Optional<Container> getContainer(ContainerName containerName) {
return asContainer(containerName.asString()).findFirst();
}
// Inspects the container id/name and maps the response to our Container value;
// an empty stream means the container does not exist.
private Stream<Container> asContainer(String container) {
return inspectContainerCmd(container)
.map(response ->
new Container(
response.getConfig().getHostName(),
new DockerImage(response.getConfig().getImage()),
new ContainerName(decode(response.getName())),
Container.State.valueOf(response.getState().getStatus().toUpperCase()),
response.getState().getPid(),
response.getCreated()
))
.map(Stream::of)
.orElse(Stream.empty());
}
// True if the container carries the managed-by label with exactly the given manager value.
private boolean isManagedBy(final com.github.dockerjava.api.model.Container container, String manager) {
    final Map<String, String> labels = container.getLabels();
    if (labels == null) return false;  // unlabeled containers are never "managed"
    return manager.equals(labels.get(LABEL_NAME_MANAGEDBY));
}
// Strips the leading "/" the daemon prepends to container names in inspect responses.
private String decode(String encodedContainerName) {
return encodedContainerName.substring(FRAMEWORK_CONTAINER_PREFIX.length());
}
// Lists all containers, including stopped ones.
private List<com.github.dockerjava.api.model.Container> listAllContainers() {
try {
return dockerClient.listContainersCmd().withShowAll(true).exec();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to list all containers", e);
}
}
// Lists all local images, including intermediate layers.
private List<Image> listAllImages() {
try {
return dockerClient.listImagesCmd().withShowAll(true).exec();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to list all images", e);
}
}
// Removes the image from the local registry; a missing image is silently ignored.
@Override
public void deleteImage(final DockerImage dockerImage) {
try {
dockerClient.removeImageCmd(dockerImage.asString()).exec();
} catch (NotFoundException ignored) {
// Already gone — idempotent by design.
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to delete docker image " + dockerImage.asString(), e);
}
}
// Builds an image from the Dockerfile, tags it with the given name, and blocks
// until the build completes.
@Override
public void buildImage(File dockerfile, DockerImage image) {
try {
dockerClient.buildImageCmd(dockerfile).withTag(image.asString())
.exec(new BuildImageResultCallback()).awaitImageId();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to build image " + image.asString(), e);
}
}
// Deletes every image the GC deems unused. Image GC is only configured when not
// running locally, so without it this call is a no-op.
@Override
public void deleteUnusedDockerImages() {
    dockerImageGC.ifPresent(imageGC ->
            imageGC.getUnusedDockerImages(listAllImages(), listAllContainers())
                    .forEach(this::deleteImage));
}
// Completes (or fails) the future registered in scheduledPulls when a pull finishes.
// NOTE(review): removeScheduledPoll can return null if the pull was already
// unregistered; that would NPE here — confirm whether that race is possible.
private class ImagePullCallback extends PullImageResultCallback {
private final DockerImage dockerImage;
private ImagePullCallback(DockerImage dockerImage) {
this.dockerImage = dockerImage;
}
@Override
public void onError(Throwable throwable) {
removeScheduledPoll(dockerImage).completeExceptionally(throwable);
}
@Override
public void onComplete() {
// The daemon may report completion without the image actually being present;
// verify locally before completing the future.
Optional<Image> image = inspectImage(dockerImage);
if (image.isPresent()) {
dockerImageGC.ifPresent(imageGC -> imageGC.updateLastUsedTimeFor(image.get().getId()));
removeScheduledPoll(dockerImage).complete(dockerImage);
} else {
removeScheduledPoll(dockerImage).completeExceptionally(
new DockerClientException("Could not download image: " + dockerImage));
}
}
}
// Captures the first statistics sample from the streaming stats endpoint and then
// completes, so callers only wait for a single snapshot.
private class DockerStatsCallback extends ResultCallbackTemplate<DockerStatsCallback, Statistics> {
private Optional<Statistics> stats = Optional.empty();
private final CountDownLatch completed = new CountDownLatch(1);
@Override
public void onNext(Statistics stats) {
if (stats != null) {
this.stats = Optional.of(stats);
completed.countDown();
// Close the stream after the first sample.
onComplete();
}
}
@Override
public boolean awaitCompletion(long timeout, TimeUnit timeUnit) throws InterruptedException {
// Returns false if no sample arrived within the timeout.
return completed.await(timeout, timeUnit);
}
}
// Creates the docker-java client. Probes the daemon's remote API version first;
// versions >= 1.24 are clamped to 1.23, and probe failures optionally fall back
// to 1.23 instead of propagating.
private DockerClient initDockerConnection(final DockerConfig config, boolean fallbackTo123orErrors) {
JerseyDockerCmdExecFactory dockerFactory = new JerseyDockerCmdExecFactory()
.withMaxPerRouteConnections(config.maxPerRouteConnections())
.withMaxTotalConnections(config.maxTotalConnections())
.withConnectTimeout(config.connectTimeoutMillis())
.withReadTimeout(config.readTimeoutMillis());
RemoteApiVersion remoteApiVersion;
try {
remoteApiVersion = RemoteApiVersion.parseConfig(DockerClientImpl.getInstance(
buildDockerClientConfig(config).build())
.withDockerCmdExecFactory(dockerFactory).versionCmd().exec().getApiVersion());
logger.info("Found version of remote docker API: "+ remoteApiVersion);
// Never use 1.24 or newer — pin to 1.23.
if (remoteApiVersion.isGreaterOrEqual(RemoteApiVersion.VERSION_1_24)) {
remoteApiVersion = RemoteApiVersion.VERSION_1_23;
logger.info("Found version 1.24 or newer of remote API, using 1.23.");
}
} catch (Exception e) {
if (! fallbackTo123orErrors) {
throw e;
}
logger.log(LogLevel.ERROR, "Failed when trying to figure out remote API version of docker, using 1.23", e);
remoteApiVersion = RemoteApiVersion.VERSION_1_23;
}
return DockerClientImpl.getInstance(
buildDockerClientConfig(config)
.withApiVersion(remoteApiVersion)
.build())
.withDockerCmdExecFactory(dockerFactory);
}
// Declares the daemon-failure counter, dimensioned by host and the "docker" role.
private void setMetrics(MetricReceiverWrapper metricReceiver) {
Dimensions dimensions = new Dimensions.Builder()
.add("host", HostName.getLocalhost())
.add("role", "docker").build();
numberOfDockerDaemonFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "daemon.api_fails");
}
} | class DockerImpl implements Docker {
private static final Logger logger = Logger.getLogger(DockerImpl.class.getName());
public static final String DOCKER_CUSTOM_MACVLAN_NETWORK_NAME = "vespa-macvlan";
static final String LABEL_NAME_MANAGEDBY = "com.yahoo.vespa.managedby";
private final int SECONDS_TO_WAIT_BEFORE_KILLING;
private static final String FRAMEWORK_CONTAINER_PREFIX = "/";
private Optional<DockerImageGarbageCollector> dockerImageGC = Optional.empty();
private CounterWrapper numberOfDockerDaemonFails;
private final Object monitor = new Object();
@GuardedBy("monitor")
private final Map<DockerImage, CompletableFuture<DockerImage>> scheduledPulls = new HashMap<>();
final DockerClient dockerClient;
DockerImpl(final DockerClient dockerClient) {
this.dockerClient = dockerClient;
this.SECONDS_TO_WAIT_BEFORE_KILLING = 10;
}
DockerImpl(
final DockerConfig config,
boolean fallbackTo123OnErrors,
MetricReceiverWrapper metricReceiverWrapper) {
SECONDS_TO_WAIT_BEFORE_KILLING = config.secondsToWaitBeforeKillingContainer();
dockerClient = initDockerConnection(config, fallbackTo123OnErrors);
setMetrics(metricReceiverWrapper);
}
@Inject
public DockerImpl(final DockerConfig config, MetricReceiverWrapper metricReceiver) {
this(
config,
true, /* fallback to 1.23 on errors */
metricReceiver);
if (!config.isRunningLocally()) {
Duration minAgeToDelete = Duration.ofMinutes(config.imageGCMinTimeToLiveMinutes());
dockerImageGC = Optional.of(new DockerImageGarbageCollector(minAgeToDelete));
try {
setupDockerNetworkIfNeeded();
} catch (Exception e) {
throw new RuntimeException("Could not setup docker network", e);
}
}
}
static DefaultDockerClientConfig.Builder buildDockerClientConfig(DockerConfig config) {
DefaultDockerClientConfig.Builder dockerConfigBuilder = new DefaultDockerClientConfig.Builder()
.withDockerHost(config.uri());
if (URI.create(config.uri()).getScheme().equals("tcp") && !config.caCertPath().isEmpty()) {
dockerConfigBuilder.withCustomSslConfig(new VespaSSLConfig(config));
}
return dockerConfigBuilder;
}
private void setupDockerNetworkIfNeeded() throws IOException {
if (!dockerClient.listNetworksCmd().withNameFilter(DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).exec().isEmpty()) return;
List<InetAddress> hostAddresses = Arrays.asList(InetAddress.getAllByName(com.yahoo.net.HostName.getLocalhost()));
InetAddress hostAddress = Collections.max(hostAddresses,
(o1, o2) -> o1.getAddress().length - o2.getAddress().length);
NetworkAddressInterface networkAddressInterface = DockerNetworkCreator.getInterfaceForAddress(hostAddress);
boolean isIPv6 = networkAddressInterface.interfaceAddress.getAddress() instanceof Inet6Address;
Network.Ipam ipam = new Network.Ipam().withConfig(new Network.Ipam.Config()
.withSubnet(hostAddress.getHostAddress() + "/" + networkAddressInterface.interfaceAddress.getNetworkPrefixLength())
.withGateway(DockerNetworkCreator.getDefaultGatewayLinux(isIPv6).getHostAddress()));
Map<String, String> dockerNetworkOptions = new HashMap<>();
dockerNetworkOptions.put("parent", networkAddressInterface.networkInterface.getDisplayName());
dockerNetworkOptions.put("macvlan_mode", "bridge");
dockerClient.createNetworkCmd()
.withName(DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withDriver("macvlan")
.withEnableIpv6(isIPv6)
.withIpam(ipam)
.withOptions(dockerNetworkOptions)
.exec();
}
@Override
public void copyArchiveToContainer(String sourcePath, ContainerName destinationContainer, String destinationPath) {
try {
dockerClient.copyArchiveToContainerCmd(destinationContainer.asString())
.withHostResource(sourcePath).withRemotePath(destinationPath).exec();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to copy container " + sourcePath + " to " +
destinationContainer + ":" + destinationPath, e);
}
}
/**
 * Starts an asynchronous pull of the given image, deduplicating concurrent requests:
 * if a pull of the same image is already in flight, its future is returned instead.
 * The returned future is completed (or failed) by {@link ImagePullCallback}.
 */
@Override
public CompletableFuture<DockerImage> pullImageAsync(final DockerImage image) {
    final CompletableFuture<DockerImage> completionListener;
    synchronized (monitor) {
        if (scheduledPulls.containsKey(image)) {
            return scheduledPulls.get(image);
        }
        completionListener = new CompletableFuture<>();
        scheduledPulls.put(image, completionListener);
    }
    try {
        dockerClient.pullImageCmd(image.asString()).exec(new ImagePullCallback(image));
    } catch (DockerException e) {
        numberOfDockerDaemonFails.add();
        // Fix: previously the registered future was left in scheduledPulls when exec()
        // threw, so every later pull of this image got a future that could never
        // complete. Unregister it and fail it for anyone already holding a reference.
        removeScheduledPoll(image).completeExceptionally(e);
        throw new RuntimeException("Failed to pull image '" + image.asString() + "'", e);
    }
    return completionListener;
}
private CompletableFuture<DockerImage> removeScheduledPoll(final DockerImage image) {
synchronized (monitor) {
return scheduledPulls.remove(image);
}
}
/**
* Check if a given image is already in the local registry
*/
@Override
public boolean imageIsDownloaded(final DockerImage dockerImage) {
return inspectImage(dockerImage).isPresent();
}
private Optional<Image> inspectImage(DockerImage dockerImage) {
try {
return dockerClient.listImagesCmd().withShowAll(true)
.withImageNameFilter(dockerImage.asString()).exec().stream().findFirst();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to inspect image '" + dockerImage.asString() + "'", e);
}
}
@Override
public CreateContainerCommand createContainerCommand(DockerImage image, ContainerName name, String hostName) {
return new CreateContainerCommandImpl(dockerClient, image, name, hostName);
}
@Override
public void connectContainerToNetwork(ContainerName containerName, String networkName) {
try {
dockerClient.connectToNetworkCmd()
.withContainerId(containerName.asString())
.withNetworkId(networkName).exec();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to connect container '" + containerName.asString() +
"' to network '" + networkName + "'", e);
}
}
// Convenience wrapper: run as the "yahoo" user with no timeout (waits indefinitely).
@Override
public ProcessResult executeInContainer(ContainerName containerName, String... args) {
return executeInContainerAsUser(containerName, "yahoo", Optional.empty(), args);
}
// Convenience wrapper: run as root with no timeout (waits indefinitely).
@Override
public ProcessResult executeInContainerAsRoot(ContainerName containerName, String... args) {
return executeInContainerAsUser(containerName, "root", Optional.empty(), args);
}
// Convenience wrapper: run as root, failing with a timeout exception if the command
// does not finish within timeoutSeconds.
@Override
public ProcessResult executeInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... args) {
return executeInContainerAsUser(containerName, "root", Optional.of(timeoutSeconds), args);
}
/**
* Execute command in container as user, "user" can be "username", "username:group", "uid" or "uid:gid"
*/
private Optional<InspectContainerResponse> inspectContainerCmd(String container) {
try {
return Optional.of(dockerClient.inspectContainerCmd(container).exec());
} catch (NotFoundException ignored) {
return Optional.empty();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to get info for container '" + container + "'", e);
}
}
@Override
public Optional<ContainerStats> getContainerStats(ContainerName containerName) {
try {
DockerStatsCallback statsCallback = dockerClient.statsCmd(containerName.asString()).exec(new DockerStatsCallback());
statsCallback.awaitCompletion(5, TimeUnit.SECONDS);
return statsCallback.stats.map(stats -> new ContainerStatsImpl(
stats.getNetworks(), stats.getCpuStats(), stats.getMemoryStats(), stats.getBlkioStats()));
} catch (NotFoundException ignored) {
return Optional.empty();
} catch (DockerException | InterruptedException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to get stats for container '" + containerName.asString() + "'", e);
}
}
@Override
public void startContainer(ContainerName containerName) {
try {
dockerClient.startContainerCmd(containerName.asString()).exec();
} catch (NotModifiedException ignored) {
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to start container '" + containerName.asString() + "'", e);
}
}
@Override
public void stopContainer(final ContainerName containerName) {
try {
dockerClient.stopContainerCmd(containerName.asString()).withTimeout(SECONDS_TO_WAIT_BEFORE_KILLING).exec();
} catch (NotModifiedException ignored) {
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to stop container '" + containerName.asString() + "'", e);
}
}
@Override
public void deleteContainer(ContainerName containerName) {
try {
dockerImageGC.ifPresent(imageGC -> {
Optional<InspectContainerResponse> inspectResponse = inspectContainerCmd(containerName.asString());
inspectResponse.ifPresent(response -> imageGC.updateLastUsedTimeFor(response.getImageId()));
});
dockerClient.removeContainerCmd(containerName.asString()).exec();
} catch (NotFoundException ignored) {
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to delete container '" + containerName.asString() + "'", e);
}
}
@Override
public List<Container> getAllContainersManagedBy(String manager) {
return listAllContainers().stream()
.filter(container -> isManagedBy(container, manager))
.map(com.github.dockerjava.api.model.Container::getId)
.flatMap(this::asContainer)
.collect(Collectors.toList());
}
@Override
public Optional<Container> getContainer(ContainerName containerName) {
return asContainer(containerName.asString()).findFirst();
}
private Stream<Container> asContainer(String container) {
return inspectContainerCmd(container)
.map(response ->
new Container(
response.getConfig().getHostName(),
new DockerImage(response.getConfig().getImage()),
new ContainerName(decode(response.getName())),
Container.State.valueOf(response.getState().getStatus().toUpperCase()),
response.getState().getPid(),
response.getCreated()
))
.map(Stream::of)
.orElse(Stream.empty());
}
private boolean isManagedBy(final com.github.dockerjava.api.model.Container container, String manager) {
final Map<String, String> labels = container.getLabels();
return labels != null && manager.equals(labels.get(LABEL_NAME_MANAGEDBY));
}
private String decode(String encodedContainerName) {
return encodedContainerName.substring(FRAMEWORK_CONTAINER_PREFIX.length());
}
/**
 * Lists every container known to the docker daemon.
 *
 * @return all containers, including stopped/created ones (showAll = true)
 * @throws RuntimeException if the daemon call fails; also bumps the daemon-failure metric
 */
private List<com.github.dockerjava.api.model.Container> listAllContainers() {
try {
// withShowAll(true): include created/exited containers, not only running ones.
return dockerClient.listContainersCmd().withShowAll(true).exec();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to list all containers", e);
}
}
/**
 * Lists every image known to the docker daemon.
 *
 * @return all images (showAll = true, so untagged/intermediate images are included)
 * @throws RuntimeException if the daemon call fails; also bumps the daemon-failure metric
 */
private List<Image> listAllImages() {
try {
// withShowAll(true): include intermediate/untagged image layers as well.
return dockerClient.listImagesCmd().withShowAll(true).exec();
} catch (DockerException e) {
numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to list all images", e);
}
}
@Override
public void deleteImage(final DockerImage dockerImage) {
    final String imageReference = dockerImage.asString();
    try {
        dockerClient.removeImageCmd(imageReference).exec();
    } catch (NotFoundException ignored) {
        // Image already removed — treat deletion as idempotent.
    } catch (DockerException e) {
        numberOfDockerDaemonFails.add();
        throw new RuntimeException("Failed to delete docker image " + imageReference, e);
    }
}
@Override
public void buildImage(File dockerfile, DockerImage image) {
    final String tag = image.asString();
    try {
        // awaitImageId() blocks until the build has finished (or failed).
        dockerClient.buildImageCmd(dockerfile)
                .withTag(tag)
                .exec(new BuildImageResultCallback())
                .awaitImageId();
    } catch (DockerException e) {
        numberOfDockerDaemonFails.add();
        throw new RuntimeException("Failed to build image " + tag, e);
    }
}
@Override
public void deleteUnusedDockerImages() {
    // No-op unless image GC is enabled for this instance.
    dockerImageGC.ifPresent(imageGC ->
            imageGC.getUnusedDockerImages(listAllImages(), listAllContainers())
                    .forEach(this::deleteImage));
}
/**
 * Completion callback for an asynchronous image pull. When the pull stream ends it
 * resolves the pending future for this image — removeScheduledPoll(dockerImage)
 * presumably returns (and unregisters) the future registered when the pull was
 * scheduled, allowing a later pull of the same image to be re-issued; TODO confirm
 * against pullImageAsync, which is outside this view.
 */
private class ImagePullCallback extends PullImageResultCallback {
private final DockerImage dockerImage;
private ImagePullCallback(DockerImage dockerImage) {
this.dockerImage = dockerImage;
}
@Override
public void onError(Throwable throwable) {
removeScheduledPoll(dockerImage).completeExceptionally(throwable);
}
@Override
public void onComplete() {
// onComplete only means the pull stream ended; verify the image actually arrived.
Optional<Image> image = inspectImage(dockerImage);
if (image.isPresent()) {
// Mark the freshly pulled image as used so the image GC does not delete it right away.
dockerImageGC.ifPresent(imageGC -> imageGC.updateLastUsedTimeFor(image.get().getId()));
removeScheduledPoll(dockerImage).complete(dockerImage);
} else {
removeScheduledPoll(dockerImage).completeExceptionally(
new DockerClientException("Could not download image: " + dockerImage));
}
}
}
/**
 * Captures a single Statistics sample from the docker stats stream.
 * The latch opens after the first non-null sample, so awaitCompletion returns as
 * soon as one snapshot is available rather than waiting for the stream to close.
 * NOTE(review): if the daemon delivers further samples before the stream is torn
 * down, later onNext calls would overwrite 'stats' — appears harmless since the
 * caller reads after awaitCompletion, but confirm against the call site.
 */
private class DockerStatsCallback extends ResultCallbackTemplate<DockerStatsCallback, Statistics> {
private Optional<Statistics> stats = Optional.empty();
private final CountDownLatch completed = new CountDownLatch(1);
@Override
public void onNext(Statistics stats) {
if (stats != null) {
// Keep the sample, release any waiter, then close the stream — one snapshot is enough.
this.stats = Optional.of(stats);
completed.countDown();
onComplete();
}
}
@Override
public boolean awaitCompletion(long timeout, TimeUnit timeUnit) throws InterruptedException {
// Waits only for the first sample, not for the underlying stream to terminate.
return completed.await(timeout, timeUnit);
}
}
/**
 * Builds a DockerClient against the configured daemon, first probing which remote
 * API version the daemon speaks.
 *
 * @param config docker connection settings (connection pool sizes and timeouts)
 * @param fallbackTo123orErrors if true, fall back to remote API 1.23 when version
 *        detection fails; if false, propagate the detection error
 */
private DockerClient initDockerConnection(final DockerConfig config, boolean fallbackTo123orErrors) {
JerseyDockerCmdExecFactory dockerFactory = new JerseyDockerCmdExecFactory()
.withMaxPerRouteConnections(config.maxPerRouteConnections())
.withMaxTotalConnections(config.maxTotalConnections())
.withConnectTimeout(config.connectTimeoutMillis())
.withReadTimeout(config.readTimeoutMillis());
RemoteApiVersion remoteApiVersion;
try {
// Probe the daemon's API version using a throwaway client built without a pinned version.
remoteApiVersion = RemoteApiVersion.parseConfig(DockerClientImpl.getInstance(
buildDockerClientConfig(config).build())
.withDockerCmdExecFactory(dockerFactory).versionCmd().exec().getApiVersion());
logger.info("Found version of remote docker API: " + remoteApiVersion);
// Deliberate cap: for daemons speaking 1.24 or newer, still talk API 1.23.
if (remoteApiVersion.isGreaterOrEqual(RemoteApiVersion.VERSION_1_24)) {
remoteApiVersion = RemoteApiVersion.VERSION_1_23;
logger.info("Found version 1.24 or newer of remote API, using 1.23.");
}
} catch (Exception e) {
if (!fallbackTo123orErrors) {
throw e;
}
// Best effort: assume 1.23 when the daemon cannot be queried for its version.
logger.log(LogLevel.ERROR, "Failed when trying to figure out remote API version of docker, using 1.23", e);
remoteApiVersion = RemoteApiVersion.VERSION_1_23;
}
// Build the real client, pinned to the negotiated API version.
return DockerClientImpl.getInstance(
buildDockerClientConfig(config)
.withApiVersion(remoteApiVersion)
.build())
.withDockerCmdExecFactory(dockerFactory);
}
/** Registers the counter for failed docker-daemon API calls, tagged with this host and role. */
private void setMetrics(MetricReceiverWrapper metricReceiver) {
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder();
    dimensionsBuilder.add("host", HostName.getLocalhost());
    dimensionsBuilder.add("role", "docker");
    numberOfDockerDaemonFails = metricReceiver.declareCounter(
            MetricReceiverWrapper.APPLICATION_DOCKER, dimensionsBuilder.build(), "daemon.api_fails");
}
} |
Do we need to create a new container for all 3 exec tests? | public void testContainerExecHounorsTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.of(1L), "sh", "-c", "sleep 5");
} | final ContainerName containerName = new ContainerName("docker-test-foo"); | public void testContainerExecHounorsTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
docker.startContainer(containerName);
docker.executeInContainerAsRoot(containerName, 1L, "sh", "-c", "sleep 5");
} | class DockerTest {
private DockerImpl docker;
private static final DockerImage dockerImage = new DockerImage("simple-ipv6-server:Dockerfile");
private static final String MANAGER_NAME = "docker-test";
@Ignore
@Test
public void testDockerImagePullDelete() throws ExecutionException, InterruptedException {
DockerImage dockerImage = new DockerImage("busybox:1.24.0");
docker.pullImageAsync(dockerImage).get();
assertTrue("Failed to download " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
docker.deleteImage(dockerImage);
assertFalse("Failed to delete " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
}
@Ignore
@Test
public void testOutOfMemoryDoesNotAffectOtherContainers() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress1)
.withMemoryInMb(100).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress2)
.withMemoryInMb(100).create();
docker.startContainer(containerName2);
assertThat(docker.executeInContainerAsRoot(containerName2, Optional.empty(), "python", "/pysrc/fillmem.py", "90").getExitStatus(), is(137));
testReachabilityFromHost("http:
testReachabilityFromHost("http:
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Test
public void testContainerCycle() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.dockerClient.pauseContainerCmd(containerName.asString()).exec();
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.PAUSED);
docker.dockerClient.unpauseContainerCmd(containerName.asString()).exec();
docker.stopContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.EXITED);
docker.deleteContainer(containerName);
assertThat(docker.getAllContainersManagedBy(MANAGER_NAME).isEmpty(), is(true));
}
/**
* Test the expected behavior for exec when it times out - it should throw an exception when it times out, and before the process completes
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow but lower than the process time.
*/
@Test(expected = DockerExecTimeoutException.class, timeout = 4000)
/**
* Test the expected behavior for exec that completes before specified timeout - it should return when the process finishes and not
* wait for the timeout. Some previous tests indicated that this was not behaving correctly.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow
*/
@Test(timeout = 4000)
public void testContainerExecReturnsBeforeTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.of(5L), "sh", "-c", "echo hei");
}
/**
* Test the expected behavior for exec without timeout - it should wait for the process as long as it takes and
* not throw exceptions.
*/
@Test
public void testContainerExecNoTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.empty(), "sh", "-c", "sleep 2");
}
@Test
public void testDockerNetworking() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress1).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress2).create();
docker.startContainer(containerName2);
testReachabilityFromHost("http:
testReachabilityFromHost("http:
String[] curlFromNodeToNode = new String[]{"curl", "-g", "http:
ProcessResult result = docker.executeInContainerAsRoot(containerName1, Optional.empty(), curlFromNodeToNode);
assertThat("Could not reach " + containerName2.asString() + " from " + containerName1.asString(),
result.getOutput(), is("pong\n"));
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Before
public void setup() throws InterruptedException, ExecutionException, IOException {
if (docker == null) {
assumeTrue(DockerTestUtils.dockerDaemonIsPresent());
docker = DockerTestUtils.getDocker();
DockerTestUtils.buildSimpleHttpServerDockerImage(docker, dockerImage);
}
docker.getAllContainersManagedBy(MANAGER_NAME).forEach(container -> {
if (container.state.isRunning()) docker.stopContainer(container.name);
docker.deleteContainer(container.name);
});
}
private void testReachabilityFromHost(String target) throws IOException, InterruptedException {
URL url = new URL(target);
String containerServer = IOUtils.toString(url.openStream());
assertThat(containerServer, is("pong\n"));
}
} | class DockerTest {
private DockerImpl docker;
private static final DockerImage dockerImage = new DockerImage("simple-ipv6-server:Dockerfile");
private static final String MANAGER_NAME = "docker-test";
@Ignore
@Test
public void testDockerImagePullDelete() throws ExecutionException, InterruptedException {
DockerImage dockerImage = new DockerImage("busybox:1.24.0");
docker.pullImageAsync(dockerImage).get();
assertTrue("Failed to download " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
docker.deleteImage(dockerImage);
assertFalse("Failed to delete " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
}
@Ignore
@Test
public void testOutOfMemoryDoesNotAffectOtherContainers() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress1)
.withMemoryInMb(100).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress2)
.withMemoryInMb(100).create();
docker.startContainer(containerName2);
assertThat(docker.executeInContainerAsRoot(containerName2, "python", "/pysrc/fillmem.py", "90").getExitStatus(), is(137));
testReachabilityFromHost("http:
testReachabilityFromHost("http:
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Test
public void testContainerCycle() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.dockerClient.pauseContainerCmd(containerName.asString()).exec();
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.PAUSED);
docker.dockerClient.unpauseContainerCmd(containerName.asString()).exec();
docker.stopContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.EXITED);
docker.deleteContainer(containerName);
assertThat(docker.getAllContainersManagedBy(MANAGER_NAME).isEmpty(), is(true));
}
/**
* Test the expected behavior for exec when it times out - it should throw an exception when it times out,
* and before the process completes.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow but lower than the process time.
*/
@Test(expected = DockerExecTimeoutException.class, timeout = 2000)
/**
* Test the expected behavior for exec that completes before specified timeout - it should return when the process finishes and not
* wait for the timeout. Some previous tests indicated that this was not behaving correctly.
*
* No timeout implies infinite timeout.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow
*/
@Test(timeout = 4000)
public void testContainerExecDoesNotBlockUntilTimeoutWhenCommandFinishesBeforeTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
docker.startContainer(containerName);
docker.executeInContainerAsRoot(containerName, 2L, "sh", "-c", "echo hei");
docker.executeInContainerAsRoot(containerName,"sh", "-c", "echo hei");
}
@Test
public void testDockerNetworking() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress1).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress2).create();
docker.startContainer(containerName2);
testReachabilityFromHost("http:
testReachabilityFromHost("http:
String[] curlFromNodeToNode = new String[]{"curl", "-g", "http:
ProcessResult result = docker.executeInContainerAsRoot(containerName1, curlFromNodeToNode);
assertThat("Could not reach " + containerName2.asString() + " from " + containerName1.asString(),
result.getOutput(), is("pong\n"));
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Before
public void setup() throws InterruptedException, ExecutionException, IOException {
if (docker == null) {
assumeTrue(DockerTestUtils.dockerDaemonIsPresent());
docker = DockerTestUtils.getDocker();
DockerTestUtils.buildSimpleHttpServerDockerImage(docker, dockerImage);
}
docker.getAllContainersManagedBy(MANAGER_NAME).forEach(container -> {
if (container.state.isRunning()) docker.stopContainer(container.name);
docker.deleteContainer(container.name);
});
}
private void testReachabilityFromHost(String target) throws IOException, InterruptedException {
URL url = new URL(target);
String containerServer = IOUtils.toString(url.openStream());
assertThat(containerServer, is("pong\n"));
}
} |
You mean combine the three test cases into one? Or set up a common container for the test class? | public void testContainerExecHounorsTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.of(1L), "sh", "-c", "sleep 5");
} | final ContainerName containerName = new ContainerName("docker-test-foo"); | public void testContainerExecHounorsTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
docker.startContainer(containerName);
docker.executeInContainerAsRoot(containerName, 1L, "sh", "-c", "sleep 5");
} | class DockerTest {
private DockerImpl docker;
private static final DockerImage dockerImage = new DockerImage("simple-ipv6-server:Dockerfile");
private static final String MANAGER_NAME = "docker-test";
@Ignore
@Test
public void testDockerImagePullDelete() throws ExecutionException, InterruptedException {
DockerImage dockerImage = new DockerImage("busybox:1.24.0");
docker.pullImageAsync(dockerImage).get();
assertTrue("Failed to download " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
docker.deleteImage(dockerImage);
assertFalse("Failed to delete " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
}
@Ignore
@Test
public void testOutOfMemoryDoesNotAffectOtherContainers() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress1)
.withMemoryInMb(100).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress2)
.withMemoryInMb(100).create();
docker.startContainer(containerName2);
assertThat(docker.executeInContainerAsRoot(containerName2, Optional.empty(), "python", "/pysrc/fillmem.py", "90").getExitStatus(), is(137));
testReachabilityFromHost("http:
testReachabilityFromHost("http:
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Test
public void testContainerCycle() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.dockerClient.pauseContainerCmd(containerName.asString()).exec();
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.PAUSED);
docker.dockerClient.unpauseContainerCmd(containerName.asString()).exec();
docker.stopContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.EXITED);
docker.deleteContainer(containerName);
assertThat(docker.getAllContainersManagedBy(MANAGER_NAME).isEmpty(), is(true));
}
/**
* Test the expected behavior for exec when it times out - it should throw an exception when it times out, and before the process completes
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow but lower than the process time.
*/
@Test(expected = DockerExecTimeoutException.class, timeout = 4000)
/**
* Test the expected behavior for exec that completes before specified timeout - it should return when the process finishes and not
* wait for the timeout. Some previous tests indicated that this was not behaving correctly.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow
*/
@Test(timeout = 4000)
public void testContainerExecReturnsBeforeTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.of(5L), "sh", "-c", "echo hei");
}
/**
* Test the expected behavior for exec without timeout - it should wait for the process as long as it takes and
* not throw exceptions.
*/
@Test
public void testContainerExecNoTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.empty(), "sh", "-c", "sleep 2");
}
@Test
public void testDockerNetworking() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress1).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress2).create();
docker.startContainer(containerName2);
testReachabilityFromHost("http:
testReachabilityFromHost("http:
String[] curlFromNodeToNode = new String[]{"curl", "-g", "http:
ProcessResult result = docker.executeInContainerAsRoot(containerName1, Optional.empty(), curlFromNodeToNode);
assertThat("Could not reach " + containerName2.asString() + " from " + containerName1.asString(),
result.getOutput(), is("pong\n"));
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Before
public void setup() throws InterruptedException, ExecutionException, IOException {
if (docker == null) {
assumeTrue(DockerTestUtils.dockerDaemonIsPresent());
docker = DockerTestUtils.getDocker();
DockerTestUtils.buildSimpleHttpServerDockerImage(docker, dockerImage);
}
docker.getAllContainersManagedBy(MANAGER_NAME).forEach(container -> {
if (container.state.isRunning()) docker.stopContainer(container.name);
docker.deleteContainer(container.name);
});
}
private void testReachabilityFromHost(String target) throws IOException, InterruptedException {
URL url = new URL(target);
String containerServer = IOUtils.toString(url.openStream());
assertThat(containerServer, is("pong\n"));
}
} | class DockerTest {
private DockerImpl docker;
private static final DockerImage dockerImage = new DockerImage("simple-ipv6-server:Dockerfile");
private static final String MANAGER_NAME = "docker-test";
@Ignore
@Test
public void testDockerImagePullDelete() throws ExecutionException, InterruptedException {
DockerImage dockerImage = new DockerImage("busybox:1.24.0");
docker.pullImageAsync(dockerImage).get();
assertTrue("Failed to download " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
docker.deleteImage(dockerImage);
assertFalse("Failed to delete " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
}
@Ignore
@Test
public void testOutOfMemoryDoesNotAffectOtherContainers() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress1)
.withMemoryInMb(100).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress2)
.withMemoryInMb(100).create();
docker.startContainer(containerName2);
assertThat(docker.executeInContainerAsRoot(containerName2, "python", "/pysrc/fillmem.py", "90").getExitStatus(), is(137));
testReachabilityFromHost("http:
testReachabilityFromHost("http:
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Test
public void testContainerCycle() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.dockerClient.pauseContainerCmd(containerName.asString()).exec();
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.PAUSED);
docker.dockerClient.unpauseContainerCmd(containerName.asString()).exec();
docker.stopContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.EXITED);
docker.deleteContainer(containerName);
assertThat(docker.getAllContainersManagedBy(MANAGER_NAME).isEmpty(), is(true));
}
/**
* Test the expected behavior for exec when it times out - it should throw an exception when it times out,
* and before the process completes.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow but lower than the process time.
*/
@Test(expected = DockerExecTimeoutException.class, timeout = 2000)
/**
* Test the expected behavior for exec that completes before specified timeout - it should return when the process finishes and not
* wait for the timeout. Some previous tests indicated that this was not behaving correctly.
*
* No timeout implies infinite timeout.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow
*/
@Test(timeout = 4000)
public void testContainerExecDoesNotBlockUntilTimeoutWhenCommandFinishesBeforeTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
docker.startContainer(containerName);
docker.executeInContainerAsRoot(containerName, 2L, "sh", "-c", "echo hei");
docker.executeInContainerAsRoot(containerName,"sh", "-c", "echo hei");
}
@Test
public void testDockerNetworking() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress1).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress2).create();
docker.startContainer(containerName2);
testReachabilityFromHost("http:
testReachabilityFromHost("http:
String[] curlFromNodeToNode = new String[]{"curl", "-g", "http:
ProcessResult result = docker.executeInContainerAsRoot(containerName1, curlFromNodeToNode);
assertThat("Could not reach " + containerName2.asString() + " from " + containerName1.asString(),
result.getOutput(), is("pong\n"));
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Before
public void setup() throws InterruptedException, ExecutionException, IOException {
if (docker == null) {
assumeTrue(DockerTestUtils.dockerDaemonIsPresent());
docker = DockerTestUtils.getDocker();
DockerTestUtils.buildSimpleHttpServerDockerImage(docker, dockerImage);
}
docker.getAllContainersManagedBy(MANAGER_NAME).forEach(container -> {
if (container.state.isRunning()) docker.stopContainer(container.name);
docker.deleteContainer(container.name);
});
}
private void testReachabilityFromHost(String target) throws IOException, InterruptedException {
URL url = new URL(target);
String containerServer = IOUtils.toString(url.openStream());
assertThat(containerServer, is("pong\n"));
}
} |
Combine | public void testContainerExecHounorsTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.of(1L), "sh", "-c", "sleep 5");
} | final ContainerName containerName = new ContainerName("docker-test-foo"); | public void testContainerExecHounorsTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
docker.startContainer(containerName);
docker.executeInContainerAsRoot(containerName, 1L, "sh", "-c", "sleep 5");
} | class DockerTest {
private DockerImpl docker;
private static final DockerImage dockerImage = new DockerImage("simple-ipv6-server:Dockerfile");
private static final String MANAGER_NAME = "docker-test";
@Ignore
@Test
public void testDockerImagePullDelete() throws ExecutionException, InterruptedException {
DockerImage dockerImage = new DockerImage("busybox:1.24.0");
docker.pullImageAsync(dockerImage).get();
assertTrue("Failed to download " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
docker.deleteImage(dockerImage);
assertFalse("Failed to delete " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
}
@Ignore
@Test
public void testOutOfMemoryDoesNotAffectOtherContainers() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress1)
.withMemoryInMb(100).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress2)
.withMemoryInMb(100).create();
docker.startContainer(containerName2);
assertThat(docker.executeInContainerAsRoot(containerName2, Optional.empty(), "python", "/pysrc/fillmem.py", "90").getExitStatus(), is(137));
testReachabilityFromHost("http:
testReachabilityFromHost("http:
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Test
public void testContainerCycle() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.dockerClient.pauseContainerCmd(containerName.asString()).exec();
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.PAUSED);
docker.dockerClient.unpauseContainerCmd(containerName.asString()).exec();
docker.stopContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.EXITED);
docker.deleteContainer(containerName);
assertThat(docker.getAllContainersManagedBy(MANAGER_NAME).isEmpty(), is(true));
}
/**
* Test the expected behavior for exec when it times out - it should throw an exception with it times out and before the process completes
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow but lower than the process time.
*/
@Test(expected = DockerExecTimeoutException.class, timeout = 4000)
/**
* Test the expected behavior for exec that completes before specified timeout - it should return when the process finishes and not
* wait for the timeout. Some previous tests indicated that this was not behaving correctly.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow
*/
@Test(timeout = 4000)
public void testContainerExecReturnsBeforeTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.of(5L), "sh", "-c", "echo hei");
}
/**
* Test the expected behavior for exec without timeout - it should wait for the process as long as it takes and
* not throw exceptions.
*/
@Test
public void testContainerExecNoTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.empty(), "sh", "-c", "sleep 2");
}
@Test
public void testDockerNetworking() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress1).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress2).create();
docker.startContainer(containerName2);
testReachabilityFromHost("http:
testReachabilityFromHost("http:
String[] curlFromNodeToNode = new String[]{"curl", "-g", "http:
ProcessResult result = docker.executeInContainerAsRoot(containerName1, Optional.empty(), curlFromNodeToNode);
assertThat("Could not reach " + containerName2.asString() + " from " + containerName1.asString(),
result.getOutput(), is("pong\n"));
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Before
public void setup() throws InterruptedException, ExecutionException, IOException {
if (docker == null) {
assumeTrue(DockerTestUtils.dockerDaemonIsPresent());
docker = DockerTestUtils.getDocker();
DockerTestUtils.buildSimpleHttpServerDockerImage(docker, dockerImage);
}
docker.getAllContainersManagedBy(MANAGER_NAME).forEach(container -> {
if (container.state.isRunning()) docker.stopContainer(container.name);
docker.deleteContainer(container.name);
});
}
private void testReachabilityFromHost(String target) throws IOException, InterruptedException {
URL url = new URL(target);
String containerServer = IOUtils.toString(url.openStream());
assertThat(containerServer, is("pong\n"));
}
} | class DockerTest {
private DockerImpl docker;
private static final DockerImage dockerImage = new DockerImage("simple-ipv6-server:Dockerfile");
private static final String MANAGER_NAME = "docker-test";
@Ignore
@Test
public void testDockerImagePullDelete() throws ExecutionException, InterruptedException {
DockerImage dockerImage = new DockerImage("busybox:1.24.0");
docker.pullImageAsync(dockerImage).get();
assertTrue("Failed to download " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
docker.deleteImage(dockerImage);
assertFalse("Failed to delete " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
}
@Ignore
@Test
public void testOutOfMemoryDoesNotAffectOtherContainers() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress1)
.withMemoryInMb(100).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress2)
.withMemoryInMb(100).create();
docker.startContainer(containerName2);
assertThat(docker.executeInContainerAsRoot(containerName2, "python", "/pysrc/fillmem.py", "90").getExitStatus(), is(137));
testReachabilityFromHost("http:
testReachabilityFromHost("http:
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Test
public void testContainerCycle() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.dockerClient.pauseContainerCmd(containerName.asString()).exec();
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.PAUSED);
docker.dockerClient.unpauseContainerCmd(containerName.asString()).exec();
docker.stopContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.EXITED);
docker.deleteContainer(containerName);
assertThat(docker.getAllContainersManagedBy(MANAGER_NAME).isEmpty(), is(true));
}
/**
* Test the expected behavior for exec when it times out - it should throw an exception when it times out,
* and before the process completes.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow but lower than the process time.
*/
@Test(expected = DockerExecTimeoutException.class, timeout = 2000)
/**
* Test the expected behavior for exec that completes before specified timeout - it should return when the process finishes and not
* wait for the timeout. Some previous tests indicated that this was not behaving correctly.
*
* No timeout implies infinite timeout.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow
*/
@Test(timeout = 4000)
public void testContainerExecDoesNotBlockUntilTimeoutWhenCommandFinishesBeforeTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
docker.startContainer(containerName);
docker.executeInContainerAsRoot(containerName, 2L, "sh", "-c", "echo hei");
docker.executeInContainerAsRoot(containerName,"sh", "-c", "echo hei");
}
@Test
public void testDockerNetworking() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress1).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress2).create();
docker.startContainer(containerName2);
testReachabilityFromHost("http:
testReachabilityFromHost("http:
String[] curlFromNodeToNode = new String[]{"curl", "-g", "http:
ProcessResult result = docker.executeInContainerAsRoot(containerName1, curlFromNodeToNode);
assertThat("Could not reach " + containerName2.asString() + " from " + containerName1.asString(),
result.getOutput(), is("pong\n"));
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Before
public void setup() throws InterruptedException, ExecutionException, IOException {
if (docker == null) {
assumeTrue(DockerTestUtils.dockerDaemonIsPresent());
docker = DockerTestUtils.getDocker();
DockerTestUtils.buildSimpleHttpServerDockerImage(docker, dockerImage);
}
docker.getAllContainersManagedBy(MANAGER_NAME).forEach(container -> {
if (container.state.isRunning()) docker.stopContainer(container.name);
docker.deleteContainer(container.name);
});
}
private void testReachabilityFromHost(String target) throws IOException, InterruptedException {
URL url = new URL(target);
String containerServer = IOUtils.toString(url.openStream());
assertThat(containerServer, is("pong\n"));
}
} |
I can probably combine the blocking tests but I think the non-blocking (i.e where the timeout is reached) is semantically different and should be in a separate test. And I would avoid catching exception in the test, assert on it and continue. | public void testContainerExecHounorsTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.of(1L), "sh", "-c", "sleep 5");
} | final ContainerName containerName = new ContainerName("docker-test-foo"); | public void testContainerExecHounorsTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
docker.startContainer(containerName);
docker.executeInContainerAsRoot(containerName, 1L, "sh", "-c", "sleep 5");
} | class DockerTest {
private DockerImpl docker;
private static final DockerImage dockerImage = new DockerImage("simple-ipv6-server:Dockerfile");
private static final String MANAGER_NAME = "docker-test";
@Ignore
@Test
public void testDockerImagePullDelete() throws ExecutionException, InterruptedException {
DockerImage dockerImage = new DockerImage("busybox:1.24.0");
docker.pullImageAsync(dockerImage).get();
assertTrue("Failed to download " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
docker.deleteImage(dockerImage);
assertFalse("Failed to delete " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
}
@Ignore
@Test
public void testOutOfMemoryDoesNotAffectOtherContainers() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress1)
.withMemoryInMb(100).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress2)
.withMemoryInMb(100).create();
docker.startContainer(containerName2);
assertThat(docker.executeInContainerAsRoot(containerName2, Optional.empty(), "python", "/pysrc/fillmem.py", "90").getExitStatus(), is(137));
testReachabilityFromHost("http:
testReachabilityFromHost("http:
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Test
public void testContainerCycle() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.dockerClient.pauseContainerCmd(containerName.asString()).exec();
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.PAUSED);
docker.dockerClient.unpauseContainerCmd(containerName.asString()).exec();
docker.stopContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.EXITED);
docker.deleteContainer(containerName);
assertThat(docker.getAllContainersManagedBy(MANAGER_NAME).isEmpty(), is(true));
}
/**
* Test the expected behavior for exec when it times out - it should throw an exception with it times out and before the process completes
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow but lower than the process time.
*/
@Test(expected = DockerExecTimeoutException.class, timeout = 4000)
/**
* Test the expected behavior for exec that completes before specified timeout - it should return when the process finishes and not
* wait for the timeout. Some previous tests indicated that this was not behaving correctly.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow
*/
@Test(timeout = 4000)
public void testContainerExecReturnsBeforeTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.of(5L), "sh", "-c", "echo hei");
}
/**
* Test the expected behavior for exec without timeout - it should wait for the process as long as it takes and
* not throw exceptions.
*/
@Test
public void testContainerExecNoTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.executeInContainerAsRoot(containerName, Optional.empty(), "sh", "-c", "sleep 2");
}
@Test
public void testDockerNetworking() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress1).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress2).create();
docker.startContainer(containerName2);
testReachabilityFromHost("http:
testReachabilityFromHost("http:
String[] curlFromNodeToNode = new String[]{"curl", "-g", "http:
ProcessResult result = docker.executeInContainerAsRoot(containerName1, Optional.empty(), curlFromNodeToNode);
assertThat("Could not reach " + containerName2.asString() + " from " + containerName1.asString(),
result.getOutput(), is("pong\n"));
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Before
public void setup() throws InterruptedException, ExecutionException, IOException {
if (docker == null) {
assumeTrue(DockerTestUtils.dockerDaemonIsPresent());
docker = DockerTestUtils.getDocker();
DockerTestUtils.buildSimpleHttpServerDockerImage(docker, dockerImage);
}
docker.getAllContainersManagedBy(MANAGER_NAME).forEach(container -> {
if (container.state.isRunning()) docker.stopContainer(container.name);
docker.deleteContainer(container.name);
});
}
private void testReachabilityFromHost(String target) throws IOException, InterruptedException {
URL url = new URL(target);
String containerServer = IOUtils.toString(url.openStream());
assertThat(containerServer, is("pong\n"));
}
} | class DockerTest {
private DockerImpl docker;
private static final DockerImage dockerImage = new DockerImage("simple-ipv6-server:Dockerfile");
private static final String MANAGER_NAME = "docker-test";
@Ignore
@Test
public void testDockerImagePullDelete() throws ExecutionException, InterruptedException {
DockerImage dockerImage = new DockerImage("busybox:1.24.0");
docker.pullImageAsync(dockerImage).get();
assertTrue("Failed to download " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
docker.deleteImage(dockerImage);
assertFalse("Failed to delete " + dockerImage.asString() + " image", docker.imageIsDownloaded(dockerImage));
}
@Ignore
@Test
public void testOutOfMemoryDoesNotAffectOtherContainers() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress1)
.withMemoryInMb(100).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2)
.withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
.withIpAddress(inetAddress2)
.withMemoryInMb(100).create();
docker.startContainer(containerName2);
assertThat(docker.executeInContainerAsRoot(containerName2, "python", "/pysrc/fillmem.py", "90").getExitStatus(), is(137));
testReachabilityFromHost("http:
testReachabilityFromHost("http:
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Test
public void testContainerCycle() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
Optional<Container> container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.CREATED);
docker.startContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.RUNNING);
docker.dockerClient.pauseContainerCmd(containerName.asString()).exec();
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.PAUSED);
docker.dockerClient.unpauseContainerCmd(containerName.asString()).exec();
docker.stopContainer(containerName);
container = docker.getContainer(containerName);
assertTrue(container.isPresent());
assertEquals(container.get().state, Container.State.EXITED);
docker.deleteContainer(containerName);
assertThat(docker.getAllContainersManagedBy(MANAGER_NAME).isEmpty(), is(true));
}
/**
* Test the expected behavior for exec when it times out - it should throw an exception when it times out,
* and before the process completes.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow but lower than the process time.
*/
@Test(expected = DockerExecTimeoutException.class, timeout = 2000)
/**
* Test the expected behavior for exec that completes before specified timeout - it should return when the process finishes and not
* wait for the timeout. Some previous tests indicated that this was not behaving correctly.
*
* No timeout implies infinite timeout.
*
* The test timeout value is set quite high to avoid noise if screwdriver is slow
*/
@Test(timeout = 4000)
public void testContainerExecDoesNotBlockUntilTimeoutWhenCommandFinishesBeforeTimeout() throws IOException, InterruptedException, ExecutionException {
final ContainerName containerName = new ContainerName("docker-test-foo");
final String containerHostname = "hostName1";
docker.createContainerCommand(dockerImage, containerName, containerHostname).withManagedBy(MANAGER_NAME).create();
docker.startContainer(containerName);
docker.executeInContainerAsRoot(containerName, 2L, "sh", "-c", "echo hei");
docker.executeInContainerAsRoot(containerName,"sh", "-c", "echo hei");
}
@Test
public void testDockerNetworking() throws InterruptedException, ExecutionException, IOException {
String hostName1 = "docker10.test.yahoo.com";
String hostName2 = "docker11.test.yahoo.com";
ContainerName containerName1 = new ContainerName("docker-test-1");
ContainerName containerName2 = new ContainerName("docker-test-2");
InetAddress inetAddress1 = InetAddress.getByName("172.18.10.10");
InetAddress inetAddress2 = InetAddress.getByName("172.18.10.11");
docker.createContainerCommand(dockerImage, containerName1, hostName1).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress1).create();
docker.startContainer(containerName1);
docker.createContainerCommand(dockerImage, containerName2, hostName2).withManagedBy(MANAGER_NAME)
.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).withIpAddress(inetAddress2).create();
docker.startContainer(containerName2);
testReachabilityFromHost("http:
testReachabilityFromHost("http:
String[] curlFromNodeToNode = new String[]{"curl", "-g", "http:
ProcessResult result = docker.executeInContainerAsRoot(containerName1, curlFromNodeToNode);
assertThat("Could not reach " + containerName2.asString() + " from " + containerName1.asString(),
result.getOutput(), is("pong\n"));
docker.stopContainer(containerName1);
docker.deleteContainer(containerName1);
docker.stopContainer(containerName2);
docker.deleteContainer(containerName2);
}
@Before
public void setup() throws InterruptedException, ExecutionException, IOException {
if (docker == null) {
assumeTrue(DockerTestUtils.dockerDaemonIsPresent());
docker = DockerTestUtils.getDocker();
DockerTestUtils.buildSimpleHttpServerDockerImage(docker, dockerImage);
}
docker.getAllContainersManagedBy(MANAGER_NAME).forEach(container -> {
if (container.state.isRunning()) docker.stopContainer(container.name);
docker.deleteContainer(container.name);
});
}
private void testReachabilityFromHost(String target) throws IOException, InterruptedException {
URL url = new URL(target);
String containerServer = IOUtils.toString(url.openStream());
assertThat(containerServer, is("pong\n"));
}
} |
We should not auto-retire VMs, since they cannot be dual-stacked. | public boolean shouldRetire(Node node) {
return node.ipAddresses().stream()
.map(InetAddresses::forString)
.allMatch(address -> address instanceof Inet4Address);
} | .allMatch(address -> address instanceof Inet4Address); | public boolean shouldRetire(Node node) {
if (node.flavor().getType() == Flavor.Type.VIRTUAL_MACHINE) return false;
return node.ipAddresses().stream()
.map(InetAddresses::forString)
.allMatch(address -> address instanceof Inet4Address);
} | class RetireIPv4OnlyNodes implements RetirementPolicy {
@Override
} | class RetireIPv4OnlyNodes implements RetirementPolicy {
@Override
} |
Add log of this event. | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
long numSpareReadyNodesForCurrentFlavor = numSpareNodesByFlavor.get(entry.getKey());
entry.getValue().stream()
.limit(numSpareReadyNodesForCurrentFlavor)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.system));
return numSpareReadyNodesForCurrentFlavor < entry.getValue().size();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | .forEach(node -> nodeRepository().park(node.hostname(), Agent.system)); | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | class NodeRetirer extends Maintainer {
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Duration interval, RetirementPolicy retirementPolicy) {
super(nodeRepository, interval);
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
long numNodesToToSpare = (long) Math.max(2, 0.1 * numActiveNodesByCurrentFlavor);
return Math.max(0L, entry.getValue() - numNodesToToSpare);
}));
}
@Override
public String toString() {
return "Node retirer";
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
I suggest this lambda is extracted for documentation and testing. | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
long numSpareReadyNodesForCurrentFlavor = numSpareNodesByFlavor.get(entry.getKey());
entry.getValue().stream()
.limit(numSpareReadyNodesForCurrentFlavor)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.system));
return numSpareReadyNodesForCurrentFlavor < entry.getValue().size();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | return numSpareReadyNodesForCurrentFlavor < entry.getValue().size(); | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | class NodeRetirer extends Maintainer {
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Duration interval, RetirementPolicy retirementPolicy) {
super(nodeRepository, interval);
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
long numNodesToToSpare = (long) Math.max(2, 0.1 * numActiveNodesByCurrentFlavor);
return Math.max(0L, entry.getValue() - numNodesToToSpare);
}));
}
@Override
public String toString() {
return "Node retirer";
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
The `reason` ("Parked by NodeRetirer") gets logged, while the `Agent` is written to node history, but not logged. (See: https://github.com/yahoo/vespa/blob/ee3f2092b5e6a91944d36d17f8ed8fa5242e91aa/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java#L153-L181) | boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
} | .forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName())); | boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
Fixed. | public boolean shouldRetire(Node node) {
return node.ipAddresses().stream()
.map(InetAddresses::forString)
.allMatch(address -> address instanceof Inet4Address);
} | .allMatch(address -> address instanceof Inet4Address); | public boolean shouldRetire(Node node) {
if (node.flavor().getType() == Flavor.Type.VIRTUAL_MACHINE) return false;
return node.ipAddresses().stream()
.map(InetAddresses::forString)
.allMatch(address -> address instanceof Inet4Address);
} | class RetireIPv4OnlyNodes implements RetirementPolicy {
@Override
} | class RetireIPv4OnlyNodes implements RetirementPolicy {
@Override
} |
Fixed. | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
long numSpareReadyNodesForCurrentFlavor = numSpareNodesByFlavor.get(entry.getKey());
entry.getValue().stream()
.limit(numSpareReadyNodesForCurrentFlavor)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.system));
return numSpareReadyNodesForCurrentFlavor < entry.getValue().size();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | return numSpareReadyNodesForCurrentFlavor < entry.getValue().size(); | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | class NodeRetirer extends Maintainer {
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Duration interval, RetirementPolicy retirementPolicy) {
super(nodeRepository, interval);
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
long numNodesToToSpare = (long) Math.max(2, 0.1 * numActiveNodesByCurrentFlavor);
return Math.max(0L, entry.getValue() - numNodesToToSpare);
}));
}
@Override
public String toString() {
return "Node retirer";
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
Fixed. | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
long numSpareReadyNodesForCurrentFlavor = numSpareNodesByFlavor.get(entry.getKey());
entry.getValue().stream()
.limit(numSpareReadyNodesForCurrentFlavor)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.system));
return numSpareReadyNodesForCurrentFlavor < entry.getValue().size();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | .forEach(node -> nodeRepository().park(node.hostname(), Agent.system)); | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | class NodeRetirer extends Maintainer {
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Duration interval, RetirementPolicy retirementPolicy) {
super(nodeRepository, interval);
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
long numNodesToToSpare = (long) Math.max(2, 0.1 * numActiveNodesByCurrentFlavor);
return Math.max(0L, entry.getValue() - numNodesToToSpare);
}));
}
@Override
public String toString() {
return "Node retirer";
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
No it isn't | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
long numSpareReadyNodesForCurrentFlavor = numSpareNodesByFlavor.get(entry.getKey());
entry.getValue().stream()
.limit(numSpareReadyNodesForCurrentFlavor)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.system));
return numSpareReadyNodesForCurrentFlavor < entry.getValue().size();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | return numSpareReadyNodesForCurrentFlavor < entry.getValue().size(); | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | class NodeRetirer extends Maintainer {
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Duration interval, RetirementPolicy retirementPolicy) {
super(nodeRepository, interval);
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
long numNodesToToSpare = (long) Math.max(2, 0.1 * numActiveNodesByCurrentFlavor);
return Math.max(0L, entry.getValue() - numNodesToToSpare);
}));
}
@Override
public String toString() {
return "Node retirer";
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
"ToTo" -> "To" | long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToToSpare);
} | long numNodesToToSpare = (long) Math.ceil(0.1 * numActiveNodes); | long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
} | class NodeRetirer extends Maintainer {
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval);
this.retirementPolicy = retirementPolicy;
if (! Arrays.asList(applies).contains(zone)) deconstruct();
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getName()));
return limit >= nodesToPark.size();
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
@Override
public String toString() {
return "Node retirer";
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
} |
Apparently it links to old commit if you click the "View" button here. If you look at the current code in "Files changed" tab (https://github.com/yahoo/vespa/pull/2224/files), then the lambda has been extracted to `limitedPark()` | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
long numSpareReadyNodesForCurrentFlavor = numSpareNodesByFlavor.get(entry.getKey());
entry.getValue().stream()
.limit(numSpareReadyNodesForCurrentFlavor)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.system));
return numSpareReadyNodesForCurrentFlavor < entry.getValue().size();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | return numSpareReadyNodesForCurrentFlavor < entry.getValue().size(); | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | class NodeRetirer extends Maintainer {
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Duration interval, RetirementPolicy retirementPolicy) {
super(nodeRepository, interval);
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
long numNodesToToSpare = (long) Math.max(2, 0.1 * numActiveNodesByCurrentFlavor);
return Math.max(0L, entry.getValue() - numNodesToToSpare);
}));
}
@Override
public String toString() {
return "Node retirer";
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
I can see limitedPark, but it's called inside a lambda? | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
long numSpareReadyNodesForCurrentFlavor = numSpareNodesByFlavor.get(entry.getKey());
entry.getValue().stream()
.limit(numSpareReadyNodesForCurrentFlavor)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.system));
return numSpareReadyNodesForCurrentFlavor < entry.getValue().size();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | return numSpareReadyNodesForCurrentFlavor < entry.getValue().size(); | boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
} | class NodeRetirer extends Maintainer {
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Duration interval, RetirementPolicy retirementPolicy) {
super(nodeRepository, interval);
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
long numNodesToToSpare = (long) Math.max(2, 0.1 * numActiveNodesByCurrentFlavor);
return Math.max(0L, entry.getValue() - numNodesToToSpare);
}));
}
@Override
public String toString() {
return "Node retirer";
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
Fixed. | long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToToSpare);
} | long numNodesToToSpare = (long) Math.ceil(0.1 * numActiveNodes); | long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
} | class NodeRetirer extends Maintainer {
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval);
this.retirementPolicy = retirementPolicy;
if (! Arrays.asList(applies).contains(zone)) deconstruct();
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getName()));
return limit >= nodesToPark.size();
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
@Override
public String toString() {
return "Node retirer";
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
}
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
} |
The "Parked by NodeRetirer" part of the comment could have been derived by park() from its agent argument? | boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
} | .forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName())); | boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
Perhaps it would make sense to add agent to the log message, in case the reason could be shortened to just the policy name. | boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
} | .forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName())); | boolean limitedPark(Set<Node> nodesToPark, long limit) {
nodesToPark.stream()
.limit(limit)
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Policy: " + retirementPolicy.getClass().getSimpleName()));
return limit >= nodesToPark.size();
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} | class NodeRetirer extends Maintainer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl,
RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.retirementPolicy = retirementPolicy;
}
@Override
protected void maintain() {
retireUnallocated();
}
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes();
Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* @param nodesToPark Nodes that we want to park
* @param limit Maximum number of nodes we want to park
* @return True iff we were able to park all the nodes
*/
Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
return allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> {
long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
}));
}
/**
* Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
* and ready nodes.
*/
long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
return Math.max(0L, numReadyNodes - numNodesToSpare);
}
} |
You should log the stack trace too. | public void run() {
log.info("Starting deconstruction of " + component);
try {
component.deconstruct();
log.info("Finished deconstructing " + component);
} catch (Exception e) {
log.warning("Exception thrown when deconstructing " + component + ": " + e.getClass().getName()
+ ": " + Exceptions.toMessageString(e));
}
} | } | public void run() {
log.info("Starting deconstruction of " + component);
try {
component.deconstruct();
log.info("Finished deconstructing " + component);
} catch (Exception e) {
log.log(WARNING, "Exception thrown when deconstructing " + component, e);
} catch (Throwable t) {
com.yahoo.protect.Process.logAndDie("Error when deconstructing " + component, t);
}
} | class DestructComponentTask implements Runnable {
private final AbstractComponent component;
DestructComponentTask(AbstractComponent component) {
this.component = component;
}
} | class DestructComponentTask implements Runnable {
private final AbstractComponent component;
DestructComponentTask(AbstractComponent component) {
this.component = component;
}
} |
You do not thing we should catch Error explicit ? | public void run() {
log.info("Starting deconstruction of " + component);
try {
component.deconstruct();
log.info("Finished deconstructing " + component);
} catch (Exception e) {
log.log(WARNING, "Exception thrown when deconstructing " + component, e);
} catch (Throwable t) {
com.yahoo.protect.Process.logAndDie("Error when deconstructing " + component, t);
}
} | } catch (Throwable t) { | public void run() {
log.info("Starting deconstruction of " + component);
try {
component.deconstruct();
log.info("Finished deconstructing " + component);
} catch (Exception e) {
log.log(WARNING, "Exception thrown when deconstructing " + component, e);
} catch (Throwable t) {
com.yahoo.protect.Process.logAndDie("Error when deconstructing " + component, t);
}
} | class DestructComponentTask implements Runnable {
private final AbstractComponent component;
DestructComponentTask(AbstractComponent component) {
this.component = component;
}
} | class DestructComponentTask implements Runnable {
private final AbstractComponent component;
DestructComponentTask(AbstractComponent component) {
this.component = component;
}
} |
If you want to make this even more legible, you can import the class 'ProtonConfig.Indexing.Write.Io', so it will read: ``` builder.indexing.write.io(Io.Enum.valueOf(write.name)); ``` | public void getConfig(ProtonConfig.Builder builder) {
if (write != null) {
builder.indexing.write.io(ProtonConfig.Indexing.Write.Io.Enum.valueOf(write.name));
}
if (read != null) {
builder.indexing.read.io(ProtonConfig.Indexing.Read.Io.Enum.valueOf(read.name));
}
if (search != null) {
builder.search.io(ProtonConfig.Search.Io.Enum.valueOf(search.name));
}
} | builder.indexing.write.io(ProtonConfig.Indexing.Write.Io.Enum.valueOf(write.name)); | public void getConfig(ProtonConfig.Builder builder) {
if (numSearchThreads!=null) builder.numsearcherthreads(numSearchThreads);
if (numThreadsPerSearch!=null) builder.numthreadspersearch(numThreadsPerSearch);
if (numSummaryThreads!=null) builder.numsummarythreads(numSummaryThreads);
} | class RequestThreads implements ProtonConfig.Producer {
public Integer numSearchThreads = null;
public Integer numThreadsPerSearch = null;
public Integer numSummaryThreads = null;
@Override
} | class RequestThreads implements ProtonConfig.Producer {
public Integer numSearchThreads = null;
public Integer numThreadsPerSearch = null;
public Integer numSummaryThreads = null;
@Override
} |
I like your suggestion, but in practice we have a problem with many sub-classes named Io: ProtonConfig.Summary.Write.Io, ProtonConfig.Summary.Read.Io, ProtonConfig.Search.Io | public void getConfig(ProtonConfig.Builder builder) {
if (write != null) {
builder.indexing.write.io(ProtonConfig.Indexing.Write.Io.Enum.valueOf(write.name));
}
if (read != null) {
builder.indexing.read.io(ProtonConfig.Indexing.Read.Io.Enum.valueOf(read.name));
}
if (search != null) {
builder.search.io(ProtonConfig.Search.Io.Enum.valueOf(search.name));
}
} | builder.indexing.write.io(ProtonConfig.Indexing.Write.Io.Enum.valueOf(write.name)); | public void getConfig(ProtonConfig.Builder builder) {
if (numSearchThreads!=null) builder.numsearcherthreads(numSearchThreads);
if (numThreadsPerSearch!=null) builder.numthreadspersearch(numThreadsPerSearch);
if (numSummaryThreads!=null) builder.numsummarythreads(numSummaryThreads);
} | class RequestThreads implements ProtonConfig.Producer {
public Integer numSearchThreads = null;
public Integer numThreadsPerSearch = null;
public Integer numSummaryThreads = null;
@Override
} | class RequestThreads implements ProtonConfig.Producer {
public Integer numSearchThreads = null;
public Integer numThreadsPerSearch = null;
public Integer numSummaryThreads = null;
@Override
} |
Layer violation: The serialization for persistence is independent of the serialization in the web service API, but here you are referring to the rest-api serialization format in a persistence layer test. I suggest you just inline the content of that file in the test. Also, you can add a TODO on removing that part of the test after May 2017. | public void serialize_additional_ip_addresses() throws IOException {
Node node = createNode();
node = node.withAdditionalIpAddresses(ImmutableSet.of("10.0.0.1", "10.0.0.2", "10.0.0.3"));
Node copy = nodeSerializer.fromJson(node.state(), nodeSerializer.toJson(node));
assertEquals(node.additionalIpAddresses(), copy.additionalIpAddresses());
node = createNode();
copy = nodeSerializer.fromJson(node.state(), nodeSerializer.toJson(node));
assertEquals(node.additionalIpAddresses(), copy.additionalIpAddresses());
byte[] jsonBeforeAdditionalIps = IOUtils.readFileBytes(new File("src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node1-before-additional-ip-addresses.json"));
node = nodeSerializer.fromJson(State.active, jsonBeforeAdditionalIps);
assertEquals(Collections.emptySet(), node.additionalIpAddresses());
} | node = nodeSerializer.fromJson(State.active, jsonBeforeAdditionalIps); | public void serialize_additional_ip_addresses() throws IOException {
Node node = createNode();
node = node.withAdditionalIpAddresses(ImmutableSet.of("10.0.0.1", "10.0.0.2", "10.0.0.3"));
Node copy = nodeSerializer.fromJson(node.state(), nodeSerializer.toJson(node));
assertEquals(node.additionalIpAddresses(), copy.additionalIpAddresses());
node = createNode();
copy = nodeSerializer.fromJson(node.state(), nodeSerializer.toJson(node));
assertEquals(node.additionalIpAddresses(), copy.additionalIpAddresses());
String json = "{\n" +
" \"url\": \"http:
" \"id\": \"host1.yahoo.com\",\n" +
" \"state\": \"active\",\n" +
" \"type\": \"tenant\",\n" +
" \"hostname\": \"host1.yahoo.com\",\n" +
" \"openStackId\": \"node1\",\n" +
" \"flavor\": \"default\",\n" +
" \"canonicalFlavor\": \"default\",\n" +
" \"minDiskAvailableGb\":400.0,\n" +
" \"minMainMemoryAvailableGb\":16.0,\n" +
" \"description\":\"Flavor-name-is-default\",\n" +
" \"minCpuCores\":2.0,\n" +
" \"environment\":\"BARE_METAL\",\n" +
" \"owner\": {\n" +
" \"tenant\": \"tenant2\",\n" +
" \"application\": \"application2\",\n" +
" \"instance\": \"instance2\"\n" +
" },\n" +
" \"membership\": {\n" +
" \"clustertype\": \"content\",\n" +
" \"clusterid\": \"id2\",\n" +
" \"group\": \"0\",\n" +
" \"index\": 0,\n" +
" \"retired\": false\n" +
" },\n" +
" \"restartGeneration\": 0,\n" +
" \"currentRestartGeneration\": 0,\n" +
" \"wantedDockerImage\":\"docker-registry.ops.yahoo.com:4443/vespa/ci:6.42.0\",\n" +
" \"wantedVespaVersion\":\"6.42.0\",\n" +
" \"rebootGeneration\": 1,\n" +
" \"currentRebootGeneration\": 0,\n" +
" \"failCount\": 0,\n" +
" \"wantToRetire\" : false,\n" +
" \"history\":[{\"type\":\"readied\",\"at\":123,\"type\":\"system\"},{\"type\":\"reserved\",\"at\":123,\"agent\":\"application\"},{\"type\":\"activated\",\"at\":123,\"agent\":\"application\"}],\n" +
" \"ipAddresses\":[\"::1\", \"127.0.0.1\"]\n" +
"}";
node = nodeSerializer.fromJson(State.active, Utf8.toBytes(json));
assertEquals(Collections.emptySet(), node.additionalIpAddresses());
} | class SerializationTest {
private final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default", "large", "ugccloud-container");
private final NodeSerializer nodeSerializer = new NodeSerializer(nodeFlavors);
private final ManualClock clock = new ManualClock();
@Test
public void testProvisionedNodeSerialization() {
Node node = createNode();
Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
assertEquals(node.id(), copy.id());
assertEquals(node.hostname(), copy.hostname());
assertEquals(node.state(), copy.state());
assertFalse(copy.allocation().isPresent());
assertEquals(0, copy.history().events().size());
}
@Test
public void testReservedNodeSerialization() {
Node node = createNode();
clock.advance(Duration.ofMinutes(3));
assertEquals(0, node.history().events().size());
node = node.allocate(ApplicationId.from(TenantName.from("myTenant"),
ApplicationName.from("myApplication"),
InstanceName.from("myInstance")),
ClusterMembership.from("content/myId/0/0", Vtag.currentVersion),
clock.instant());
assertEquals(1, node.history().events().size());
node = node.withRestart(new Generation(1, 2));
node = node.withReboot(new Generation(3, 4));
node = node.with(FlavorConfigBuilder.createDummies("large").getFlavorOrThrow("large"));
node = node.with(node.status().withVespaVersion(Version.fromString("1.2.3")));
node = node.with(node.status().withIncreasedFailCount().withIncreasedFailCount());
node = node.with(node.status().withHardwareFailure(Optional.of(Status.HardwareFailureType.memory_mcelog)));
node = node.with(NodeType.tenant);
Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
assertEquals(node.id(), copy.id());
assertEquals(node.hostname(), copy.hostname());
assertEquals(node.state(), copy.state());
assertEquals(1, copy.allocation().get().restartGeneration().wanted());
assertEquals(2, copy.allocation().get().restartGeneration().current());
assertEquals(3, copy.status().reboot().wanted());
assertEquals(4, copy.status().reboot().current());
assertEquals("large", copy.flavor().name());
assertEquals("1.2.3", copy.status().vespaVersion().get().toString());
assertEquals(2, copy.status().failCount());
assertEquals(Status.HardwareFailureType.memory_mcelog, copy.status().hardwareFailure().get());
assertEquals(node.allocation().get().owner(), copy.allocation().get().owner());
assertEquals(node.allocation().get().membership(), copy.allocation().get().membership());
assertEquals(node.allocation().get().isRemovable(), copy.allocation().get().isRemovable());
assertEquals(1, copy.history().events().size());
assertEquals(clock.instant(), copy.history().event(History.Event.Type.reserved).get().at());
assertEquals(NodeType.tenant, copy.type());
}
@Test
public void testDefaultType() {
Node node = createNode().allocate(ApplicationId.from(TenantName.from("myTenant"),
ApplicationName.from("myApplication"),
InstanceName.from("myInstance")),
ClusterMembership.from("content/myId/0/0", Vtag.currentVersion),
clock.instant());
Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
assertEquals(NodeType.host, copy.type());
}
@Test
public void testRebootAndRestartAndTypeNoCurrentValuesSerialization() {
String nodeData =
"{\n" +
" \"type\" : \"tenant\",\n" +
" \"rebootGeneration\" : 1,\n" +
" \"currentRebootGeneration\" : 2,\n" +
" \"flavor\" : \"large\",\n" +
" \"history\" : [\n" +
" {\n" +
" \"type\" : \"reserved\",\n" +
" \"at\" : 1444391402611\n" +
" }\n" +
" ],\n" +
" \"instance\" : {\n" +
" \"applicationId\" : \"myApplication\",\n" +
" \"tenantId\" : \"myTenant\",\n" +
" \"instanceId\" : \"myInstance\",\n" +
" \"serviceId\" : \"content/myId/0\",\n" +
" \"restartGeneration\" : 3,\n" +
" \"currentRestartGeneration\" : 4,\n" +
" \"removable\" : true\n" +
" },\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
" \"ipAddresses\" : [\"127.0.0.1\"]\n" +
"}";
Node node = nodeSerializer.fromJson(Node.State.provisioned, Utf8.toBytes(nodeData));
assertEquals("large", node.flavor().canonicalName());
assertEquals(1, node.status().reboot().wanted());
assertEquals(2, node.status().reboot().current());
assertEquals(3, node.allocation().get().restartGeneration().wanted());
assertEquals(4, node.allocation().get().restartGeneration().current());
assertTrue(node.allocation().get().isRemovable());
assertEquals(NodeType.tenant, node.type());
}
@Test
public void testRetiredNodeSerialization() {
Node node = createNode();
clock.advance(Duration.ofMinutes(3));
assertEquals(0, node.history().events().size());
node = node.allocate(ApplicationId.from(TenantName.from("myTenant"),
ApplicationName.from("myApplication"),
InstanceName.from("myInstance")),
ClusterMembership.from("content/myId/0", Vtag.currentVersion),
clock.instant());
assertEquals(1, node.history().events().size());
clock.advance(Duration.ofMinutes(2));
node = node.retire(Agent.application, clock.instant());
Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
assertEquals(2, copy.history().events().size());
assertEquals(clock.instant(), copy.history().event(History.Event.Type.retired).get().at());
assertEquals(Agent.application,
(copy.history().event(History.Event.Type.retired).get()).agent());
assertTrue(copy.allocation().get().membership().retired());
Node removable = copy.with(node.allocation().get().removable());
Node removableCopy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(removable));
assertTrue(removableCopy.allocation().get().isRemovable());
}
@Test
public void testAssimilatedDeserialization() {
Node node = nodeSerializer.fromJson(Node.State.active, ("{\n" +
" \"type\": \"tenant\",\n" +
" \"hostname\": \"assimilate2.vespahosted.yahoo.tld\",\n" +
" \"ipAddresses\": [\"127.0.0.1\"],\n" +
" \"openStackId\": \"\",\n" +
" \"flavor\": \"ugccloud-container\",\n" +
" \"instance\": {\n" +
" \"tenantId\": \"by_mortent\",\n" +
" \"applicationId\": \"ugc-assimilate\",\n" +
" \"instanceId\": \"default\",\n" +
" \"serviceId\": \"container/ugccloud-container/0/0\",\n" +
" \"restartGeneration\": 0\n" +
" }\n" +
"}\n").getBytes());
assertEquals(0, node.history().events().size());
assertTrue(node.allocation().isPresent());
assertEquals("ugccloud-container", node.allocation().get().membership().cluster().id().value());
assertEquals("container", node.allocation().get().membership().cluster().type().name());
assertEquals(0, node.allocation().get().membership().cluster().group().get().index());
Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
assertEquals(0, copy.history().events().size());
}
@Test
public void testSetFailCount() {
Node node = createNode();
node = node.allocate(ApplicationId.from(TenantName.from("myTenant"),
ApplicationName.from("myApplication"),
InstanceName.from("myInstance")),
ClusterMembership.from("content/myId/0/0", Vtag.currentVersion),
clock.instant());
node = node.with(node.status().setFailCount(0));
Node copy2 = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
assertEquals(0, copy2.status().failCount());
}
@Test
public void serialize_docker_image() {
Node node = createNode();
Version version = new DockerImage("docker-registry.ops.yahoo.com:4443/vespa/ci:6.42.0").tagAsVersion();
ClusterMembership clusterMembership = ClusterMembership.from("content/myId/0", version);
Node nodeWithAllocation = node.with(
new Allocation(ApplicationId.from(TenantName.from("myTenant"),
ApplicationName.from("myApplication"),
InstanceName.from("myInstance")),
clusterMembership,
new Generation(0, 0),
false));
Node deserializedNode = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(nodeWithAllocation));
assertEquals("docker-registry.ops.yahoo.com:4443/vespa/ci:6.42.0", deserializedNode.allocation().get().membership().cluster().dockerImage());
}
@Test
public void serialize_parentHostname() {
final String parentHostname = "parent.yahoo.com";
Node node = Node.create("myId", singleton("127.0.0.1"), Collections.emptySet(), "myHostname", Optional.of(parentHostname), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant);
Node deserializedNode = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(node));
assertEquals(parentHostname, deserializedNode.parentHostname().get());
}
@Test
public void serializes_multiple_ip_addresses() {
byte[] nodeWithMultipleIps = createNodeJson("node4.yahoo.tld", "127.0.0.4", "::4");
Node deserializedNode = nodeSerializer.fromJson(State.provisioned, nodeWithMultipleIps);
assertEquals(ImmutableSet.of("127.0.0.4", "::4"), deserializedNode.ipAddresses());
}
@Test
@Test
public void want_to_retire_defaults_to_false() {
String nodeData =
"{\n" +
" \"type\" : \"tenant\",\n" +
" \"flavor\" : \"large\",\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
" \"ipAddresses\" : [\"127.0.0.1\"]\n" +
"}";
Node node = nodeSerializer.fromJson(State.provisioned, Utf8.toBytes(nodeData));
assertFalse(node.status().wantToRetire());
}
@Test
public void vespa_version_serialization() throws Exception {
String nodeWithDockerImage =
"{\n" +
" \"type\" : \"tenant\",\n" +
" \"flavor\" : \"large\",\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
" \"ipAddresses\" : [\"127.0.0.1\"],\n" +
" \"instance\": {\n" +
" \"serviceId\": \"content/myId/0\",\n" +
" \"dockerImage\": \"docker-registry.some.domain:4443/vespa/ci:6.42.1\"\n" +
" }\n" +
"}";
Node node = nodeSerializer.fromJson(State.active, Utf8.toBytes(nodeWithDockerImage));
assertEquals("6.42.1", node.allocation().get().membership().cluster().vespaVersion().toString());
assertEquals("docker-registry.ops.yahoo.com:4443/vespa/ci:6.42.1", node.allocation().get().membership().cluster().dockerImage());
String nodeWithWantedVespaVersion =
"{\n" +
" \"type\" : \"tenant\",\n" +
" \"flavor\" : \"large\",\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
" \"ipAddresses\" : [\"127.0.0.1\"],\n" +
" \"instance\": {\n" +
" \"serviceId\": \"content/myId/0\",\n" +
" \"wantedVespaVersion\": \"6.42.2\"\n" +
" }\n" +
"}";
node = nodeSerializer.fromJson(State.active, Utf8.toBytes(nodeWithWantedVespaVersion));
assertEquals("6.42.2", node.allocation().get().membership().cluster().vespaVersion().toString());
assertEquals("docker-registry.ops.yahoo.com:4443/vespa/ci:6.42.2", node.allocation().get().membership().cluster().dockerImage());
}
@Test
public void docker_image_is_derived_from_vespa_version() throws Exception {
String nodeData =
"{\n" +
" \"type\" : \"tenant\",\n" +
" \"flavor\" : \"large\",\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
" \"ipAddresses\" : [\"127.0.0.1\"],\n" +
" \"vespaVersion\": \"6.42.1\"\n" +
"}";
Node node = nodeSerializer.fromJson(State.active, Utf8.toBytes(nodeData));
assertEquals("docker-registry.ops.yahoo.com:4443/vespa/ci:6.42.1", node.status().dockerImage().get());
}
private byte[] createNodeJson(String hostname, String... ipAddress) {
String ipAddressJsonPart = "";
if (ipAddress.length > 0) {
ipAddressJsonPart = "\"ipAddresses\":[" +
Arrays.stream(ipAddress)
.map(ip -> "\"" + ip + "\"")
.collect(Collectors.joining(",")) +
"],";
}
return ("{\"hostname\":\"" + hostname + "\"," +
ipAddressJsonPart +
"\"openStackId\":\"myId\"," +
"\"flavor\":\"default\",\"rebootGeneration\":0," +
"\"currentRebootGeneration\":0,\"failCount\":0,\"history\":[],\"type\":\"tenant\"}")
.getBytes(StandardCharsets.UTF_8);
}
private Node createNode() {
return Node.create("myId", singleton("127.0.0.1"), Collections.emptySet(), "myHostname", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host);
}
} | class SerializationTest {
private final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default", "large", "ugccloud-container");
private final NodeSerializer nodeSerializer = new NodeSerializer(nodeFlavors);
private final ManualClock clock = new ManualClock();
@Test
public void testProvisionedNodeSerialization() {
Node node = createNode();
Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
assertEquals(node.id(), copy.id());
assertEquals(node.hostname(), copy.hostname());
assertEquals(node.state(), copy.state());
assertFalse(copy.allocation().isPresent());
assertEquals(0, copy.history().events().size());
}
@Test
public void testReservedNodeSerialization() {
Node node = createNode();
clock.advance(Duration.ofMinutes(3));
assertEquals(0, node.history().events().size());
node = node.allocate(ApplicationId.from(TenantName.from("myTenant"),
ApplicationName.from("myApplication"),
InstanceName.from("myInstance")),
ClusterMembership.from("content/myId/0/0", Vtag.currentVersion),
clock.instant());
assertEquals(1, node.history().events().size());
node = node.withRestart(new Generation(1, 2));
node = node.withReboot(new Generation(3, 4));
node = node.with(FlavorConfigBuilder.createDummies("large").getFlavorOrThrow("large"));
node = node.with(node.status().withVespaVersion(Version.fromString("1.2.3")));
node = node.with(node.status().withIncreasedFailCount().withIncreasedFailCount());
node = node.with(node.status().withHardwareFailure(Optional.of(Status.HardwareFailureType.memory_mcelog)));
node = node.with(NodeType.tenant);
Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
assertEquals(node.id(), copy.id());
assertEquals(node.hostname(), copy.hostname());
assertEquals(node.state(), copy.state());
assertEquals(1, copy.allocation().get().restartGeneration().wanted());
assertEquals(2, copy.allocation().get().restartGeneration().current());
assertEquals(3, copy.status().reboot().wanted());
assertEquals(4, copy.status().reboot().current());
assertEquals("large", copy.flavor().name());
assertEquals("1.2.3", copy.status().vespaVersion().get().toString());
assertEquals(2, copy.status().failCount());
assertEquals(Status.HardwareFailureType.memory_mcelog, copy.status().hardwareFailure().get());
assertEquals(node.allocation().get().owner(), copy.allocation().get().owner());
assertEquals(node.allocation().get().membership(), copy.allocation().get().membership());
assertEquals(node.allocation().get().isRemovable(), copy.allocation().get().isRemovable());
assertEquals(1, copy.history().events().size());
assertEquals(clock.instant(), copy.history().event(History.Event.Type.reserved).get().at());
assertEquals(NodeType.tenant, copy.type());
}
@Test
public void testDefaultType() {
Node node = createNode().allocate(ApplicationId.from(TenantName.from("myTenant"),
ApplicationName.from("myApplication"),
InstanceName.from("myInstance")),
ClusterMembership.from("content/myId/0/0", Vtag.currentVersion),
clock.instant());
Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
assertEquals(NodeType.host, copy.type());
}
@Test
public void testRebootAndRestartAndTypeNoCurrentValuesSerialization() {
String nodeData =
"{\n" +
" \"type\" : \"tenant\",\n" +
" \"rebootGeneration\" : 1,\n" +
" \"currentRebootGeneration\" : 2,\n" +
" \"flavor\" : \"large\",\n" +
" \"history\" : [\n" +
" {\n" +
" \"type\" : \"reserved\",\n" +
" \"at\" : 1444391402611\n" +
" }\n" +
" ],\n" +
" \"instance\" : {\n" +
" \"applicationId\" : \"myApplication\",\n" +
" \"tenantId\" : \"myTenant\",\n" +
" \"instanceId\" : \"myInstance\",\n" +
" \"serviceId\" : \"content/myId/0\",\n" +
" \"restartGeneration\" : 3,\n" +
" \"currentRestartGeneration\" : 4,\n" +
" \"removable\" : true\n" +
" },\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
" \"ipAddresses\" : [\"127.0.0.1\"]\n" +
"}";
Node node = nodeSerializer.fromJson(Node.State.provisioned, Utf8.toBytes(nodeData));
assertEquals("large", node.flavor().canonicalName());
assertEquals(1, node.status().reboot().wanted());
assertEquals(2, node.status().reboot().current());
assertEquals(3, node.allocation().get().restartGeneration().wanted());
assertEquals(4, node.allocation().get().restartGeneration().current());
assertTrue(node.allocation().get().isRemovable());
assertEquals(NodeType.tenant, node.type());
}
@Test
public void testRetiredNodeSerialization() {
    // Allocation and retirement each append a history event; both events, the
    // retiring agent, and the retired/removable flags must survive serialization.
    Node node = createNode();
    clock.advance(Duration.ofMinutes(3));
    assertEquals(0, node.history().events().size());
    ApplicationId application = ApplicationId.from(TenantName.from("myTenant"),
                                                   ApplicationName.from("myApplication"),
                                                   InstanceName.from("myInstance"));
    node = node.allocate(application,
                         ClusterMembership.from("content/myId/0", Vtag.currentVersion),
                         clock.instant());
    assertEquals(1, node.history().events().size());
    clock.advance(Duration.ofMinutes(2));
    node = node.retire(Agent.application, clock.instant());
    Node roundTripped = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
    assertEquals(2, roundTripped.history().events().size());
    History.Event retiredEvent = roundTripped.history().event(History.Event.Type.retired).get();
    assertEquals(clock.instant(), retiredEvent.at());
    assertEquals(Agent.application, retiredEvent.agent());
    assertTrue(roundTripped.allocation().get().membership().retired());
    Node removable = roundTripped.with(node.allocation().get().removable());
    Node removableRoundTripped = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(removable));
    assertTrue(removableRoundTripped.allocation().get().isRemovable());
}
@Test
public void testAssimilatedDeserialization() {
// Minimal "assimilated" node JSON: no history, no generations, but a full
// allocation with a cluster/group-encoded serviceId ("container/ugccloud-container/0/0").
Node node = nodeSerializer.fromJson(Node.State.active, ("{\n" +
" \"type\": \"tenant\",\n" +
" \"hostname\": \"assimilate2.vespahosted.yahoo.tld\",\n" +
" \"ipAddresses\": [\"127.0.0.1\"],\n" +
" \"openStackId\": \"\",\n" +
" \"flavor\": \"ugccloud-container\",\n" +
" \"instance\": {\n" +
" \"tenantId\": \"by_mortent\",\n" +
" \"applicationId\": \"ugc-assimilate\",\n" +
" \"instanceId\": \"default\",\n" +
" \"serviceId\": \"container/ugccloud-container/0/0\",\n" +
" \"restartGeneration\": 0\n" +
" }\n" +
"}\n").getBytes());
// The absent history must deserialize as empty, not fail.
assertEquals(0, node.history().events().size());
assertTrue(node.allocation().isPresent());
// Cluster id, type and group index are parsed out of the serviceId string.
assertEquals("ugccloud-container", node.allocation().get().membership().cluster().id().value());
assertEquals("container", node.allocation().get().membership().cluster().type().name());
assertEquals(0, node.allocation().get().membership().cluster().group().get().index());
// Re-serializing must still yield an empty history.
Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
assertEquals(0, copy.history().events().size());
}
@Test
public void testSetFailCount() {
    // An explicitly set fail count (even zero) must survive a serialization round trip.
    ApplicationId application = ApplicationId.from(TenantName.from("myTenant"),
                                                   ApplicationName.from("myApplication"),
                                                   InstanceName.from("myInstance"));
    Node allocated = createNode().allocate(application,
                                           ClusterMembership.from("content/myId/0/0", Vtag.currentVersion),
                                           clock.instant());
    Node withFailCount = allocated.with(allocated.status().setFailCount(0));
    Node roundTripped = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(withFailCount));
    assertEquals(0, roundTripped.status().failCount());
}
@Test
public void serialize_docker_image() {
    // A docker image encoded as the membership's Vespa version must be
    // reconstructed verbatim after a serialization round trip.
    Version imageVersion = new DockerImage("docker-registry.ops.yahoo.com:4443/vespa/ci:6.42.0").tagAsVersion();
    Allocation allocation = new Allocation(ApplicationId.from(TenantName.from("myTenant"),
                                                              ApplicationName.from("myApplication"),
                                                              InstanceName.from("myInstance")),
                                           ClusterMembership.from("content/myId/0", imageVersion),
                                           new Generation(0, 0),
                                           false);
    Node allocated = createNode().with(allocation);
    Node roundTripped = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(allocated));
    assertEquals("docker-registry.ops.yahoo.com:4443/vespa/ci:6.42.0",
                 roundTripped.allocation().get().membership().cluster().dockerImage());
}
@Test
public void serialize_parentHostname() {
    // The parent hostname of a child node must be preserved by serialization.
    String parentHostname = "parent.yahoo.com";
    Node child = Node.create("myId", singleton("127.0.0.1"), Collections.emptySet(), "myHostname",
                             Optional.of(parentHostname), nodeFlavors.getFlavorOrThrow("default"),
                             NodeType.tenant);
    Node roundTripped = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(child));
    assertEquals(parentHostname, roundTripped.parentHostname().get());
}
@Test
public void serializes_multiple_ip_addresses() {
    // Both the IPv4 and the IPv6 address must be present after deserialization.
    byte[] json = createNodeJson("node4.yahoo.tld", "127.0.0.4", "::4");
    Node parsed = nodeSerializer.fromJson(State.provisioned, json);
    assertEquals(ImmutableSet.of("127.0.0.4", "::4"), parsed.ipAddresses());
}
// Fix: the @Test annotation was duplicated, which is a compile error since
// JUnit 4's @Test is not a repeatable annotation.
@Test
public void want_to_retire_defaults_to_false() {
    // A serialized node with no "wantToRetire" field must deserialize with the flag off.
    String nodeData =
            "{\n" +
            " \"type\" : \"tenant\",\n" +
            " \"flavor\" : \"large\",\n" +
            " \"openStackId\" : \"myId\",\n" +
            " \"hostname\" : \"myHostname\",\n" +
            " \"ipAddresses\" : [\"127.0.0.1\"]\n" +
            "}";
    Node node = nodeSerializer.fromJson(State.provisioned, Utf8.toBytes(nodeData));
    assertFalse(node.status().wantToRetire());
}
@Test
public void vespa_version_serialization() throws Exception {
// Legacy JSON carrying a full dockerImage reference on the allocation.
String nodeWithDockerImage =
"{\n" +
" \"type\" : \"tenant\",\n" +
" \"flavor\" : \"large\",\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
" \"ipAddresses\" : [\"127.0.0.1\"],\n" +
" \"instance\": {\n" +
" \"serviceId\": \"content/myId/0\",\n" +
" \"dockerImage\": \"docker-registry.some.domain:4443/vespa/ci:6.42.1\"\n" +
" }\n" +
"}";
Node node = nodeSerializer.fromJson(State.active, Utf8.toBytes(nodeWithDockerImage));
// The Vespa version is taken from the image tag; note the registry host is
// normalized ("docker-registry.some.domain" in → "docker-registry.ops.yahoo.com" out),
// so only the tag/version portion of the legacy field is preserved.
assertEquals("6.42.1", node.allocation().get().membership().cluster().vespaVersion().toString());
assertEquals("docker-registry.ops.yahoo.com:4443/vespa/ci:6.42.1", node.allocation().get().membership().cluster().dockerImage());
// Newer JSON carrying a plain wantedVespaVersion instead of a docker image.
String nodeWithWantedVespaVersion =
"{\n" +
" \"type\" : \"tenant\",\n" +
" \"flavor\" : \"large\",\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
" \"ipAddresses\" : [\"127.0.0.1\"],\n" +
" \"instance\": {\n" +
" \"serviceId\": \"content/myId/0\",\n" +
" \"wantedVespaVersion\": \"6.42.2\"\n" +
" }\n" +
"}";
node = nodeSerializer.fromJson(State.active, Utf8.toBytes(nodeWithWantedVespaVersion));
// The docker image is derived from the wanted Vespa version.
assertEquals("6.42.2", node.allocation().get().membership().cluster().vespaVersion().toString());
assertEquals("docker-registry.ops.yahoo.com:4443/vespa/ci:6.42.2", node.allocation().get().membership().cluster().dockerImage());
}
@Test
public void docker_image_is_derived_from_vespa_version() throws Exception {
// A node-level "vespaVersion" field alone must yield a full docker image
// reference (default registry + that version as tag) in the node status.
String nodeData =
"{\n" +
" \"type\" : \"tenant\",\n" +
" \"flavor\" : \"large\",\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
" \"ipAddresses\" : [\"127.0.0.1\"],\n" +
" \"vespaVersion\": \"6.42.1\"\n" +
"}";
Node node = nodeSerializer.fromJson(State.active, Utf8.toBytes(nodeData));
assertEquals("docker-registry.ops.yahoo.com:4443/vespa/ci:6.42.1", node.status().dockerImage().get());
}
/**
 * Builds minimal node JSON (UTF-8 bytes) for the given hostname; the
 * "ipAddresses" array is emitted only when at least one address is given.
 */
private byte[] createNodeJson(String hostname, String... ipAddress) {
    StringBuilder ipPart = new StringBuilder();
    if (ipAddress.length > 0) {
        ipPart.append("\"ipAddresses\":[");
        for (int i = 0; i < ipAddress.length; i++) {
            if (i > 0) ipPart.append(",");
            ipPart.append("\"").append(ipAddress[i]).append("\"");
        }
        ipPart.append("],");
    }
    String json = "{\"hostname\":\"" + hostname + "\"," +
                  ipPart +
                  "\"openStackId\":\"myId\"," +
                  "\"flavor\":\"default\",\"rebootGeneration\":0," +
                  "\"currentRebootGeneration\":0,\"failCount\":0,\"history\":[],\"type\":\"tenant\"}";
    return json.getBytes(StandardCharsets.UTF_8);
}
/** Creates a bare test node: single IPv4 address, no parent host, default flavor, host type. */
private Node createNode() {
return Node.create("myId", singleton("127.0.0.1"), Collections.emptySet(), "myHostname", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host);
}
} |
Not sure if this comment adds any value | OperationHandlerImpl createHandler() throws Exception {
VisitorSession visitorSession = mock(VisitorSession.class);
when(documentAccess.createVisitorSession(any(VisitorParameters.class))).thenAnswer(p -> {
VisitorParameters params = (VisitorParameters)p.getArguments()[0];
assignedParameters.set(params);
VisitorStatistics statistics = new VisitorStatistics();
statistics.setBucketsVisited(bucketsVisited);
params.getControlHandler().onVisitorStatistics(statistics);
ProgressToken progress = new ProgressToken();
params.getControlHandler().onProgress(progress);
params.getControlHandler().onDone(completionCode, "bork bork");
return visitorSession;
});
OperationHandlerImpl.ClusterEnumerator clusterEnumerator = () -> Arrays.asList(new ClusterDef("foo", "configId"));
return new OperationHandlerImpl(documentAccess, clusterEnumerator);
} | OperationHandlerImpl createHandler() throws Exception {
VisitorSession visitorSession = mock(VisitorSession.class);
when(documentAccess.createVisitorSession(any(VisitorParameters.class))).thenAnswer(p -> {
VisitorParameters params = (VisitorParameters)p.getArguments()[0];
assignedParameters.set(params);
VisitorStatistics statistics = new VisitorStatistics();
statistics.setBucketsVisited(bucketsVisited);
params.getControlHandler().onVisitorStatistics(statistics);
ProgressToken progress = new ProgressToken();
params.getControlHandler().onProgress(progress);
params.getControlHandler().onDone(completionCode, "bork bork");
return visitorSession;
});
OperationHandlerImpl.ClusterEnumerator clusterEnumerator = () -> Arrays.asList(new ClusterDef("foo", "configId"));
return new OperationHandlerImpl(documentAccess, clusterEnumerator);
} | class OperationHandlerImplFixture {
DocumentAccess documentAccess = mock(DocumentAccess.class);
AtomicReference<VisitorParameters> assignedParameters = new AtomicReference<>();
VisitorControlHandler.CompletionCode completionCode = VisitorControlHandler.CompletionCode.SUCCESS;
int bucketsVisited = 0;
} | class OperationHandlerImplFixture {
DocumentAccess documentAccess = mock(DocumentAccess.class);
AtomicReference<VisitorParameters> assignedParameters = new AtomicReference<>();
VisitorControlHandler.CompletionCode completionCode = VisitorControlHandler.CompletionCode.SUCCESS;
int bucketsVisited = 0;
} | |
It's true, though! 😬 But I'll remove it. | OperationHandlerImpl createHandler() throws Exception {
VisitorSession visitorSession = mock(VisitorSession.class);
when(documentAccess.createVisitorSession(any(VisitorParameters.class))).thenAnswer(p -> {
VisitorParameters params = (VisitorParameters)p.getArguments()[0];
assignedParameters.set(params);
VisitorStatistics statistics = new VisitorStatistics();
statistics.setBucketsVisited(bucketsVisited);
params.getControlHandler().onVisitorStatistics(statistics);
ProgressToken progress = new ProgressToken();
params.getControlHandler().onProgress(progress);
params.getControlHandler().onDone(completionCode, "bork bork");
return visitorSession;
});
OperationHandlerImpl.ClusterEnumerator clusterEnumerator = () -> Arrays.asList(new ClusterDef("foo", "configId"));
return new OperationHandlerImpl(documentAccess, clusterEnumerator);
} | OperationHandlerImpl createHandler() throws Exception {
VisitorSession visitorSession = mock(VisitorSession.class);
when(documentAccess.createVisitorSession(any(VisitorParameters.class))).thenAnswer(p -> {
VisitorParameters params = (VisitorParameters)p.getArguments()[0];
assignedParameters.set(params);
VisitorStatistics statistics = new VisitorStatistics();
statistics.setBucketsVisited(bucketsVisited);
params.getControlHandler().onVisitorStatistics(statistics);
ProgressToken progress = new ProgressToken();
params.getControlHandler().onProgress(progress);
params.getControlHandler().onDone(completionCode, "bork bork");
return visitorSession;
});
OperationHandlerImpl.ClusterEnumerator clusterEnumerator = () -> Arrays.asList(new ClusterDef("foo", "configId"));
return new OperationHandlerImpl(documentAccess, clusterEnumerator);
} | class OperationHandlerImplFixture {
DocumentAccess documentAccess = mock(DocumentAccess.class);
AtomicReference<VisitorParameters> assignedParameters = new AtomicReference<>();
VisitorControlHandler.CompletionCode completionCode = VisitorControlHandler.CompletionCode.SUCCESS;
int bucketsVisited = 0;
} | class OperationHandlerImplFixture {
DocumentAccess documentAccess = mock(DocumentAccess.class);
AtomicReference<VisitorParameters> assignedParameters = new AtomicReference<>();
VisitorControlHandler.CompletionCode completionCode = VisitorControlHandler.CompletionCode.SUCCESS;
int bucketsVisited = 0;
} | |
Trailing whitespace added. | public static DeploymentSpec fromXml(Reader reader) {
List<DeclaredZone> zones = new ArrayList<>();
Element root = XML.getDocument(reader).getDocumentElement();
Optional<String> globalServiceId = Optional.empty();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
List<Element> regionTags = XML.getChildren(environmentTag, "region");
if (regionTags.isEmpty()) {
zones.add(new DeclaredZone(environment, Optional.empty(), false));
}
else {
for (Element regionTag : regionTags) {
RegionName region = RegionName.from(XML.getValue(regionTag).trim());
boolean active = environment == Environment.prod && readActive(regionTag);
zones.add(new DeclaredZone(environment, Optional.of(region), active));
}
}
if (Environment.prod.equals(environment)) {
globalServiceId = readGlobalServiceId(environmentTag);
} else if (readGlobalServiceId(environmentTag).isPresent()) {
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), zones);
} | Optional<String> globalServiceId = Optional.empty(); | public static DeploymentSpec fromXml(Reader reader) {
List<DeclaredZone> zones = new ArrayList<>();
Element root = XML.getDocument(reader).getDocumentElement();
Optional<String> globalServiceId = Optional.empty();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
List<Element> regionTags = XML.getChildren(environmentTag, "region");
if (regionTags.isEmpty()) {
zones.add(new DeclaredZone(environment, Optional.empty(), false));
}
else {
for (Element regionTag : regionTags) {
RegionName region = RegionName.from(XML.getValue(regionTag).trim());
boolean active = environment == Environment.prod && readActive(regionTag);
zones.add(new DeclaredZone(environment, Optional.of(region), active));
}
}
if (Environment.prod.equals(environment)) {
globalServiceId = readGlobalServiceId(environmentTag);
} else if (readGlobalServiceId(environmentTag).isPresent()) {
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), zones);
} | class DeploymentSpec {
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<DeclaredZone> zones;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<DeclaredZone> zones) {
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.zones = Collections.unmodifiableList(new ArrayList<>(zones));
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the zones this declares as a read-only list. */
public List<DeclaredZone> zones() { return zones; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (DeclaredZone declaredZone : zones)
if (declaredZone.matches(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
/** Returns whether we should assume this tag name is the name of an environment */
private static boolean isEnvironmentName(String tagName) {
if (tagName.equals("upgrade")) return false;
return true;
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main (String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
public static class DeclaredZone {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
public boolean matches(Environment environment, Optional<RegionName> region) {
if (environment.equals(this.environment) && region.equals(this.region)) return true;
if ( ! region.isPresent() && prerequisite(environment)) return true;
return false;
}
/**
* Returns whether deployment in the given environment is a prerequisite of deployment in this environment
*
* The required progression leading to prerequisites is test, staging, prod.
*/
private boolean prerequisite(Environment environment) {
if (this.environment == Environment.prod)
return environment == Environment.staging || environment == Environment.test;
if (this.environment == Environment.staging)
return environment == Environment.test;
return false;
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} | class DeploymentSpec {
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<DeclaredZone> zones;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<DeclaredZone> zones) {
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.zones = ImmutableList.copyOf(zones);
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the zones this declares as a read-only list. */
public List<DeclaredZone> zones() { return zones; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (DeclaredZone declaredZone : zones)
if (declaredZone.matches(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main (String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
public static class DeclaredZone {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
public boolean matches(Environment environment, Optional<RegionName> region) {
if (environment.equals(this.environment) && region.equals(this.region)) return true;
if ( ! region.isPresent() && prerequisite(environment)) return true;
return false;
}
/**
* Returns whether deployment in the given environment is a prerequisite of deployment in this environment
*
* The required progression leading to prerequisites is test, staging, prod.
*/
private boolean prerequisite(Environment environment) {
if (this.environment == Environment.prod)
return environment == Environment.staging || environment == Environment.test;
if (this.environment == Environment.staging)
return environment == Environment.test;
return false;
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} |
`ImmutableList.copyOf`? Or is Guava not available here? | public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<DeclaredZone> zones) {
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.zones = Collections.unmodifiableList(new ArrayList<>(zones));
} | this.zones = Collections.unmodifiableList(new ArrayList<>(zones)); | public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<DeclaredZone> zones) {
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.zones = ImmutableList.copyOf(zones);
} | class DeploymentSpec {
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<DeclaredZone> zones;
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the zones this declares as a read-only list. */
public List<DeclaredZone> zones() { return zones; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (DeclaredZone declaredZone : zones)
if (declaredZone.matches(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
List<DeclaredZone> zones = new ArrayList<>();
Element root = XML.getDocument(reader).getDocumentElement();
Optional<String> globalServiceId = Optional.empty();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
List<Element> regionTags = XML.getChildren(environmentTag, "region");
if (regionTags.isEmpty()) {
zones.add(new DeclaredZone(environment, Optional.empty(), false));
}
else {
for (Element regionTag : regionTags) {
RegionName region = RegionName.from(XML.getValue(regionTag).trim());
boolean active = environment == Environment.prod && readActive(regionTag);
zones.add(new DeclaredZone(environment, Optional.of(region), active));
}
}
if (Environment.prod.equals(environment)) {
globalServiceId = readGlobalServiceId(environmentTag);
} else if (readGlobalServiceId(environmentTag).isPresent()) {
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), zones);
}
/** Returns whether we should assume this tag name is the name of an environment */
private static boolean isEnvironmentName(String tagName) {
if (tagName.equals("upgrade")) return false;
return true;
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main (String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
public static class DeclaredZone {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
public boolean matches(Environment environment, Optional<RegionName> region) {
if (environment.equals(this.environment) && region.equals(this.region)) return true;
if ( ! region.isPresent() && prerequisite(environment)) return true;
return false;
}
/**
* Returns whether deployment in the given environment is a prerequisite of deployment in this environment
*
* The required progression leading to prerequisites is test, staging, prod.
*/
private boolean prerequisite(Environment environment) {
if (this.environment == Environment.prod)
return environment == Environment.staging || environment == Environment.test;
if (this.environment == Environment.staging)
return environment == Environment.test;
return false;
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} | class DeploymentSpec {
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<DeclaredZone> zones;
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the zones this declares as a read-only list. */
public List<DeclaredZone> zones() { return zones; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (DeclaredZone declaredZone : zones)
if (declaredZone.matches(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
List<DeclaredZone> zones = new ArrayList<>();
Element root = XML.getDocument(reader).getDocumentElement();
Optional<String> globalServiceId = Optional.empty();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
List<Element> regionTags = XML.getChildren(environmentTag, "region");
if (regionTags.isEmpty()) {
zones.add(new DeclaredZone(environment, Optional.empty(), false));
}
else {
for (Element regionTag : regionTags) {
RegionName region = RegionName.from(XML.getValue(regionTag).trim());
boolean active = environment == Environment.prod && readActive(regionTag);
zones.add(new DeclaredZone(environment, Optional.of(region), active));
}
}
if (Environment.prod.equals(environment)) {
globalServiceId = readGlobalServiceId(environmentTag);
} else if (readGlobalServiceId(environmentTag).isPresent()) {
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), zones);
}
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main (String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
public static class DeclaredZone {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
public boolean matches(Environment environment, Optional<RegionName> region) {
if (environment.equals(this.environment) && region.equals(this.region)) return true;
if ( ! region.isPresent() && prerequisite(environment)) return true;
return false;
}
/**
* Returns whether deployment in the given environment is a prerequisite of deployment in this environment
*
* The required progression leading to prerequisites is test, staging, prod.
*/
private boolean prerequisite(Environment environment) {
if (this.environment == Environment.prod)
return environment == Environment.staging || environment == Environment.test;
if (this.environment == Environment.staging)
return environment == Environment.test;
return false;
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} |
java's native timing classes instead of double | public void handleReply(Reply reply) {
Object o = reply.getContext();
if (!(o instanceof ReplyContext)) {
return;
}
ReplyContext context = (ReplyContext) o;
final double latency = (System.currentTimeMillis() - context.creationTime) / 1000.0d;
metric.set(MetricNames.LATENCY, latency, null);
if (reply.hasErrors()) {
Set<Integer> errorCodes = MessageBusAsyncSession.getErrorCodes(reply);
metricsHelper.reportFailure(DocumentOperationType.fromMessage(reply.getMessage()),
DocumentOperationStatus.fromMessageBusErrorCodes(errorCodes));
metric.add(MetricNames.FAILED, 1, null);
enqueue(context, reply.getError(0).getMessage(), ErrorCode.ERROR,
reply.getError(0).getCode() == DocumentProtocol.ERROR_TEST_AND_SET_CONDITION_FAILED, reply.getTrace());
} else {
metricsHelper.reportSuccessful(DocumentOperationType.fromMessage(reply.getMessage()), latency);
metric.add(MetricNames.SUCCEEDED, 1, null);
enqueue(context, "Document processed.", ErrorCode.OK, false, reply.getTrace());
}
} | final double latency = (System.currentTimeMillis() - context.creationTime) / 1000.0d; | public void handleReply(Reply reply) {
Object o = reply.getContext();
if (!(o instanceof ReplyContext)) {
return;
}
ReplyContext context = (ReplyContext) o;
final double latencyInSeconds = (System.currentTimeMillis() - context.creationTime) / 1000.0d;
metric.set(MetricNames.LATENCY, latencyInSeconds, null);
if (reply.hasErrors()) {
Set<Integer> errorCodes = reply.getErrorCodes();
metricsHelper.reportFailure(DocumentOperationType.fromMessage(reply.getMessage()),
DocumentOperationStatus.fromMessageBusErrorCodes(errorCodes));
metric.add(MetricNames.FAILED, 1, null);
enqueue(context, reply.getError(0).getMessage(), ErrorCode.ERROR,
reply.getError(0).getCode() == DocumentProtocol.ERROR_TEST_AND_SET_CONDITION_FAILED, reply.getTrace());
} else {
metricsHelper.reportSuccessful(DocumentOperationType.fromMessage(reply.getMessage()), latencyInSeconds);
metric.add(MetricNames.SUCCEEDED, 1, null);
enqueue(context, "Document processed.", ErrorCode.OK, false, reply.getTrace());
}
} | class FeedReplyReader implements ReplyHandler {
private static final Logger log = Logger.getLogger(FeedReplyReader.class.getName());
private final Metric metric;
private final DocumentApiMetricsHelper metricsHelper;
public FeedReplyReader(Metric metric, DocumentApiMetricsHelper metricsHelper) {
this.metric = metric;
this.metricsHelper = metricsHelper;
}
@Override
private void enqueue(ReplyContext context, String message, ErrorCode status, boolean isConditionNotMet, Trace trace) {
try {
String traceMessage = (trace != null && trace.getLevel() > 0) ? trace.toString() : "";
context.feedReplies.put(new OperationStatus(message, context.docId, status, isConditionNotMet, traceMessage));
} catch (InterruptedException e) {
log.log(LogLevel.WARNING,
"Interrupted while enqueueing result from putting document with id: " + context.docId);
Thread.currentThread().interrupt();
}
}
} | class FeedReplyReader implements ReplyHandler {
private static final Logger log = Logger.getLogger(FeedReplyReader.class.getName());
private final Metric metric;
private final DocumentApiMetrics metricsHelper;
public FeedReplyReader(Metric metric, DocumentApiMetrics metricsHelper) {
this.metric = metric;
this.metricsHelper = metricsHelper;
}
@Override
private void enqueue(ReplyContext context, String message, ErrorCode status, boolean isConditionNotMet, Trace trace) {
try {
String traceMessage = (trace != null && trace.getLevel() > 0) ? trace.toString() : "";
context.feedReplies.put(new OperationStatus(message, context.docId, status, isConditionNotMet, traceMessage));
} catch (InterruptedException e) {
log.log(LogLevel.WARNING,
"Interrupted while enqueueing result from putting document with id: " + context.docId);
Thread.currentThread().interrupt();
}
}
} |
Either use Duration or include the time unit in the variable name. | public void handleReply(Reply reply) {
Object o = reply.getContext();
if (!(o instanceof ReplyContext)) {
return;
}
ReplyContext context = (ReplyContext) o;
final double latency = (System.currentTimeMillis() - context.creationTime) / 1000.0d;
metric.set(MetricNames.LATENCY, latency, null);
if (reply.hasErrors()) {
Set<Integer> errorCodes = MessageBusAsyncSession.getErrorCodes(reply);
metricsHelper.reportFailure(DocumentOperationType.fromMessage(reply.getMessage()),
DocumentOperationStatus.fromMessageBusErrorCodes(errorCodes));
metric.add(MetricNames.FAILED, 1, null);
enqueue(context, reply.getError(0).getMessage(), ErrorCode.ERROR,
reply.getError(0).getCode() == DocumentProtocol.ERROR_TEST_AND_SET_CONDITION_FAILED, reply.getTrace());
} else {
metricsHelper.reportSuccessful(DocumentOperationType.fromMessage(reply.getMessage()), latency);
metric.add(MetricNames.SUCCEEDED, 1, null);
enqueue(context, "Document processed.", ErrorCode.OK, false, reply.getTrace());
}
} | final double latency = (System.currentTimeMillis() - context.creationTime) / 1000.0d; | public void handleReply(Reply reply) {
Object o = reply.getContext();
if (!(o instanceof ReplyContext)) {
return;
}
ReplyContext context = (ReplyContext) o;
final double latencyInSeconds = (System.currentTimeMillis() - context.creationTime) / 1000.0d;
metric.set(MetricNames.LATENCY, latencyInSeconds, null);
if (reply.hasErrors()) {
Set<Integer> errorCodes = reply.getErrorCodes();
metricsHelper.reportFailure(DocumentOperationType.fromMessage(reply.getMessage()),
DocumentOperationStatus.fromMessageBusErrorCodes(errorCodes));
metric.add(MetricNames.FAILED, 1, null);
enqueue(context, reply.getError(0).getMessage(), ErrorCode.ERROR,
reply.getError(0).getCode() == DocumentProtocol.ERROR_TEST_AND_SET_CONDITION_FAILED, reply.getTrace());
} else {
metricsHelper.reportSuccessful(DocumentOperationType.fromMessage(reply.getMessage()), latencyInSeconds);
metric.add(MetricNames.SUCCEEDED, 1, null);
enqueue(context, "Document processed.", ErrorCode.OK, false, reply.getTrace());
}
} | class FeedReplyReader implements ReplyHandler {
private static final Logger log = Logger.getLogger(FeedReplyReader.class.getName());
private final Metric metric;
private final DocumentApiMetricsHelper metricsHelper;
public FeedReplyReader(Metric metric, DocumentApiMetricsHelper metricsHelper) {
this.metric = metric;
this.metricsHelper = metricsHelper;
}
@Override
private void enqueue(ReplyContext context, String message, ErrorCode status, boolean isConditionNotMet, Trace trace) {
try {
String traceMessage = (trace != null && trace.getLevel() > 0) ? trace.toString() : "";
context.feedReplies.put(new OperationStatus(message, context.docId, status, isConditionNotMet, traceMessage));
} catch (InterruptedException e) {
log.log(LogLevel.WARNING,
"Interrupted while enqueueing result from putting document with id: " + context.docId);
Thread.currentThread().interrupt();
}
}
} | class FeedReplyReader implements ReplyHandler {
private static final Logger log = Logger.getLogger(FeedReplyReader.class.getName());
private final Metric metric;
private final DocumentApiMetrics metricsHelper;
public FeedReplyReader(Metric metric, DocumentApiMetrics metricsHelper) {
this.metric = metric;
this.metricsHelper = metricsHelper;
}
@Override
private void enqueue(ReplyContext context, String message, ErrorCode status, boolean isConditionNotMet, Trace trace) {
try {
String traceMessage = (trace != null && trace.getLevel() > 0) ? trace.toString() : "";
context.feedReplies.put(new OperationStatus(message, context.docId, status, isConditionNotMet, traceMessage));
} catch (InterruptedException e) {
log.log(LogLevel.WARNING,
"Interrupted while enqueueing result from putting document with id: " + context.docId);
Thread.currentThread().interrupt();
}
}
} |
Either use Duration or include the time unit in the name. | public void handleReply(Reply reply) {
Object o = reply.getContext();
if (!(o instanceof ReplyContext)) {
return;
}
ReplyContext context = (ReplyContext) o;
final double latency = (System.currentTimeMillis() - context.creationTime) / 1000.0d;
metric.set(MetricNames.LATENCY, latency, null);
if (reply.hasErrors()) {
Set<Integer> errorCodes = MessageBusAsyncSession.getErrorCodes(reply);
metricsHelper.reportFailure(DocumentOperationType.fromMessage(reply.getMessage()),
DocumentOperationStatus.fromMessageBusErrorCodes(errorCodes));
metric.add(MetricNames.FAILED, 1, null);
enqueue(context, reply.getError(0).getMessage(), ErrorCode.ERROR,
reply.getError(0).getCode() == DocumentProtocol.ERROR_TEST_AND_SET_CONDITION_FAILED, reply.getTrace());
} else {
metricsHelper.reportSuccessful(DocumentOperationType.fromMessage(reply.getMessage()), latency);
metric.add(MetricNames.SUCCEEDED, 1, null);
enqueue(context, "Document processed.", ErrorCode.OK, false, reply.getTrace());
}
} | final double latency = (System.currentTimeMillis() - context.creationTime) / 1000.0d; | public void handleReply(Reply reply) {
Object o = reply.getContext();
if (!(o instanceof ReplyContext)) {
return;
}
ReplyContext context = (ReplyContext) o;
final double latencyInSeconds = (System.currentTimeMillis() - context.creationTime) / 1000.0d;
metric.set(MetricNames.LATENCY, latencyInSeconds, null);
if (reply.hasErrors()) {
Set<Integer> errorCodes = reply.getErrorCodes();
metricsHelper.reportFailure(DocumentOperationType.fromMessage(reply.getMessage()),
DocumentOperationStatus.fromMessageBusErrorCodes(errorCodes));
metric.add(MetricNames.FAILED, 1, null);
enqueue(context, reply.getError(0).getMessage(), ErrorCode.ERROR,
reply.getError(0).getCode() == DocumentProtocol.ERROR_TEST_AND_SET_CONDITION_FAILED, reply.getTrace());
} else {
metricsHelper.reportSuccessful(DocumentOperationType.fromMessage(reply.getMessage()), latencyInSeconds);
metric.add(MetricNames.SUCCEEDED, 1, null);
enqueue(context, "Document processed.", ErrorCode.OK, false, reply.getTrace());
}
} | class FeedReplyReader implements ReplyHandler {
private static final Logger log = Logger.getLogger(FeedReplyReader.class.getName());
private final Metric metric;
private final DocumentApiMetricsHelper metricsHelper;
public FeedReplyReader(Metric metric, DocumentApiMetricsHelper metricsHelper) {
this.metric = metric;
this.metricsHelper = metricsHelper;
}
@Override
private void enqueue(ReplyContext context, String message, ErrorCode status, boolean isConditionNotMet, Trace trace) {
try {
String traceMessage = (trace != null && trace.getLevel() > 0) ? trace.toString() : "";
context.feedReplies.put(new OperationStatus(message, context.docId, status, isConditionNotMet, traceMessage));
} catch (InterruptedException e) {
log.log(LogLevel.WARNING,
"Interrupted while enqueueing result from putting document with id: " + context.docId);
Thread.currentThread().interrupt();
}
}
} | class FeedReplyReader implements ReplyHandler {
private static final Logger log = Logger.getLogger(FeedReplyReader.class.getName());
private final Metric metric;
private final DocumentApiMetrics metricsHelper;
public FeedReplyReader(Metric metric, DocumentApiMetrics metricsHelper) {
this.metric = metric;
this.metricsHelper = metricsHelper;
}
@Override
private void enqueue(ReplyContext context, String message, ErrorCode status, boolean isConditionNotMet, Trace trace) {
try {
String traceMessage = (trace != null && trace.getLevel() > 0) ? trace.toString() : "";
context.feedReplies.put(new OperationStatus(message, context.docId, status, isConditionNotMet, traceMessage));
} catch (InterruptedException e) {
log.log(LogLevel.WARNING,
"Interrupted while enqueueing result from putting document with id: " + context.docId);
Thread.currentThread().interrupt();
}
}
} |
IntLiteral(long longValue, Type type) need catch exception, too many catch and try will make code not neat, I think just use IntLiteral(long value) is enough. CompareMode.IS_SUPERTYPE_OF will help us find the correct function anyway | public Void visitFunctionCall(FunctionCallExpr node, Scope scope) {
Type[] argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
if (node.isNondeterministicBuiltinFnName()) {
ExprId exprId = analyzeState.getNextNondeterministicId();
node.setNondeterministicId(exprId);
}
Function fn;
String fnName = node.getFnName().getFunction();
if (fnName.equals(FunctionSet.COUNT) && node.getParams().isDistinct()) {
fn = Expr.getBuiltinFunction(FunctionSet.COUNT, new Type[] {argumentTypes[0]},
Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
} else if (fnName.equals(FunctionSet.EXCHANGE_BYTES) || fnName.equals(FunctionSet.EXCHANGE_SPEED)) {
fn = Expr.getBuiltinFunction(fnName, argumentTypes,
Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
fn.setArgsType(argumentTypes);
fn.setIsNullable(false);
} else if (fnName.equals(FunctionSet.TIME_SLICE) || fnName.equals(FunctionSet.DATE_SLICE)) {
if (!(node.getChild(1) instanceof IntLiteral)) {
throw new SemanticException(
fnName + " requires second parameter must be a constant interval");
}
if (((IntLiteral) node.getChild(1)).getValue() <= 0) {
throw new SemanticException(
fnName + " requires second parameter must be greater than 0");
}
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
} else if (FunctionSet.decimalRoundFunctions.contains(fnName) ||
Arrays.stream(argumentTypes).anyMatch(Type::isDecimalV3)) {
if (FunctionSet.varianceFunctions.contains(fnName)) {
Type[] doubleArgTypes = Stream.of(argumentTypes).map(t -> Type.DOUBLE).toArray(Type[]::new);
fn = Expr.getBuiltinFunction(fnName, doubleArgTypes,
Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
} else {
fn = getDecimalV3Function(node, argumentTypes);
}
} else if (Arrays.stream(argumentTypes).anyMatch(arg -> arg.matchesType(Type.TIME))) {
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
if (fn instanceof AggregateFunction) {
throw new SemanticException("Time Type can not used in %s function",
fnName);
}
} else if (FunctionSet.STR_TO_DATE.equals(fnName)) {
fn = getStrToDateFunction(node, argumentTypes);
} else if (fnName.equals(FunctionSet.ARRAY_FILTER)) {
if (node.getChildren().size() != 2) {
throw new SemanticException(fnName + " should have 2 array inputs or lambda functions.");
}
if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) {
throw new SemanticException("The first input of " + fnName +
" should be an array or a lambda function.");
}
if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) {
throw new SemanticException("The second input of " + fnName +
" should be an array or a lambda function.");
}
if (!Type.canCastTo(node.getChild(1).getType(), Type.ARRAY_BOOLEAN)) {
throw new SemanticException("The second input of array_filter " +
node.getChild(1).getType().toString() + " can't cast to ARRAY<BOOL>");
}
node.setChild(1, new CastExpr(Type.ARRAY_BOOLEAN, node.getChild(1)));
argumentTypes[1] = Type.ARRAY_BOOLEAN;
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
} else if (fnName.equals(FunctionSet.ARRAY_SORTBY)) {
if (node.getChildren().size() != 2) {
throw new SemanticException(fnName + " should have 2 array inputs or lambda functions.");
}
if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) {
throw new SemanticException("The first input of " + fnName +
" should be an array or a lambda function.");
}
if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) {
throw new SemanticException("The second input of " + fnName +
" should be an array or a lambda function.");
}
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
} else if (fnName.equals(FunctionSet.ARRAY_SLICE)) {
for (int i = 1; i < argumentTypes.length; i++) {
argumentTypes[i] = Type.BIGINT;
}
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_SUPERTYPE_OF);
} else if (fnName.equals(FunctionSet.ARRAY_CONCAT)) {
if (node.getChildren().size() < 2) {
throw new SemanticException(fnName + " should have at least two inputs");
}
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
} else if (fnName.equals("array_generate")) {
if (node.getChildren().size() < 1 || node.getChildren().size() > 3) {
throw new SemanticException(fnName + " has wrong input numbers");
}
for (Expr expr : node.getChildren()) {
if ((expr instanceof SlotRef) && node.getChildren().size() != 3) {
throw new SemanticException(fnName + " with IntColumn doesn't support default parameters");
}
if (!(expr instanceof IntLiteral) && !(expr instanceof LargeIntLiteral) &&
!(expr instanceof SlotRef) && !(expr instanceof NullLiteral)) {
throw new SemanticException(fnName + "'s parameter only support Integer");
}
}
if (node.getChildren().size() == 1) {
LiteralExpr secondParam = (LiteralExpr) node.getChild(0);
node.clearChildren();
try {
node.addChild(new IntLiteral("1", Type.TINYINT));
node.addChild(secondParam);
} catch (AnalysisException e) {
throw new SemanticException(e.getMessage());
}
}
if (node.getChildren().size() == 2) {
int idx = 0;
BigInteger[] childValues = new BigInteger[2];
for (Expr expr : node.getChildren()) {
if (expr instanceof NullLiteral) {
throw new SemanticException(fnName + "'s parameter only support Integer");
} else if (expr instanceof IntLiteral) {
childValues[idx++] = BigInteger.valueOf(((IntLiteral) expr).getValue());
} else {
childValues[idx++] = ((LargeIntLiteral) expr).getValue();
}
}
if (childValues[0].compareTo(childValues[1]) < 0) {
node.addChild(new IntLiteral(1));
} else {
node.addChild(new IntLiteral(-1));
}
}
argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_SUPERTYPE_OF);
} else {
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
}
if (fn == null) {
fn = AnalyzerUtils.getUdfFunction(session, node.getFnName(), argumentTypes);
}
if (fn == null) {
throw new SemanticException("No matching function with signature: %s(%s).",
fnName,
node.getParams().isStar() ? "*" : Joiner.on(", ")
.join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
}
if (fn instanceof TableFunction) {
throw unsupportedException("Table function cannot be used in expression");
}
for (int i = 0; i < fn.getNumArgs(); i++) {
if (!argumentTypes[i].matchesType(fn.getArgs()[i]) &&
!Type.canCastToAsFunctionParameter(argumentTypes[i], fn.getArgs()[i])) {
throw new SemanticException("No matching function with signature: %s(%s).", fnName,
node.getParams().isStar() ? "*" : Joiner.on(", ")
.join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
}
}
node.setFn(fn);
node.setType(fn.getReturnType());
FunctionAnalyzer.analyze(node);
return null;
} | node.addChild(new IntLiteral("1", Type.TINYINT)); | public Void visitFunctionCall(FunctionCallExpr node, Scope scope) {
Type[] argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
if (node.isNondeterministicBuiltinFnName()) {
ExprId exprId = analyzeState.getNextNondeterministicId();
node.setNondeterministicId(exprId);
}
Function fn;
String fnName = node.getFnName().getFunction();
checkFunction(fnName, node);
if (fnName.equals(FunctionSet.COUNT) && node.getParams().isDistinct()) {
fn = Expr.getBuiltinFunction(FunctionSet.COUNT, new Type[] {argumentTypes[0]},
Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
} else if (fnName.equals(FunctionSet.EXCHANGE_BYTES) || fnName.equals(FunctionSet.EXCHANGE_SPEED)) {
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
fn.setArgsType(argumentTypes);
fn.setIsNullable(false);
} else if (DecimalV3FunctionAnalyzer.argumentTypeContainDecimalV3(fnName, argumentTypes)) {
fn = DecimalV3FunctionAnalyzer.getDecimalV3Function(session, node, argumentTypes);
} else if (Arrays.stream(argumentTypes).anyMatch(arg -> arg.matchesType(Type.TIME))) {
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
if (fn instanceof AggregateFunction) {
throw new SemanticException("Time Type can not used in" + fnName + " function", node.getPos());
}
} else if (FunctionSet.STR_TO_DATE.equals(fnName)) {
fn = getStrToDateFunction(node, argumentTypes);
} else if (FunctionSet.ARRAY_GENERATE.equals(fnName)) {
fn = getArrayGenerateFunction(node);
argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
} else {
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
}
if (fn == null) {
fn = AnalyzerUtils.getUdfFunction(session, node.getFnName(), argumentTypes);
}
if (fn == null) {
String msg = String.format("No matching function with signature: %s(%s)",
fnName,
node.getParams().isStar() ? "*" : Joiner.on(", ")
.join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
throw new SemanticException(msg, node.getPos());
}
if (fn instanceof TableFunction) {
throw new SemanticException("Table function cannot be used in expression", node.getPos());
}
for (int i = 0; i < fn.getNumArgs(); i++) {
if (!argumentTypes[i].matchesType(fn.getArgs()[i]) &&
!Type.canCastToAsFunctionParameter(argumentTypes[i], fn.getArgs()[i])) {
String msg = String.format("No matching function with signature: %s(%s)", fnName,
node.getParams().isStar() ? "*" :
Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.joining(", ")));
throw new SemanticException(msg, node.getPos());
}
}
if (fn.hasVarArgs()) {
Type varType = fn.getArgs()[fn.getNumArgs() - 1];
for (int i = fn.getNumArgs(); i < argumentTypes.length; i++) {
if (!argumentTypes[i].matchesType(varType) &&
!Type.canCastToAsFunctionParameter(argumentTypes[i], varType)) {
String msg = String.format("Variadic function %s(%s) can't support type: %s", fnName,
Arrays.stream(fn.getArgs()).map(Type::toSql).collect(Collectors.joining(", ")),
argumentTypes[i]);
throw new SemanticException(msg, node.getPos());
}
}
}
node.setFn(fn);
node.setType(fn.getReturnType());
FunctionAnalyzer.analyze(node);
return null;
} | class Visitor extends AstVisitor<Void, Scope> {
private static final List<String> ADD_DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_ADD,
FunctionSet.ADDDATE, FunctionSet.DAYS_ADD, FunctionSet.TIMESTAMPADD);
private static final List<String> SUB_DATE_FUNCTIONS =
Lists.newArrayList(FunctionSet.DATE_SUB, FunctionSet.SUBDATE,
FunctionSet.DAYS_SUB);
private final AnalyzeState analyzeState;
private final ConnectContext session;
public Visitor(AnalyzeState analyzeState, ConnectContext session) {
this.analyzeState = analyzeState;
this.session = session;
}
@Override
public Void visitExpression(Expr node, Scope scope) {
throw unsupportedException("not yet implemented: expression analyzer for " + node.getClass().getName());
}
private void handleResolvedField(SlotRef slot, ResolvedField resolvedField) {
analyzeState.addColumnReference(slot, FieldId.from(resolvedField));
}
@Override
public Void visitSubfieldExpr(SubfieldExpr node, Scope scope) {
Expr child = node.getChild(0);
Preconditions.checkArgument(child.getType().isStructType(),
String.format("%s must be a struct type, check if you are using `'`", child.toSql()));
List<String> fieldNames = node.getFieldNames();
Type tmpType = child.getType();
for (String fieldName : fieldNames) {
StructType structType = (StructType) tmpType;
StructField structField = structType.getField(fieldName);
if (structField == null) {
throw new SemanticException("Struct subfield '%s' cannot be resolved", fieldName);
}
tmpType = structField.getType();
}
node.setType(tmpType);
return null;
}
@Override
public Void visitSlot(SlotRef node, Scope scope) {
ResolvedField resolvedField = scope.resolveField(node);
node.setType(resolvedField.getField().getType());
node.setTblName(resolvedField.getField().getRelationAlias());
if (node.getType().isStructType()) {
node.setCol(resolvedField.getField().getName());
node.setLabel(resolvedField.getField().getName());
if (resolvedField.getField().getTmpUsedStructFieldPos().size() > 0) {
node.setUsedStructFieldPos(resolvedField.getField().getTmpUsedStructFieldPos());
node.resetStructInfo();
}
}
handleResolvedField(node, resolvedField);
return null;
}
@Override
public Void visitFieldReference(FieldReference node, Scope scope) {
Field field = scope.getRelationFields().getFieldByIndex(node.getFieldIndex());
node.setType(field.getType());
return null;
}
@Override
// Types an array literal: computes the common supertype of all elements (or uses the
// pre-set item type), casts mismatching children to it, and sets ARRAY<itemType>.
public Void visitArrayExpr(ArrayExpr node, Scope scope) {
if (!node.getChildren().isEmpty()) {
try {
Type targetItemType;
if (node.getType() != null) {
// Type already assigned (e.g. from an explicit cast); reuse its item type.
targetItemType = ((ArrayType) node.getType()).getItemType();
} else {
targetItemType = TypeManager.getCommonSuperType(
node.getChildren().stream().map(Expr::getType).collect(Collectors.toList()));
}
for (int i = 0; i < node.getChildren().size(); i++) {
if (!node.getChildren().get(i).getType().matchesType(targetItemType)) {
node.castChild(targetItemType, i);
}
}
node.setType(new ArrayType(targetItemType));
} catch (AnalysisException e) {
throw new SemanticException(e.getMessage());
}
} else {
// Empty literal: element type is unknown, use ARRAY_NULL.
node.setType(Type.ARRAY_NULL);
}
return null;
}
@Override
// Types a subscript expression expr[subscript] for arrays (integer index, result is the
// item type) and maps (key-typed index, result is the value type).
public Void visitCollectionElementExpr(CollectionElementExpr node, Scope scope) {
Expr expr = node.getChild(0);
Expr subscript = node.getChild(1);
if (!expr.getType().isArrayType() && !expr.getType().isMapType()) {
throw new SemanticException("cannot subscript type " + expr.getType()
+ " because it is not an array or a map");
}
if (expr.getType().isArrayType()) {
if (!subscript.getType().isNumericType()) {
throw new SemanticException("array subscript must have type integer");
}
try {
// Normalize any numeric subscript to INT before use.
if (subscript.getType().getPrimitiveType() != PrimitiveType.INT) {
node.castChild(Type.INT, 1);
}
node.setType(((ArrayType) expr.getType()).getItemType());
} catch (AnalysisException e) {
throw new SemanticException(e.getMessage());
}
} else {
try {
// Map subscript: cast the key expression to the map's declared key type if needed.
if (subscript.getType().getPrimitiveType() !=
((MapType) expr.getType()).getKeyType().getPrimitiveType()) {
node.castChild(((MapType) expr.getType()).getKeyType(), 1);
}
node.setType(((MapType) expr.getType()).getValueType());
} catch (AnalysisException e) {
throw new SemanticException(e.getMessage());
}
}
return null;
}
@Override
// Types an array slice: the result of slicing an array is the same array type.
public Void visitArraySliceExpr(ArraySliceExpr node, Scope scope) {
if (!node.getChild(0).getType().isArrayType()) {
throw new SemanticException("cannot subscript type" +
node.getChild(0).getType() + " because it is not an array");
}
node.setType(node.getChild(0).getType());
return null;
}
@Override
// Types the JSON arrow operator (json_col -> 'key'): requires a string-literal key
// and a JSON left operand; the result is always JSON.
public Void visitArrowExpr(ArrowExpr node, Scope scope) {
Expr item = node.getChild(0);
Expr key = node.getChild(1);
if (!key.isLiteral() || !key.getType().isStringType()) {
throw new SemanticException("right operand of -> should be string literal, but got " + key);
}
if (!item.getType().isJsonType()) {
throw new SemanticException(
"-> operator could only be used for json column, but got " + item.getType());
}
node.setType(Type.JSON);
return null;
}
@Override
// Analyzes a lambda (child 0 = body, children 1..n = arguments): binds each argument to
// the corresponding lambda input supplied by the enclosing high-order function via the
// scope, rejects duplicate argument names, then analyzes the body in a nested scope.
public Void visitLambdaFunctionExpr(LambdaFunctionExpr node, Scope scope) {
if (scope.getLambdaInputs().size() == 0) {
throw new SemanticException("Lambda Functions can only be used in high-order functions with arrays.");
}
if (scope.getLambdaInputs().size() != node.getChildren().size() - 1) {
throw new SemanticException("Lambda arguments should equal to lambda input arrays.");
}
Set<String> set = new HashSet<>();
List<LambdaArgument> args = Lists.newArrayList();
for (int i = 1; i < node.getChildren().size(); ++i) {
args.add((LambdaArgument) node.getChild(i));
String name = ((LambdaArgument) node.getChild(i)).getName();
if (set.contains(name)) {
throw new SemanticException("Lambda argument: " + name + " is duplicated.");
}
set.add(name);
// Argument i binds to lambda input i-1 (child 0 is the lambda body).
((LambdaArgument) node.getChild(i)).setNullable(scope.getLambdaInputs().get(i - 1).isNullable());
node.getChild(i).setType(scope.getLambdaInputs().get(i - 1).getType());
}
Scope lambdaScope = new Scope(args, scope);
ExpressionAnalyzer.analyzeExpression(node.getChild(0), this.analyzeState, lambdaScope, this.session);
node.setType(Type.FUNCTION);
// Inputs are consumed; clear so they are not reused by a sibling expression.
scope.clearLambdaInputs();
return null;
}
@Override
// Checks AND/OR/NOT operands: every child must be BOOLEAN (or NULL); result is BOOLEAN.
public Void visitCompoundPredicate(CompoundPredicate node, Scope scope) {
for (int i = 0; i < node.getChildren().size(); i++) {
Type type = node.getChild(i).getType();
if (!type.isBoolean() && !type.isNull()) {
throw new SemanticException("Operand '%s' part of predicate " +
"'%s' should return type 'BOOLEAN' but returns type '%s'.",
AstToStringBuilder.toString(node), AstToStringBuilder.toString(node.getChild(i)),
type.toSql());
}
}
node.setType(Type.BOOLEAN);
return null;
}
@Override
// BETWEEN: all three operands must be castable to a single compatible comparison type.
public Void visitBetweenPredicate(BetweenPredicate node, Scope scope) {
predicateBaseAndCheck(node);
List<Type> list = node.getChildren().stream().map(Expr::getType).collect(Collectors.toList());
Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list);
for (Type type : list) {
if (!Type.canCastTo(type, compatibleType)) {
throw new SemanticException(
"between predicate type " + type.toSql() + " with type " + compatibleType.toSql()
+ " is invalid.");
}
}
return null;
}
@Override
// Types a binary comparison (=, <, >, ...): both operands must be castable to the
// compatible comparison type; the predicate itself is BOOLEAN.
public Void visitBinaryPredicate(BinaryPredicate node, Scope scope) {
    Type type1 = node.getChild(0).getType();
    Type type2 = node.getChild(1).getType();
    Type compatibleType =
            TypeManager.getCompatibleTypeForBinary(node.getOp().isNotRangeComparison(), type1, type2);
    final String ERROR_MSG = "Column type %s does not support binary predicate operation.";
    if (!Type.canCastTo(type1, compatibleType)) {
        throw new SemanticException(String.format(ERROR_MSG, type1.toSql()));
    }
    if (!Type.canCastTo(type2, compatibleType)) {
        // Fix: report the right operand's type here (previously formatted type1 —
        // a copy/paste bug that blamed the wrong operand in the error message).
        throw new SemanticException(String.format(ERROR_MSG, type2.toSql()));
    }
    node.setType(Type.BOOLEAN);
    return null;
}
@Override
// Types arithmetic expressions. Binary infix operators: DecimalV3 operands take a
// dedicated rewrite path; otherwise operand types are widened per operator, both sides
// are checked castable, and the builtin function's return type becomes the node type.
// Unary prefix resolves against BIGINT; unary postfix is unsupported.
public Void visitArithmeticExpr(ArithmeticExpr node, Scope scope) {
if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.BINARY_INFIX) {
ArithmeticExpr.Operator op = node.getOp();
// Use each operand's numeric result type for common-type computation.
Type t1 = node.getChild(0).getType().getNumResultType();
Type t2 = node.getChild(1).getType().getNumResultType();
if (t1.isDecimalV3() || t2.isDecimalV3()) {
// DecimalV3 path: rewrite decides precision/scale, then a ScalarFunction is
// synthesized with the rewritten argument and result types.
try {
node.rewriteDecimalOperation();
} catch (AnalysisException ex) {
throw new SemanticException(ex.getMessage());
}
Type lhsType = node.getChild(0).getType();
Type rhsType = node.getChild(1).getType();
Type resultType = node.getType();
Type[] args = {lhsType, rhsType};
Function fn = Expr.getBuiltinFunction(op.getName(), args, Function.CompareMode.IS_IDENTICAL);
Function newFn = new ScalarFunction(fn.getFunctionName(), args, resultType, fn.hasVarArgs());
node.setType(resultType);
node.setFn(newFn);
return null;
}
Type lhsType;
Type rhsType;
// Operand-type widening rules per operator.
switch (op) {
case MULTIPLY:
case ADD:
case SUBTRACT:
// Widen to the next bigger type to reduce overflow risk.
lhsType = ArithmeticExpr.getBiggerType(ArithmeticExpr.getCommonType(t1, t2));
rhsType = lhsType;
break;
case MOD:
lhsType = ArithmeticExpr.getCommonType(t1, t2);
rhsType = lhsType;
break;
case DIVIDE:
// '/' on integers produces DOUBLE (true division).
lhsType = ArithmeticExpr.getCommonType(t1, t2);
if (lhsType.isFixedPointType()) {
lhsType = Type.DOUBLE;
}
rhsType = lhsType;
break;
case INT_DIVIDE:
case BITAND:
case BITOR:
case BITXOR:
// Integer-only operators: force a fixed-point common type.
lhsType = ArithmeticExpr.getCommonType(t1, t2);
if (!lhsType.isFixedPointType()) {
lhsType = Type.BIGINT;
}
rhsType = lhsType;
break;
case BIT_SHIFT_LEFT:
case BIT_SHIFT_RIGHT:
case BIT_SHIFT_RIGHT_LOGICAL:
// Shifts keep the left operand's type; shift amount is BIGINT.
lhsType = t1;
rhsType = Type.BIGINT;
break;
default:
throw unsupportedException("Unknown arithmetic operation " + op + " in: " + node);
}
if (node.getChild(0).getType().equals(Type.NULL) && node.getChild(1).getType().equals(Type.NULL)) {
lhsType = Type.NULL;
rhsType = Type.NULL;
}
if (!Type.NULL.equals(node.getChild(0).getType()) && !Type.canCastTo(t1, lhsType)) {
throw new SemanticException(
"cast type " + node.getChild(0).getType().toSql() + " with type " + lhsType.toSql()
+ " is invalid.");
}
if (!Type.NULL.equals(node.getChild(1).getType()) && !Type.canCastTo(t2, rhsType)) {
throw new SemanticException(
"cast type " + node.getChild(1).getType().toSql() + " with type " + rhsType.toSql()
+ " is invalid.");
}
Function fn = Expr.getBuiltinFunction(op.getName(), new Type[] {lhsType, rhsType},
Function.CompareMode.IS_SUPERTYPE_OF);
/*
* commonType is the common type of the parameters of the function,
* and fn.getReturnType() is the return type of the function after execution
* So we use fn.getReturnType() as node type
*/
node.setType(fn.getReturnType());
node.setFn(fn);
} else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_PREFIX) {
Function fn = Expr.getBuiltinFunction(
node.getOp().getName(), new Type[] {Type.BIGINT}, Function.CompareMode.IS_SUPERTYPE_OF);
node.setType(Type.BIGINT);
node.setFn(fn);
} else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_POSTFIX) {
throw unsupportedException("not yet implemented: expression analyzer for " + node.getClass().getName());
} else {
throw unsupportedException("not yet implemented: expression analyzer for " + node.getClass().getName());
}
return null;
}
// NOTE(review): these instance-level lists appear unused — the visible
// visitTimestampArithmeticExpr below consults ADD_DATE_FUNCTIONS / SUB_DATE_FUNCTIONS
// constants instead. Candidates for removal; confirm no other callers before deleting.
List<String> addDateFunctions = Lists.newArrayList(FunctionSet.DATE_ADD,
FunctionSet.ADDDATE, FunctionSet.DAYS_ADD, FunctionSet.TIMESTAMPADD);
List<String> subDateFunctions = Lists.newArrayList(FunctionSet.DATE_SUB, FunctionSet.SUBDATE,
FunctionSet.DAYS_SUB);
@Override
// Types date/time arithmetic (date_add, date_sub, timestampdiff, interval +/-):
// derives the builtin function name "<UNIT>S_{add|sub|diff}" from the call and
// resolves it against the argument types.
public Void visitTimestampArithmeticExpr(TimestampArithmeticExpr node, Scope scope) {
// First operand is always coerced to DATETIME.
node.setChild(0, TypeManager.addCastExpr(node.getChild(0), Type.DATETIME));
String funcOpName;
if (node.getFuncName() != null) {
if (ADD_DATE_FUNCTIONS.contains(node.getFuncName())) {
funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "add");
} else if (SUB_DATE_FUNCTIONS.contains(node.getFuncName())) {
funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "sub");
} else {
// Diff-style function: second operand is also a DATETIME.
node.setChild(1, TypeManager.addCastExpr(node.getChild(1), Type.DATETIME));
funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "diff");
}
} else {
// Interval syntax (expr + INTERVAL n UNIT): operator decides add vs sub.
funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(),
(node.getOp() == ArithmeticExpr.Operator.ADD) ? "add" : "sub");
}
Type[] argumentTypes = node.getChildren().stream().map(Expr::getType)
.toArray(Type[]::new);
Function fn = Expr.getBuiltinFunction(funcOpName.toLowerCase(), argumentTypes,
Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
if (fn == null) {
throw new SemanticException("No matching function with signature: %s(%s).", funcOpName, Joiner.on(", ")
.join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
}
node.setType(fn.getReturnType());
node.setFn(fn);
return null;
}
@Override
// EXISTS: only the shared predicate checks apply; result type is BOOLEAN.
public Void visitExistsPredicate(ExistsPredicate node, Scope scope) {
predicateBaseAndCheck(node);
return null;
}
@Override
// IN: rejects mixing a subquery with a literal list, rejects JSON operands, and
// requires every operand to be castable to one compatible type.
public Void visitInPredicate(InPredicate node, Scope scope) {
predicateBaseAndCheck(node);
List<Expr> queryExpressions = Lists.newArrayList();
node.collect(arg -> arg instanceof Subquery, queryExpressions);
if (queryExpressions.size() > 0 && node.getChildren().size() > 2) {
throw new SemanticException("In Predicate only support literal expression list");
}
List<Type> list = node.getChildren().stream().map(Expr::getType).collect(Collectors.toList());
Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list);
for (Type type : list) {
if (type.isJsonType()) {
throw new SemanticException("InPredicate of JSON is not supported");
}
if (!Type.canCastTo(type, compatibleType)) {
throw new SemanticException(
"in predicate type " + type.toSql() + " with type " + compatibleType.toSql()
+ " is invalid.");
}
}
return null;
}
@Override
// Multi-column IN ((a, b) IN (SELECT ...)): the subquery must return the same number
// of columns as the left tuple, with pairwise-castable, non-complex types.
public Void visitMultiInPredicate(MultiInPredicate node, Scope scope) {
predicateBaseAndCheck(node);
// Children 0..numberOfColumns-1 are the left-hand tuple; the next child is the subquery.
List<Type> leftTypes =
node.getChildren().stream().limit(node.getNumberOfColumns()).map(Expr::getType)
.collect(Collectors.toList());
Subquery inSubquery = (Subquery) node.getChild(node.getNumberOfColumns());
List<Type> rightTypes =
inSubquery.getQueryStatement().getQueryRelation().getOutputExpression().stream().map(Expr::getType).
collect(Collectors.toList());
if (leftTypes.size() != rightTypes.size()) {
throw new SemanticException(
"subquery must return the same number of columns as provided by the IN predicate");
}
for (int i = 0; i < rightTypes.size(); ++i) {
if (leftTypes.get(i).isJsonType() || rightTypes.get(i).isJsonType() || leftTypes.get(i).isMapType() ||
rightTypes.get(i).isMapType() || leftTypes.get(i).isStructType() ||
rightTypes.get(i).isStructType()) {
throw new SemanticException("InPredicate of JSON, Map, Struct types is not supported");
}
if (!Type.canCastTo(leftTypes.get(i), rightTypes.get(i))) {
throw new SemanticException(
"in predicate type " + leftTypes.get(i).toSql() + " with type " + rightTypes.get(i).toSql()
+ " is invalid.");
}
}
return null;
}
@Override
// Literals: only LARGEINT needs validation — its value must lie within the 128-bit range.
public Void visitLiteral(LiteralExpr node, Scope scope) {
if (node instanceof LargeIntLiteral) {
BigInteger value = ((LargeIntLiteral) node).getValue();
if (value.compareTo(LargeIntLiteral.LARGE_INT_MIN) < 0 ||
value.compareTo(LargeIntLiteral.LARGE_INT_MAX) > 0) {
throw new SemanticException("Number Overflow. literal: " + value);
}
}
return null;
}
@Override
// IS [NOT] NULL: only the shared predicate checks apply; result type is BOOLEAN.
public Void visitIsNullPredicate(IsNullPredicate node, Scope scope) {
predicateBaseAndCheck(node);
return null;
}
@Override
// LIKE/REGEXP: both operands must be string (or NULL); a literal REGEXP pattern is
// compiled eagerly so invalid regexes fail at analysis time rather than execution.
public Void visitLikePredicate(LikePredicate node, Scope scope) {
predicateBaseAndCheck(node);
Type type1 = node.getChild(0).getType();
Type type2 = node.getChild(1).getType();
if (!type1.isStringType() && !type1.isNull()) {
throw new SemanticException(
"left operand of " + node.getOp().toString() + " must be of type STRING: " +
AstToStringBuilder.toString(node));
}
if (!type2.isStringType() && !type2.isNull()) {
throw new SemanticException(
"right operand of " + node.getOp().toString() + " must be of type STRING: " +
AstToStringBuilder.toString(node));
}
if (LikePredicate.Operator.REGEXP.equals(node.getOp()) && !type2.isNull() && node.getChild(1).isLiteral()) {
try {
Pattern.compile(((StringLiteral) node.getChild(1)).getValue());
} catch (PatternSyntaxException e) {
throw new SemanticException(
"Invalid regular expression in '" + AstToStringBuilder.toString(node) + "'");
}
}
return null;
}
// Common predicate setup: sets the result type to BOOLEAN and rejects metric-only
// (HLL/BITMAP/PERCENTILE) and complex (ARRAY/MAP/STRUCT) operand types, except that
// IS NULL is allowed on complex types.
private void predicateBaseAndCheck(Predicate node) {
node.setType(Type.BOOLEAN);
for (Expr expr : node.getChildren()) {
if (expr.getType().isOnlyMetricType() ||
(expr.getType().isComplexType() && !(node instanceof IsNullPredicate))) {
throw new SemanticException(
"HLL, BITMAP, PERCENTILE and ARRAY, MAP, STRUCT type couldn't as Predicate");
}
}
}
@Override
// CAST: validates the source type can cast to the target type and assigns the target
// type to the node. Implicit casts reuse the pre-set type; explicit casts read the
// target type definition from the SQL text.
public Void visitCastExpr(CastExpr cast, Scope context) {
Type castType;
if (cast.isImplicit()) {
castType = cast.getType();
} else {
castType = cast.getTargetTypeDef().getType();
}
if (!Type.canCastTo(cast.getChild(0).getType(), castType)) {
// '%' is escaped because SemanticException formats its message.
throw new SemanticException("Invalid type cast from " + cast.getChild(0).getType().toSql() + " to "
+ castType.toSql() + " in sql `" +
AstToStringBuilder.toString(cast.getChild(0)).replace("%", "%%") + "`");
}
cast.setType(castType);
return null;
}
// Resolves the builtin for str_to_date. If the format argument is a constant with no
// time part, switch to str2date so the result is a DATE (MySQL-compatible); otherwise
// keep the datetime-returning str_to_date.
//
// Fix: removed a stray @Override annotation — this is a private helper and overrides
// nothing, so @Override is a compile error (JLS §9.6.4.4).
private Function getStrToDateFunction(FunctionCallExpr node, Type[] argumentTypes) {
    /*
     * @TODO: Determine the return type of this function
     * If is format is constant and don't contains time part, return date type, to compatible with mysql.
     * In fact we don't want to support str_to_date return date like mysql, reason:
     * 1. The return type of FE/BE str_to_date function signature is datetime, return date
     *    let type different, it's will throw unpredictable error
     * 2. Support return date and datetime at same time in one function is complicated.
     * 3. The meaning of the function is confusing. In mysql, will return date if format is a constant
     *    string and it's not contains "%H/%M/%S" pattern, but it's a trick logic, if format is a variable
     *    expression, like: str_to_date(col1, col2), and the col2 is '%Y%m%d', the result always be
     *    datetime.
     */
    Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(),
            argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    if (fn == null) {
        return null;
    }
    // Non-constant format: the result type cannot be decided at analysis time, keep datetime.
    if (!node.getChild(1).isConstant()) {
        return fn;
    }
    ExpressionMapping expressionMapping =
            new ExpressionMapping(new Scope(RelationId.anonymous(), new RelationFields()),
                    com.google.common.collect.Lists.newArrayList());
    ScalarOperator format = SqlToScalarOperatorTranslator.translate(node.getChild(1), expressionMapping,
            new ColumnRefFactory());
    // Constant format without any time-part pattern: use the DATE-returning variant.
    if (format.isConstantRef() && !HAS_TIME_PART.matcher(format.toString()).matches()) {
        return Expr.getBuiltinFunction("str2date", argumentTypes,
                Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    }
    return fn;
}
// Resolves a function whose arguments involve DecimalV3: normalizes the decimal
// argument types to a common type, looks up the builtin (falling back to a UDF), then
// rewrites the resolved function so its decimal argument/return types match.
Function getDecimalV3Function(FunctionCallExpr node, Type[] argumentTypes) {
Function fn;
String fnName = node.getFnName().getFunction();
// Side effect: normalizeDecimalArgTypes may rewrite entries of argumentTypes in place.
Type commonType = DecimalV3FunctionAnalyzer.normalizeDecimalArgTypes(argumentTypes, fnName);
fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
if (fn == null) {
fn = AnalyzerUtils.getUdfFunction(session, node.getFnName(), argumentTypes);
}
if (fn == null) {
throw new SemanticException("No matching function with signature: %s(%s).", fnName,
node.getParams().isStar() ? "*" : Joiner.on(", ")
.join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
}
if (DecimalV3FunctionAnalyzer.DECIMAL_AGG_FUNCTION.contains(fnName)) {
Type argType = node.getChild(0).getType();
// Variance/stddev aggregates on decimal are computed in DECIMAL128(38, 9).
if (DecimalV3FunctionAnalyzer.DECIMAL_AGG_VARIANCE_STDDEV_TYPE
.contains(fnName) && argType.isDecimalV3()) {
argType = ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL128, 38, 9);
node.setChild(0, TypeManager.addCastExpr(node.getChild(0), argType));
}
fn = DecimalV3FunctionAnalyzer
.rectifyAggregationFunction((AggregateFunction) fn, argType, commonType);
} else if (DecimalV3FunctionAnalyzer.DECIMAL_UNARY_FUNCTION_SET.contains(fnName) ||
DecimalV3FunctionAnalyzer.DECIMAL_IDENTICAL_TYPE_FUNCTION_SET.contains(fnName) ||
FunctionSet.IF.equals(fnName) || FunctionSet.MAX_BY.equals(fnName)) {
// Rebuild the function with decimal slots replaced by the common decimal type.
List<Type> argTypes;
if (FunctionSet.MONEY_FORMAT.equals(fnName)) {
argTypes = Arrays.asList(argumentTypes);
} else {
argTypes = Arrays.stream(fn.getArgs()).map(t -> t.isDecimalV3() ? commonType : t)
.collect(Collectors.toList());
}
Type returnType = fn.getReturnType();
if (returnType.isDecimalV3() && commonType.isValid()) {
returnType = commonType;
}
if (FunctionSet.MAX_BY.equals(fnName)) {
// max_by stays an AggregateFunction; copy identifying metadata from the original.
AggregateFunction newFn = new AggregateFunction(fn.getFunctionName(),
Arrays.asList(argumentTypes), returnType,
Type.VARCHAR, fn.hasVarArgs());
newFn.setFunctionId(fn.getFunctionId());
newFn.setChecksum(fn.getChecksum());
newFn.setBinaryType(fn.getBinaryType());
newFn.setHasVarArgs(fn.hasVarArgs());
newFn.setId(fn.getId());
newFn.setUserVisible(fn.isUserVisible());
newFn.setisAnalyticFn(true);
fn = newFn;
return fn;
}
ScalarFunction newFn = new ScalarFunction(fn.getFunctionName(), argTypes, returnType,
fn.getLocation(), ((ScalarFunction) fn).getSymbolName(),
((ScalarFunction) fn).getPrepareFnSymbol(),
((ScalarFunction) fn).getCloseFnSymbol());
newFn.setFunctionId(fn.getFunctionId());
newFn.setChecksum(fn.getChecksum());
newFn.setBinaryType(fn.getBinaryType());
newFn.setHasVarArgs(fn.hasVarArgs());
newFn.setId(fn.getId());
newFn.setUserVisible(fn.isUserVisible());
fn = newFn;
} else if (FunctionSet.decimalRoundFunctions.contains(fnName)) {
// round/truncate-family functions have dedicated decimal handling.
List<Type> argTypes = Arrays.stream(fn.getArgs()).map(t -> t.isDecimalV3() ? commonType : t)
.collect(Collectors.toList());
fn = DecimalV3FunctionAnalyzer.getFunctionOfRound(node, fn, argTypes);
}
return fn;
}
@Override
// GROUPING()/GROUPING_ID(): requires at least one argument, all plain column refs;
// resolves the builtin against a single BIGINT signature.
public Void visitGroupingFunctionCall(GroupingFunctionCallExpr node, Scope scope) {
if (node.getChildren().size() < 1) {
throw new SemanticException("GROUPING functions required at least one parameters");
}
if (node.getChildren().stream().anyMatch(e -> !(e instanceof SlotRef))) {
throw new SemanticException("grouping functions only support column.");
}
Type[] childTypes = new Type[1];
childTypes[0] = Type.BIGINT;
Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(),
childTypes, Function.CompareMode.IS_IDENTICAL);
node.setFn(fn);
node.setType(fn.getReturnType());
return null;
}
@Override
// CASE WHEN: children are laid out as [caseExpr?] (when, then)* [elseExpr?].
// All WHEN operands (plus the CASE operand, if any) must share a compatible type,
// and all THEN/ELSE results must cast to a common return type.
public Void visitCaseWhenExpr(CaseExpr node, Scope context) {
int start = 0;
int end = node.getChildren().size();
Expr caseExpr = null;
Expr elseExpr = null;
if (node.hasCaseExpr()) {
caseExpr = node.getChild(0);
start++;
}
if (node.hasElseExpr()) {
elseExpr = node.getChild(end - 1);
end--;
}
if (node.getChildren().stream().anyMatch(d -> !d.getType().isScalarType())) {
throw new SemanticException("case-when only support scalar type");
}
// Collect the CASE operand and every WHEN condition type.
List<Type> whenTypes = Lists.newArrayList();
if (null != caseExpr) {
whenTypes.add(caseExpr.getType());
}
for (int i = start; i < end; i = i + 2) {
whenTypes.add(node.getChild(i).getType());
}
Type compatibleType = Type.NULL;
if (null != caseExpr) {
compatibleType = TypeManager.getCompatibleTypeForCaseWhen(whenTypes);
}
for (Type type : whenTypes) {
if (!Type.canCastTo(type, compatibleType)) {
throw new SemanticException("Invalid when type cast " + type.toSql()
+ " to " + compatibleType.toSql());
}
}
// Collect every THEN result plus the ELSE result.
List<Type> thenTypes = Lists.newArrayList();
for (int i = start + 1; i < end; i = i + 2) {
thenTypes.add(node.getChild(i).getType());
}
if (null != elseExpr) {
thenTypes.add(elseExpr.getType());
}
// All-NULL branches default the result to BOOLEAN.
Type returnType = thenTypes.stream().allMatch(Type.NULL::equals) ? Type.BOOLEAN :
TypeManager.getCompatibleTypeForCaseWhen(thenTypes);
for (Type type : thenTypes) {
if (!Type.canCastTo(type, returnType)) {
throw new SemanticException("Invalid then type cast " + type.toSql()
+ " to " + returnType.toSql());
}
}
node.setType(returnType);
return null;
}
@Override
// Scalar subquery: analyzes the inner query and adopts the type of its first output column.
public Void visitSubquery(Subquery node, Scope context) {
QueryAnalyzer queryAnalyzer = new QueryAnalyzer(session);
queryAnalyzer.analyze(node.getQueryStatement(), context);
node.setType(node.getQueryStatement().getQueryRelation().getRelationFields().getFieldByIndex(0).getType());
return null;
}
@Override
// Window expression: analyzes the wrapped function call, window frame boundary
// expressions, partition-by and order-by expressions, then validates the whole
// analytic expression.
public Void visitAnalyticExpr(AnalyticExpr node, Scope context) {
visit(node.getFnCall(), context);
node.setType(node.getFnCall().getType());
if (node.getWindow() != null) {
if (node.getWindow().getLeftBoundary() != null &&
node.getWindow().getLeftBoundary().getExpr() != null) {
visit(node.getWindow().getLeftBoundary().getExpr(), context);
}
if (node.getWindow().getRightBoundary() != null &&
node.getWindow().getRightBoundary().getExpr() != null) {
visit(node.getWindow().getRightBoundary().getExpr(), context);
}
}
node.getPartitionExprs().forEach(e -> visit(e, context));
node.getOrderByElements().stream().map(OrderByElement::getExpr).forEach(e -> visit(e, context));
verifyAnalyticExpression(node);
return null;
}
@Override
// Evaluates session-information functions (DATABASE/SCHEMA, USER, CURRENT_USER,
// CURRENT_ROLE, CONNECTION_ID) eagerly from the session and stores the literal result
// on the node.
public Void visitInformationFunction(InformationFunction node, Scope context) {
String funcType = node.getFuncType();
if (funcType.equalsIgnoreCase("DATABASE") || funcType.equalsIgnoreCase("SCHEMA")) {
node.setType(Type.VARCHAR);
node.setStrValue(ClusterNamespace.getNameFromFullName(session.getDatabase()));
} else if (funcType.equalsIgnoreCase("USER")) {
node.setType(Type.VARCHAR);
node.setStrValue(session.getUserIdentity().toString());
} else if (funcType.equalsIgnoreCase("CURRENT_USER")) {
node.setType(Type.VARCHAR);
node.setStrValue(session.getCurrentUserIdentity().toString());
} else if (funcType.equalsIgnoreCase("CURRENT_ROLE")) {
node.setType(Type.VARCHAR);
PrivilegeManager manager = session.getGlobalStateMgr().getPrivilegeManager();
List<String> roleName = new ArrayList<>();
try {
for (Long roleId : session.getCurrentRoleIds()) {
RolePrivilegeCollection rolePrivilegeCollection =
manager.getRolePrivilegeCollectionUnlocked(roleId, true);
roleName.add(rolePrivilegeCollection.getName());
}
} catch (PrivilegeException e) {
throw new SemanticException(e.getMessage());
}
if (roleName.isEmpty()) {
node.setStrValue("NONE");
} else {
node.setStrValue(Joiner.on(", ").join(roleName));
}
} else if (funcType.equalsIgnoreCase("CONNECTION_ID")) {
node.setType(Type.BIGINT);
node.setIntValue(session.getConnectionId());
node.setStrValue("");
}
return null;
}
@Override
// Resolves a variable reference: user variables (@v) take their evaluated expression's
// type/value (STRING NULL when unset); session variables are filled from VariableMgr,
// with sql_mode specially decoded from its numeric form to a readable string.
public Void visitVariableExpr(VariableExpr node, Scope context) {
try {
if (node.getSetType().equals(SetType.USER)) {
UserVariable userVariable = session.getUserVariables(node.getName());
// Undefined user variable evaluates to a NULL string.
if (userVariable == null) {
node.setType(Type.STRING);
node.setIsNull();
return null;
}
Type variableType = userVariable.getEvaluatedExpression().getType();
node.setType(variableType);
if (userVariable.getEvaluatedExpression() instanceof NullLiteral) {
node.setIsNull();
} else {
node.setValue(userVariable.getEvaluatedExpression().getRealObjectValue());
}
} else {
VariableMgr.fillValue(session.getSessionVariable(), node);
if (!Strings.isNullOrEmpty(node.getName()) &&
node.getName().equalsIgnoreCase(SessionVariable.SQL_MODE)) {
node.setType(Type.VARCHAR);
node.setValue(SqlModeHelper.decode((long) node.getValue()));
}
}
} catch (AnalysisException | DdlException e) {
throw new SemanticException(e.getMessage());
}
return null;
}
@Override
// DEFAULT keyword in DML: typed as VARCHAR; the actual default is substituted later.
public Void visitDefaultValueExpr(DefaultValueExpr node, Scope context) {
node.setType(Type.VARCHAR);
return null;
}
@Override
// CloneExpr wraps an already-analyzed expression; nothing further to check.
public Void visitCloneExpr(CloneExpr node, Scope context) {
return null;
}
} | class Visitor extends AstVisitor<Void, Scope> {
// Function names treated as date-addition in visitTimestampArithmeticExpr.
private static final List<String> ADD_DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_ADD,
FunctionSet.ADDDATE, FunctionSet.DAYS_ADD, FunctionSet.TIMESTAMPADD);
// Function names treated as date-subtraction in visitTimestampArithmeticExpr.
private static final List<String> SUB_DATE_FUNCTIONS =
Lists.newArrayList(FunctionSet.DATE_SUB, FunctionSet.SUBDATE,
FunctionSet.DAYS_SUB);
// Accumulates analysis results (e.g. column references recorded by handleResolvedField).
private final AnalyzeState analyzeState;
// Connect session; used for UDF lookup, sub-query analysis, and variable resolution.
private final ConnectContext session;
// Creates a visitor bound to the given analysis state and connect session.
public Visitor(AnalyzeState analyzeState, ConnectContext session) {
this.analyzeState = analyzeState;
this.session = session;
}
@Override
// Fallback for expression kinds with no dedicated visit method: always rejects,
// reporting the node's source position.
public Void visitExpression(Expr node, Scope scope) {
throw new SemanticException("not yet implemented: expression analyzer for " + node.getClass().getName(),
node.getPos());
}
// Records which field a slot resolved to, so later phases can map the SlotRef back to its relation.
private void handleResolvedField(SlotRef slot, ResolvedField resolvedField) {
analyzeState.addColumnReference(slot, FieldId.from(resolvedField));
}
@Override
// Types a struct subfield access (e.g. col.a.b) by walking the field-name chain
// through nested StructTypes; fails with a positioned error if any name does not resolve.
public Void visitSubfieldExpr(SubfieldExpr node, Scope scope) {
Expr child = node.getChild(0);
if (!child.getType().isStructType()) {
throw new SemanticException(child.toSql() + " must be a struct type, check if you are using `'`",
child.getPos());
}
List<String> fieldNames = node.getFieldNames();
Type tmpType = child.getType();
for (String fieldName : fieldNames) {
// Each step requires the current type to be a struct; the cast relies on that invariant.
StructType structType = (StructType) tmpType;
StructField structField = structType.getField(fieldName);
if (structField == null) {
throw new SemanticException(String.format("Struct subfield '%s' cannot be resolved", fieldName),
node.getPos());
}
tmpType = structField.getType();
}
// The expression's type is the type of the innermost field reached.
node.setType(tmpType);
return null;
}
@Override
// Resolves a column reference against the scope, copies the field's type and relation
// alias onto the SlotRef, and records the reference in the analyze state.
public Void visitSlot(SlotRef node, Scope scope) {
ResolvedField resolvedField = scope.resolveField(node);
node.setType(resolvedField.getField().getType());
node.setTblName(resolvedField.getField().getRelationAlias());
if (node.getType().isStructType()) {
// Struct columns additionally carry the field name and any pruned-subfield positions.
node.setCol(resolvedField.getField().getName());
node.setLabel(resolvedField.getField().getName());
if (resolvedField.getField().getTmpUsedStructFieldPos().size() > 0) {
node.setUsedStructFieldPos(resolvedField.getField().getTmpUsedStructFieldPos());
node.resetStructInfo();
}
}
handleResolvedField(node, resolvedField);
return null;
}
@Override
// Types a positional field reference by index into the scope's relation fields.
public Void visitFieldReference(FieldReference node, Scope scope) {
Field field = scope.getRelationFields().getFieldByIndex(node.getFieldIndex());
node.setType(field.getType());
return null;
}
@Override
// Types an array literal: computes the common supertype of all elements (or uses the
// pre-set item type), casts mismatching children to it, and sets ARRAY<itemType>.
public Void visitArrayExpr(ArrayExpr node, Scope scope) {
if (!node.getChildren().isEmpty()) {
try {
Type targetItemType;
if (node.getType() != null) {
// Type already assigned (e.g. from an explicit cast); reuse its item type.
targetItemType = ((ArrayType) node.getType()).getItemType();
} else {
targetItemType = TypeManager.getCommonSuperType(
node.getChildren().stream().map(Expr::getType).collect(Collectors.toList()));
}
for (int i = 0; i < node.getChildren().size(); i++) {
if (!node.getChildren().get(i).getType().matchesType(targetItemType)) {
node.castChild(targetItemType, i);
}
}
node.setType(new ArrayType(targetItemType));
} catch (AnalysisException e) {
throw new SemanticException(e.getMessage());
}
} else {
// Empty literal: element type is unknown, use ARRAY_NULL.
node.setType(Type.ARRAY_NULL);
}
return null;
}
@Override
// Types a map literal from its key and value expressions; an empty literal (or a
// missing key/value expression) contributes NULL for the unknown component type.
public Void visitMapExpr(MapExpr node, Scope scope) {
if (!node.getChildren().isEmpty()) {
Type keyType = Type.NULL;
Type valueType = Type.NULL;
if (node.getKeyExpr() != null) {
keyType = node.getKeyExpr().getType();
}
if (node.getValueExpr() != null) {
valueType = node.getValueExpr().getType();
}
node.setType(new MapType(keyType, valueType));
} else {
node.setType(new MapType(Type.NULL, Type.NULL));
}
return null;
}
@Override
// Types a subscript expression expr[subscript] for arrays (integer index, result is the
// item type) and maps (key-typed index, result is the value type).
public Void visitCollectionElementExpr(CollectionElementExpr node, Scope scope) {
Expr expr = node.getChild(0);
Expr subscript = node.getChild(1);
if (!expr.getType().isArrayType() && !expr.getType().isMapType()) {
throw new SemanticException("cannot subscript type " + expr.getType()
+ " because it is not an array or a map", expr.getPos());
}
if (expr.getType().isArrayType()) {
if (!subscript.getType().isNumericType()) {
throw new SemanticException("array subscript must have type integer", subscript.getPos());
}
try {
// Normalize any numeric subscript to INT before use.
if (subscript.getType().getPrimitiveType() != PrimitiveType.INT) {
node.castChild(Type.INT, 1);
}
node.setType(((ArrayType) expr.getType()).getItemType());
} catch (AnalysisException e) {
throw new SemanticException(e.getMessage());
}
} else {
try {
// Map subscript: cast the key expression to the map's declared key type if needed.
if (subscript.getType().getPrimitiveType() !=
((MapType) expr.getType()).getKeyType().getPrimitiveType()) {
node.castChild(((MapType) expr.getType()).getKeyType(), 1);
}
node.setType(((MapType) expr.getType()).getValueType());
} catch (AnalysisException e) {
throw new SemanticException(e.getMessage());
}
}
return null;
}
@Override
// Types an array slice: the result of slicing an array is the same array type.
public Void visitArraySliceExpr(ArraySliceExpr node, Scope scope) {
if (!node.getChild(0).getType().isArrayType()) {
throw new SemanticException("cannot subscript type" +
node.getChild(0).getType() + " because it is not an array", node.getChild(0).getPos());
}
node.setType(node.getChild(0).getType());
return null;
}
@Override
// Types the JSON arrow operator (json_col -> 'key'): requires a string-literal key
// and a JSON left operand; the result is always JSON.
public Void visitArrowExpr(ArrowExpr node, Scope scope) {
Expr item = node.getChild(0);
Expr key = node.getChild(1);
if (!key.isLiteral() || !key.getType().isStringType()) {
throw new SemanticException("right operand of -> should be string literal, but got " + key,
key.getPos());
}
if (!item.getType().isJsonType()) {
throw new SemanticException(
"-> operator could only be used for json column, but got " + item.getType(), item.getPos());
}
node.setType(Type.JSON);
return null;
}
@Override
// Analyzes a lambda (child 0 = body, children 1..n = arguments): binds each argument to
// the corresponding lambda input supplied by the enclosing high-order function via the
// scope, rejects duplicate argument names, then analyzes the body in a nested scope.
public Void visitLambdaFunctionExpr(LambdaFunctionExpr node, Scope scope) {
if (scope.getLambdaInputs().size() == 0) {
throw new SemanticException(
"Lambda Functions can only be used in high-order functions with arrays/maps",
node.getPos());
}
if (scope.getLambdaInputs().size() != node.getChildren().size() - 1) {
throw new SemanticException("Lambda arguments should equal to lambda input arrays", node.getPos());
}
Set<String> set = new HashSet<>();
List<LambdaArgument> args = Lists.newArrayList();
for (int i = 1; i < node.getChildren().size(); ++i) {
args.add((LambdaArgument) node.getChild(i));
String name = ((LambdaArgument) node.getChild(i)).getName();
if (set.contains(name)) {
throw new SemanticException("Lambda argument: " + name + " is duplicated",
node.getChild(i).getPos());
}
set.add(name);
// Argument i binds to lambda input i-1 (child 0 is the lambda body).
((LambdaArgument) node.getChild(i)).setNullable(scope.getLambdaInputs().get(i - 1).isNullable());
node.getChild(i).setType(scope.getLambdaInputs().get(i - 1).getType());
}
Scope lambdaScope = new Scope(args, scope);
ExpressionAnalyzer.analyzeExpression(node.getChild(0), this.analyzeState, lambdaScope, this.session);
node.setType(Type.FUNCTION);
// Inputs are consumed; clear so they are not reused by a sibling expression.
scope.clearLambdaInputs();
return null;
}
@Override
// Checks AND/OR/NOT operands: every child must be BOOLEAN (or NULL); result is BOOLEAN.
public Void visitCompoundPredicate(CompoundPredicate node, Scope scope) {
for (int i = 0; i < node.getChildren().size(); i++) {
Type type = node.getChild(i).getType();
if (!type.isBoolean() && !type.isNull()) {
String msg = String.format("Operand '%s' part of predicate " +
"'%s' should return type 'BOOLEAN' but returns type '%s'",
AstToStringBuilder.toString(node), AstToStringBuilder.toString(node.getChild(i)),
type.toSql());
throw new SemanticException(msg, node.getChild(i).getPos());
}
}
node.setType(Type.BOOLEAN);
return null;
}
@Override
// BETWEEN: all three operands must be castable to a single compatible comparison type.
public Void visitBetweenPredicate(BetweenPredicate node, Scope scope) {
predicateBaseAndCheck(node);
List<Type> list = node.getChildren().stream().map(Expr::getType).collect(Collectors.toList());
Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list);
for (Type type : list) {
if (!Type.canCastTo(type, compatibleType)) {
throw new SemanticException(
"between predicate type " + type.toSql() + " with type " + compatibleType.toSql()
+ " is invalid", node.getPos());
}
}
return null;
}
@Override
public Void visitBinaryPredicate(BinaryPredicate node, Scope scope) {
    // Both operands must be castable to a common comparison type; the
    // predicate itself evaluates to BOOLEAN.
    Type type1 = node.getChild(0).getType();
    Type type2 = node.getChild(1).getType();
    Type compatibleType =
            TypeManager.getCompatibleTypeForBinary(node.getOp().isNotRangeComparison(), type1, type2);
    final String ERROR_MSG = "Column type %s does not support binary predicate operation";
    if (!Type.canCastTo(type1, compatibleType)) {
        throw new SemanticException(String.format(ERROR_MSG, type1.toSql()), node.getPos());
    }
    if (!Type.canCastTo(type2, compatibleType)) {
        // Bug fix: report the right-hand operand's type (previously printed
        // type1 here, blaming the wrong operand).
        throw new SemanticException(String.format(ERROR_MSG, type2.toSql()), node.getPos());
    }
    node.setType(Type.BOOLEAN);
    return null;
}
@Override
public Void visitArithmeticExpr(ArithmeticExpr node, Scope scope) {
    // Resolves the result type and the builtin function for arithmetic
    // expressions: decimal V3 takes a dedicated rewrite path, other binary
    // operators use per-operator promotion rules, unary prefix is BIGINT.
    if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.BINARY_INFIX) {
        ArithmeticExpr.Operator op = node.getOp();
        // Numeric result types of both operands drive the promotion rules.
        Type t1 = node.getChild(0).getType().getNumResultType();
        Type t2 = node.getChild(1).getType().getNumResultType();
        if (t1.isDecimalV3() || t2.isDecimalV3()) {
            // DECIMAL V3 precision/scale derivation is delegated to
            // rewriteDecimalOperation; a scalar function matching the
            // rewritten child types is then synthesized.
            try {
                node.rewriteDecimalOperation();
            } catch (AnalysisException ex) {
                throw new SemanticException(ex.getMessage());
            }
            Type lhsType = node.getChild(0).getType();
            Type rhsType = node.getChild(1).getType();
            Type resultType = node.getType();
            Type[] args = {lhsType, rhsType};
            Function fn = Expr.getBuiltinFunction(op.getName(), args, Function.CompareMode.IS_IDENTICAL);
            Function newFn = new ScalarFunction(fn.getFunctionName(), args, resultType, fn.hasVarArgs());
            node.setType(resultType);
            node.setFn(newFn);
            return null;
        }
        // Non-decimal path: choose cast targets per operator family.
        Type lhsType;
        Type rhsType;
        switch (op) {
            case MULTIPLY:
            case ADD:
            case SUBTRACT:
                // widen beyond the common type to reduce overflow risk
                lhsType = ArithmeticExpr.getBiggerType(ArithmeticExpr.getCommonType(t1, t2));
                rhsType = lhsType;
                break;
            case MOD:
                lhsType = ArithmeticExpr.getCommonType(t1, t2);
                rhsType = lhsType;
                break;
            case DIVIDE:
                // '/' on integers is computed in floating point
                lhsType = ArithmeticExpr.getCommonType(t1, t2);
                if (lhsType.isFixedPointType()) {
                    lhsType = Type.DOUBLE;
                }
                rhsType = lhsType;
                break;
            case INT_DIVIDE:
            case BITAND:
            case BITOR:
            case BITXOR:
                // integer division and bit ops require fixed-point operands
                lhsType = ArithmeticExpr.getCommonType(t1, t2);
                if (!lhsType.isFixedPointType()) {
                    lhsType = Type.BIGINT;
                }
                rhsType = lhsType;
                break;
            case BIT_SHIFT_LEFT:
            case BIT_SHIFT_RIGHT:
            case BIT_SHIFT_RIGHT_LOGICAL:
                // the shift amount is always BIGINT; lhs keeps its own type
                lhsType = t1;
                rhsType = Type.BIGINT;
                break;
            default:
                throw new SemanticException("Unknown arithmetic operation " + op + " in: " + node,
                        node.getPos());
        }
        // NULL op NULL stays NULL rather than being promoted.
        if (node.getChild(0).getType().equals(Type.NULL) && node.getChild(1).getType().equals(Type.NULL)) {
            lhsType = Type.NULL;
            rhsType = Type.NULL;
        }
        if (lhsType.isInvalid() || rhsType.isInvalid()) {
            throw new SemanticException("Any function type can not cast to " + Type.INVALID.toSql());
        }
        if (!Type.NULL.equals(node.getChild(0).getType()) && !Type.canCastTo(t1, lhsType)) {
            throw new SemanticException(
                    "cast type " + node.getChild(0).getType().toSql() + " with type " + lhsType.toSql()
                            + " is invalid", node.getPos());
        }
        if (!Type.NULL.equals(node.getChild(1).getType()) && !Type.canCastTo(t2, rhsType)) {
            throw new SemanticException(
                    "cast type " + node.getChild(1).getType().toSql() + " with type " + rhsType.toSql()
                            + " is invalid", node.getPos());
        }
        Function fn = Expr.getBuiltinFunction(op.getName(), new Type[] {lhsType, rhsType},
                Function.CompareMode.IS_SUPERTYPE_OF);
        if (fn == null) {
            throw new SemanticException(String.format(
                    "No matching function '%s' with operand types %s and %s", node.getOp().getName(), t1, t2));
        }
        /*
         * commonType is the common type of the parameters of the function,
         * and fn.getReturnType() is the return type of the function after execution
         * So we use fn.getReturnType() as node type
         */
        node.setType(fn.getReturnType());
        node.setFn(fn);
    } else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_PREFIX) {
        // unary prefix operators are resolved against a BIGINT operand
        Function fn = Expr.getBuiltinFunction(
                node.getOp().getName(), new Type[] {Type.BIGINT}, Function.CompareMode.IS_SUPERTYPE_OF);
        node.setType(Type.BIGINT);
        node.setFn(fn);
    } else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_POSTFIX) {
        throw new SemanticException("not yet implemented: expression analyzer for " + node.getClass().getName(),
                node.getPos());
    } else {
        throw new SemanticException("not yet implemented: expression analyzer for " + node.getClass().getName(),
                node.getPos());
    }
    return null;
}
@Override
public Void visitTimestampArithmeticExpr(TimestampArithmeticExpr node, Scope scope) {
    // The first operand is always coerced to DATETIME.
    node.setChild(0, TypeManager.addCastExpr(node.getChild(0), Type.DATETIME));
    // Map the surface syntax onto a builtin named <UNIT>S_{add|sub|diff}.
    String funcOpName;
    if (node.getFuncName() != null) {
        if (ADD_DATE_FUNCTIONS.contains(node.getFuncName())) {
            funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "add");
        } else if (SUB_DATE_FUNCTIONS.contains(node.getFuncName())) {
            funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "sub");
        } else {
            // diff-style functions compare two DATETIME values, so the
            // second operand is coerced as well
            node.setChild(1, TypeManager.addCastExpr(node.getChild(1), Type.DATETIME));
            funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "diff");
        }
    } else {
        // INTERVAL syntax: the arithmetic operator decides add vs sub
        funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(),
                (node.getOp() == ArithmeticExpr.Operator.ADD) ? "add" : "sub");
    }
    Type[] argumentTypes = node.getChildren().stream().map(Expr::getType)
            .toArray(Type[]::new);
    Function fn = Expr.getBuiltinFunction(funcOpName.toLowerCase(), argumentTypes,
            Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    if (fn == null) {
        String msg = String.format("No matching function with signature: %s(%s)", funcOpName, Joiner.on(", ")
                .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
        throw new SemanticException(msg, node.getPos());
    }
    node.setType(fn.getReturnType());
    node.setFn(fn);
    return null;
}
@Override
public Void visitExistsPredicate(ExistsPredicate node, Scope scope) {
    // EXISTS needs only the shared predicate checks; result type is BOOLEAN.
    predicateBaseAndCheck(node);
    return null;
}
@Override
public Void visitInPredicate(InPredicate node, Scope scope) {
    predicateBaseAndCheck(node);
    // A subquery operand may not be mixed with a literal expression list.
    List<Expr> queryExpressions = Lists.newArrayList();
    node.collect(arg -> arg instanceof Subquery, queryExpressions);
    if (queryExpressions.size() > 0 && node.getChildren().size() > 2) {
        throw new SemanticException("In Predicate only support literal expression list", node.getPos());
    }
    // Every operand must be castable to one compatible type; JSON operands
    // are rejected outright.
    List<Type> list = node.getChildren().stream().map(Expr::getType).collect(Collectors.toList());
    Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list);
    for (Expr child : node.getChildren()) {
        Type type = child.getType();
        if (type.isJsonType()) {
            throw new SemanticException("InPredicate of JSON is not supported", child.getPos());
        }
        if (!Type.canCastTo(type, compatibleType)) {
            throw new SemanticException(
                    "in predicate type " + type.toSql() + " with type " + compatibleType.toSql()
                            + " is invalid", child.getPos());
        }
    }
    return null;
}
@Override
public Void visitMultiInPredicate(MultiInPredicate node, Scope scope) {
    // (a, b, ...) IN (subquery): children 0..numberOfColumns-1 are the
    // left-hand expressions; the child at numberOfColumns is the subquery.
    predicateBaseAndCheck(node);
    List<Type> leftTypes =
            node.getChildren().stream().limit(node.getNumberOfColumns()).map(Expr::getType)
                    .collect(Collectors.toList());
    Subquery inSubquery = (Subquery) node.getChild(node.getNumberOfColumns());
    List<Type> rightTypes =
            inSubquery.getQueryStatement().getQueryRelation().getOutputExpression().stream().map(Expr::getType).
                    collect(Collectors.toList());
    if (leftTypes.size() != rightTypes.size()) {
        throw new SemanticException(
                "subquery must return the same number of columns as provided by the IN predicate",
                node.getPos());
    }
    // Column-wise validation: JSON/MAP/STRUCT are unsupported, and each
    // left column must be castable to the matching subquery column.
    for (int i = 0; i < rightTypes.size(); ++i) {
        if (leftTypes.get(i).isJsonType() || rightTypes.get(i).isJsonType() || leftTypes.get(i).isMapType() ||
                rightTypes.get(i).isMapType() || leftTypes.get(i).isStructType() ||
                rightTypes.get(i).isStructType()) {
            // Consistency fix: include the error position like the other
            // predicate visitors do.
            throw new SemanticException("InPredicate of JSON, Map, Struct types is not supported",
                    node.getPos());
        }
        if (!Type.canCastTo(leftTypes.get(i), rightTypes.get(i))) {
            throw new SemanticException(
                    "in predicate type " + leftTypes.get(i).toSql() + " with type " + rightTypes.get(i).toSql()
                            + " is invalid", node.getPos());
        }
    }
    return null;
}
@Override
public Void visitLiteral(LiteralExpr node, Scope scope) {
    // Only LARGEINT literals need validation here: the parsed value may
    // fall outside the supported LARGEINT range.
    if (node instanceof LargeIntLiteral) {
        BigInteger value = ((LargeIntLiteral) node).getValue();
        boolean belowMin = value.compareTo(LargeIntLiteral.LARGE_INT_MIN) < 0;
        boolean aboveMax = value.compareTo(LargeIntLiteral.LARGE_INT_MAX) > 0;
        if (belowMin || aboveMax) {
            throw new SemanticException(PARSER_ERROR_MSG.numOverflow(value.toString()), node.getPos());
        }
    }
    return null;
}
@Override
public Void visitIsNullPredicate(IsNullPredicate node, Scope scope) {
    // IS [NOT] NULL needs only the shared predicate checks; complex operand
    // types are explicitly allowed here (see predicateBaseAndCheck).
    predicateBaseAndCheck(node);
    return null;
}
@Override
public Void visitLikePredicate(LikePredicate node, Scope scope) {
    predicateBaseAndCheck(node);
    // Both operands must be string-typed (NULL is allowed).
    Type type1 = node.getChild(0).getType();
    Type type2 = node.getChild(1).getType();
    if (!type1.isStringType() && !type1.isNull()) {
        throw new SemanticException(
                "left operand of " + node.getOp().toString() + " must be of type STRING: " +
                        AstToStringBuilder.toString(node), node.getPos());
    }
    if (!type2.isStringType() && !type2.isNull()) {
        throw new SemanticException(
                "right operand of " + node.getOp().toString() + " must be of type STRING: " +
                        AstToStringBuilder.toString(node), node.getPos());
    }
    // For REGEXP with a literal pattern, validate the pattern eagerly so the
    // user gets an analysis-time error instead of a runtime failure.
    if (LikePredicate.Operator.REGEXP.equals(node.getOp()) && !type2.isNull() && node.getChild(1).isLiteral()) {
        try {
            Pattern.compile(((StringLiteral) node.getChild(1)).getValue());
        } catch (PatternSyntaxException e) {
            throw new SemanticException(
                    "Invalid regular expression in '" + AstToStringBuilder.toString(node) + "'", node.getPos());
        }
    }
    return null;
}
private void predicateBaseAndCheck(Predicate node) {
    // Every predicate evaluates to BOOLEAN.
    node.setType(Type.BOOLEAN);
    // Metric-only types never appear in predicates; complex types are
    // tolerated only under IS [NOT] NULL.
    for (Expr child : node.getChildren()) {
        Type childType = child.getType();
        boolean complexDisallowed = childType.isComplexType() && !(node instanceof IsNullPredicate);
        if (childType.isOnlyMetricType() || complexDisallowed) {
            throw new SemanticException(
                    "HLL, BITMAP, PERCENTILE and ARRAY, MAP, STRUCT type couldn't as Predicate", node.getPos());
        }
    }
}
@Override
public Void visitCastExpr(CastExpr cast, Scope context) {
    // Implicit casts already carry their target type; explicit casts read it
    // from the parsed type definition.
    Type castType;
    if (cast.isImplicit()) {
        castType = cast.getType();
    } else {
        castType = cast.getTargetTypeDef().getType();
    }
    if (!Type.canCastTo(cast.getChild(0).getType(), castType)) {
        // '%' is doubled in the rendered SQL -- NOTE(review): presumably the
        // message later passes through a formatter; confirm.
        throw new SemanticException("Invalid type cast from " + cast.getChild(0).getType().toSql() + " to "
                + castType.toSql() + " in sql `" +
                AstToStringBuilder.toString(cast.getChild(0)).replace("%", "%%") + "`",
                cast.getPos());
    }
    cast.setType(castType);
    return null;
}
@Override
// NOTE(review): an @Override annotation on a private non-visitor helper will
// not compile; it likely belongs to a visitor method that is missing from
// this excerpt. Confirm and remove it.
private void checkFunction(String fnName, FunctionCallExpr node) {
    // Per-function argument validation that cannot be expressed through
    // builtin function signatures alone. Unlisted functions pass through.
    switch (fnName) {
        case FunctionSet.TIME_SLICE:
        case FunctionSet.DATE_SLICE:
            // the interval must be a positive integer literal
            if (!(node.getChild(1) instanceof IntLiteral)) {
                throw new SemanticException(
                        fnName + " requires second parameter must be a constant interval", node.getPos());
            }
            if (((IntLiteral) node.getChild(1)).getValue() <= 0) {
                throw new SemanticException(
                        fnName + " requires second parameter must be greater than 0", node.getPos());
            }
            break;
        case FunctionSet.ARRAY_FILTER:
            // array_filter(array, filter castable to ARRAY<BOOL>)
            if (node.getChildren().size() != 2) {
                throw new SemanticException(fnName + " should have 2 array inputs or lambda functions",
                        node.getPos());
            }
            if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) {
                throw new SemanticException("The first input of " + fnName +
                        " should be an array or a lambda function", node.getPos());
            }
            if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) {
                throw new SemanticException("The second input of " + fnName +
                        " should be an array or a lambda function", node.getPos());
            }
            if (!Type.canCastTo(node.getChild(1).getType(), Type.ARRAY_BOOLEAN)) {
                throw new SemanticException("The second input of array_filter " +
                        node.getChild(1).getType().toString() + " can't cast to ARRAY<BOOL>", node.getPos());
            }
            break;
        case FunctionSet.ARRAY_SORTBY:
            // array_sortby(array, key array); no boolean-cast requirement
            if (node.getChildren().size() != 2) {
                throw new SemanticException(fnName + " should have 2 array inputs or lambda functions",
                        node.getPos());
            }
            if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) {
                throw new SemanticException("The first input of " + fnName +
                        " should be an array or a lambda function", node.getPos());
            }
            if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) {
                throw new SemanticException("The second input of " + fnName +
                        " should be an array or a lambda function", node.getPos());
            }
            break;
        case FunctionSet.ARRAY_CONCAT:
            if (node.getChildren().size() < 2) {
                throw new SemanticException(fnName + " should have at least two inputs", node.getPos());
            }
            break;
        case FunctionSet.ARRAY_GENERATE:
            // 1-3 integer arguments; column inputs require all three
            if (node.getChildren().size() < 1 || node.getChildren().size() > 3) {
                throw new SemanticException(fnName + " has wrong input numbers");
            }
            for (Expr expr : node.getChildren()) {
                if ((expr instanceof SlotRef) && node.getChildren().size() != 3) {
                    throw new SemanticException(fnName + " with IntColumn doesn't support default parameters");
                }
                if (!(expr instanceof IntLiteral) && !(expr instanceof LargeIntLiteral) &&
                        !(expr instanceof SlotRef) && !(expr instanceof NullLiteral)) {
                    throw new SemanticException(fnName + "'s parameter only support Integer");
                }
            }
            break;
        case FunctionSet.MAP_FILTER:
            // map_filter(map, filter castable to ARRAY<BOOL>)
            if (node.getChildren().size() != 2) {
                throw new SemanticException(fnName + " should have 2 inputs, " +
                        "but there are just " + node.getChildren().size() + " inputs.");
            }
            if (!node.getChild(0).getType().isMapType() && !node.getChild(0).getType().isNull()) {
                throw new SemanticException("The first input of " + fnName +
                        " should be a map or a lambda function.");
            }
            if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) {
                throw new SemanticException("The second input of " + fnName +
                        " should be a array or a lambda function.");
            }
            if (!Type.canCastTo(node.getChild(1).getType(), Type.ARRAY_BOOLEAN)) {
                throw new SemanticException("The second input of map_filter " +
                        node.getChild(1).getType().toString() + " can't cast to ARRAY<BOOL>");
            }
            break;
    }
}
private Function getStrToDateFunction(FunctionCallExpr node, Type[] argumentTypes) {
    /*
     * @TODO: Determine the return type of this function
     * If is format is constant and don't contains time part, return date type, to compatible with mysql.
     * In fact we don't want to support str_to_date return date like mysql, reason:
     * 1. The return type of FE/BE str_to_date function signature is datetime, return date
     * let type different, it's will throw unpredictable error
     * 2. Support return date and datetime at same time in one function is complicated.
     * 3. The meaning of the function is confusing. In mysql, will return date if format is a constant
     * string and it's not contains "%H/%M/%S" pattern, but it's a trick logic, if format is a variable
     * expression, like: str_to_date(col1, col2), and the col2 is '%Y%m%d', the result always be
     * datetime.
     */
    Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(),
            argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    if (fn == null) {
        return null;
    }
    // A non-constant format can never be proven date-only: keep str_to_date.
    if (!node.getChild(1).isConstant()) {
        return fn;
    }
    // Translate the format operand to a scalar operator; if it folds to a
    // constant without a time-of-day pattern, switch to str2date.
    ExpressionMapping expressionMapping =
            new ExpressionMapping(new Scope(RelationId.anonymous(), new RelationFields()),
                    com.google.common.collect.Lists.newArrayList());
    ScalarOperator format = SqlToScalarOperatorTranslator.translate(node.getChild(1), expressionMapping,
            new ColumnRefFactory());
    if (format.isConstantRef() && !HAS_TIME_PART.matcher(format.toString()).matches()) {
        return Expr.getBuiltinFunction("str2date", argumentTypes,
                Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    }
    return fn;
}
private Function getArrayGenerateFunction(FunctionCallExpr node) {
    // Normalizes array_generate's optional arguments before resolving the
    // builtin: (stop) -> (1, stop); (start, stop) -> (start, stop, step)
    // where step defaults to 1 for ascending (or NULL-containing) ranges
    // and -1 otherwise.
    if (node.getChildren().size() == 1) {
        LiteralExpr secondParam = (LiteralExpr) node.getChild(0);
        node.clearChildren();
        node.addChild(new IntLiteral(1));
        node.addChild(secondParam);
    }
    if (node.getChildren().size() == 2) {
        int idx = 0;
        BigInteger[] childValues = new BigInteger[2];
        // Fix: use primitive boolean (was a boxed Boolean) and correct the
        // "hasNUll" typo.
        boolean hasNull = false;
        for (Expr expr : node.getChildren()) {
            if (expr instanceof NullLiteral) {
                hasNull = true;
            } else if (expr instanceof IntLiteral) {
                childValues[idx++] = BigInteger.valueOf(((IntLiteral) expr).getValue());
            } else {
                // checkFunction only admits Int/LargeInt/Null literals here
                childValues[idx++] = ((LargeIntLiteral) expr).getValue();
            }
        }
        // NULL bounds default to ascending; short-circuit avoids comparing
        // a missing bound.
        if (hasNull || childValues[0].compareTo(childValues[1]) < 0) {
            node.addChild(new IntLiteral(1));
        } else {
            node.addChild(new IntLiteral(-1));
        }
    }
    Type[] argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
    return Expr.getBuiltinFunction(FunctionSet.ARRAY_GENERATE, argumentTypes, Function.CompareMode.IS_SUPERTYPE_OF);
}
@Override
public Void visitGroupingFunctionCall(GroupingFunctionCallExpr node, Scope scope) {
    // GROUPING()/GROUPING_ID() take one or more plain column references.
    if (node.getChildren().isEmpty()) {
        throw new SemanticException("GROUPING functions required at least one parameters", node.getPos());
    }
    for (Expr child : node.getChildren()) {
        if (!(child instanceof SlotRef)) {
            throw new SemanticException("grouping functions only support column", node.getPos());
        }
    }
    // The builtin is registered with a single BIGINT argument.
    Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(),
            new Type[] {Type.BIGINT}, Function.CompareMode.IS_IDENTICAL);
    node.setFn(fn);
    node.setType(fn.getReturnType());
    return null;
}
@Override
public Void visitCaseWhenExpr(CaseExpr node, Scope context) {
    // Children layout: [caseExpr?] (when, then)* [elseExpr?].
    // start/end delimit the (when, then) pairs after peeling the optional
    // leading CASE operand and trailing ELSE operand.
    int start = 0;
    int end = node.getChildren().size();
    Expr caseExpr = null;
    Expr elseExpr = null;
    if (node.hasCaseExpr()) {
        caseExpr = node.getChild(0);
        start++;
    }
    if (node.hasElseExpr()) {
        elseExpr = node.getChild(end - 1);
        end--;
    }
    if (node.getChildren().stream().anyMatch(d -> !d.getType().isScalarType())) {
        throw new SemanticException("case-when only support scalar type", node.getPos());
    }
    // WHEN operands (plus the CASE operand, if present) are collected from
    // the even slots of the (when, then) pairs.
    List<Type> whenTypes = Lists.newArrayList();
    if (null != caseExpr) {
        whenTypes.add(caseExpr.getType());
    }
    for (int i = start; i < end; i = i + 2) {
        whenTypes.add(node.getChild(i).getType());
    }
    // A compatible WHEN type is only computed for the "CASE expr WHEN ..."
    // form; otherwise the placeholder NULL type is used.
    Type compatibleType = Type.NULL;
    if (null != caseExpr) {
        compatibleType = TypeManager.getCompatibleTypeForCaseWhen(whenTypes);
    }
    for (Type type : whenTypes) {
        if (!Type.canCastTo(type, compatibleType)) {
            throw new SemanticException("Invalid when type cast " + type.toSql()
                    + " to " + compatibleType.toSql(), node.getPos());
        }
    }
    // THEN operands (plus ELSE, if present) determine the result type; they
    // occupy the odd slots of the pairs.
    List<Type> thenTypes = Lists.newArrayList();
    for (int i = start + 1; i < end; i = i + 2) {
        thenTypes.add(node.getChild(i).getType());
    }
    if (null != elseExpr) {
        thenTypes.add(elseExpr.getType());
    }
    // NOTE(review): all-NULL branches yield BOOLEAN -- presumably a chosen
    // placeholder type; confirm before relying on it.
    Type returnType = thenTypes.stream().allMatch(Type.NULL::equals) ? Type.BOOLEAN :
            TypeManager.getCompatibleTypeForCaseWhen(thenTypes);
    for (Type type : thenTypes) {
        if (!Type.canCastTo(type, returnType)) {
            throw new SemanticException("Invalid then type cast " + type.toSql()
                    + " to " + returnType.toSql(), node.getPos());
        }
    }
    node.setType(returnType);
    return null;
}
@Override
public Void visitSubquery(Subquery node, Scope context) {
    // Analyze the subquery, then use its first output column's type as the
    // scalar type of this expression.
    QueryAnalyzer queryAnalyzer = new QueryAnalyzer(session);
    queryAnalyzer.analyze(node.getQueryStatement(), context);
    node.setType(node.getQueryStatement().getQueryRelation().getRelationFields().getFieldByIndex(0).getType());
    return null;
}
@Override
public Void visitAnalyticExpr(AnalyticExpr node, Scope context) {
    // Analyze the wrapped function call first; its type is also the type of
    // the whole window expression.
    visit(node.getFnCall(), context);
    node.setType(node.getFnCall().getType());
    // Window frame boundaries may carry their own expressions.
    if (node.getWindow() != null) {
        if (node.getWindow().getLeftBoundary() != null &&
                node.getWindow().getLeftBoundary().getExpr() != null) {
            visit(node.getWindow().getLeftBoundary().getExpr(), context);
        }
        if (node.getWindow().getRightBoundary() != null &&
                node.getWindow().getRightBoundary().getExpr() != null) {
            visit(node.getWindow().getRightBoundary().getExpr(), context);
        }
    }
    // PARTITION BY and ORDER BY expressions are analyzed before the final
    // whole-expression verification.
    node.getPartitionExprs().forEach(e -> visit(e, context));
    node.getOrderByElements().stream().map(OrderByElement::getExpr).forEach(e -> visit(e, context));
    verifyAnalyticExpression(node);
    return null;
}
@Override
public Void visitInformationFunction(InformationFunction node, Scope context) {
    // Evaluates session-introspection functions (DATABASE(), USER(), ...)
    // eagerly against the current session and stores the literal result on
    // the node. Unknown function types fall through untouched.
    String funcType = node.getFuncType();
    if (funcType.equalsIgnoreCase("DATABASE") || funcType.equalsIgnoreCase("SCHEMA")) {
        node.setType(Type.VARCHAR);
        node.setStrValue(ClusterNamespace.getNameFromFullName(session.getDatabase()));
    } else if (funcType.equalsIgnoreCase("USER")) {
        // USER(): identity the client authenticated as (user + remote IP)
        node.setType(Type.VARCHAR);
        String user = session.getQualifiedUser();
        String remoteIP = session.getRemoteIP();
        node.setStrValue(new UserIdentity(user, remoteIP).toString());
    } else if (funcType.equalsIgnoreCase("CURRENT_USER")) {
        node.setType(Type.VARCHAR);
        node.setStrValue(session.getCurrentUserIdentity().toString());
    } else if (funcType.equalsIgnoreCase("CURRENT_ROLE")) {
        // CURRENT_ROLE(): comma-joined active role names, or "NONE"
        node.setType(Type.VARCHAR);
        AuthorizationManager manager = session.getGlobalStateMgr().getAuthorizationManager();
        List<String> roleName = new ArrayList<>();
        try {
            for (Long roleId : session.getCurrentRoleIds()) {
                RolePrivilegeCollection rolePrivilegeCollection =
                        manager.getRolePrivilegeCollectionUnlocked(roleId, false);
                // a role may have been dropped since the session cached it
                if (rolePrivilegeCollection != null) {
                    roleName.add(rolePrivilegeCollection.getName());
                }
            }
        } catch (PrivilegeException e) {
            throw new SemanticException(e.getMessage());
        }
        if (roleName.isEmpty()) {
            node.setStrValue("NONE");
        } else {
            node.setStrValue(Joiner.on(", ").join(roleName));
        }
    } else if (funcType.equalsIgnoreCase("CONNECTION_ID")) {
        node.setType(Type.BIGINT);
        node.setIntValue(session.getConnectionId());
        node.setStrValue("");
    } else if (funcType.equalsIgnoreCase("CURRENT_CATALOG")) {
        node.setType(Type.VARCHAR);
        node.setStrValue(session.getCurrentCatalog().toString());
    }
    return null;
}
@Override
public Void visitVariableExpr(VariableExpr node, Scope context) {
    try {
        if (node.getSetType().equals(SetType.USER)) {
            // User-defined variable (@x): an undefined variable evaluates
            // to NULL of type STRING.
            UserVariable userVariable = session.getUserVariables(node.getName());
            if (userVariable == null) {
                node.setType(Type.STRING);
                node.setIsNull();
                return null;
            }
            Type variableType = userVariable.getEvaluatedExpression().getType();
            node.setType(variableType);
            if (userVariable.getEvaluatedExpression() instanceof NullLiteral) {
                node.setIsNull();
            } else {
                node.setValue(userVariable.getEvaluatedExpression().getRealObjectValue());
            }
        } else {
            // Session/global variable: value comes from VariableMgr;
            // sql_mode is exposed as its decoded string form.
            VariableMgr.fillValue(session.getSessionVariable(), node);
            if (!Strings.isNullOrEmpty(node.getName()) &&
                    node.getName().equalsIgnoreCase(SessionVariable.SQL_MODE)) {
                node.setType(Type.VARCHAR);
                node.setValue(SqlModeHelper.decode((long) node.getValue()));
            }
        }
    } catch (AnalysisException | DdlException e) {
        throw new SemanticException(e.getMessage());
    }
    return null;
}
@Override
public Void visitDefaultValueExpr(DefaultValueExpr node, Scope context) {
    // The DEFAULT placeholder is typed VARCHAR during analysis.
    node.setType(Type.VARCHAR);
    return null;
}
@Override
public Void visitCloneExpr(CloneExpr node, Scope context) {
    // Nothing to analyze for a clone expression.
    return null;
}
} |
Why did you destroy the alignment? | public void requreThatWeCanGetTheZoneConfig() {
DeployState state = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(true).build())
.zone(new Zone(SystemName.cd, Environment.test, RegionName.from("some-region"))).build();
MockRoot root = new MockRoot("foo", state);
ContainerCluster cluster = new ContainerCluster(root, "container0", "container1");
ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder();
cluster.getConfig(builder);
ConfigserverConfig config = new ConfigserverConfig(builder);
assertEquals(Environment.test.value(), config.environment());
assertEquals("some-region", config.region());
assertEquals("cd", config.system());
} | .zone(new Zone(SystemName.cd, Environment.test, RegionName.from("some-region"))).build(); | public void requreThatWeCanGetTheZoneConfig() {
DeployState state = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(true).build())
.zone(new Zone(SystemName.cd, Environment.test, RegionName.from("some-region")))
.build();
MockRoot root = new MockRoot("foo", state);
ContainerCluster cluster = new ContainerCluster(root, "container0", "container1");
ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder();
cluster.getConfig(builder);
ConfigserverConfig config = new ConfigserverConfig(builder);
assertEquals(Environment.test.value(), config.environment());
assertEquals("some-region", config.region());
assertEquals("cd", config.system());
} | class ContainerClusterTest {
@Test
public void requireThatDefaultMetricConsumerFactoryCanBeConfigured() {
ContainerCluster cluster = newContainerCluster();
cluster.setDefaultMetricConsumerFactory(MetricDefaultsConfig.Factory.Enum.YAMAS_SCOREBOARD);
assertEquals(MetricDefaultsConfig.Factory.Enum.YAMAS_SCOREBOARD,
getMetricDefaultsConfig(cluster).factory());
}
@Test
public void requireThatDefaultMetricConsumerFactoryMatchesConfigDefault() {
    // Without an explicit factory, the cluster exposes the config default.
    ContainerCluster cluster = newContainerCluster();
    assertEquals(new MetricDefaultsConfig(new MetricDefaultsConfig.Builder()).factory(),
            getMetricDefaultsConfig(cluster).factory());
}
@Test
public void requireThatClusterInfoIsPopulated() {
    // The two containers added by newContainerCluster() must show up as two
    // services with stable indices, hostnames, and port counts.
    ContainerCluster cluster = newContainerCluster();
    ClusterInfoConfig config = getClusterInfoConfig(cluster);
    assertEquals("name", config.clusterId());
    assertEquals(2, config.nodeCount());
    assertEquals(2, config.services().size());
    Iterator<ClusterInfoConfig.Services> iterator = config.services().iterator();
    ClusterInfoConfig.Services service = iterator.next();
    assertEquals("host-c1", service.hostname());
    assertEquals(0, service.index());
    assertEquals(4, service.ports().size());
    service = iterator.next();
    assertEquals("host-c2", service.hostname());
    assertEquals(1, service.index());
    assertEquals(4, service.ports().size());
}
@Test
// NOTE(review): this @Test annotation sits on a private helper that takes
// parameters and returns a value -- JUnit 4 will reject it at runtime; it
// likely belongs to a test method missing from this excerpt. Confirm and
// remove it.
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster) {
    // Convenience overload: no memory percentage, no extra components.
    return createContainerCluster(isHosted, isCombinedCluster, Optional.empty(), Optional.empty());
}
private ContainerCluster createClusterControllerCluster() {
    // Cluster-controller clusters: not hosted, not combined, with a verifier
    // enforcing the cluster-controller component set.
    return createContainerCluster(false, false, new VerifyClusterControllerCluster());
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster, ContainerClusterVerifier extraComponents) {
    // Overload taking a verifier; memory percentage stays unset.
    return createContainerCluster(isHosted, isCombinedCluster, Optional.empty(), Optional.of(extraComponents));
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster,
                                                Optional<Integer> memoryPercentage) {
    // Overload taking a memory percentage; no extra verifier components.
    return createContainerCluster(isHosted, isCombinedCluster, memoryPercentage, Optional.empty());
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster,
                                                Optional<Integer> memoryPercentage, Optional<ContainerClusterVerifier> extraComponents) {
    // Master factory: builds a cluster under a mock root with the requested
    // hosted flag, optional verifier, combined-cluster marker, memory
    // percentage, and a default search chain.
    DeployState state = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(isHosted).build()).build();
    MockRoot root = new MockRoot("foo", state);
    ContainerCluster cluster = extraComponents.isPresent()
            ? new ContainerCluster(root, "container0", "container1", extraComponents.get())
            : new ContainerCluster(root, "container0", "container1");
    if (isCombinedCluster)
        cluster.setHostClusterId("test-content-cluster");
    cluster.setMemoryPercentage(memoryPercentage);
    cluster.setSearch(new ContainerSearch(cluster, new SearchChains(cluster, "search-chain"), new ContainerSearch.Options()));
    return cluster;
}
private void verifyHeapSizeAsPercentageOfPhysicalMemory(boolean isHosted, boolean isCombinedCluster,
                                                        Optional<Integer> explicitMemoryPercentage,
                                                        int expectedMemoryPercentage) {
    // Build a cluster with the given flags and assert the heap-size
    // percentage that ends up in QrStartConfig.
    ContainerCluster cluster = createContainerCluster(isHosted, isCombinedCluster, explicitMemoryPercentage);
    QrStartConfig.Builder configBuilder = new QrStartConfig.Builder();
    cluster.getSearch().getConfig(configBuilder);
    QrStartConfig config = new QrStartConfig(configBuilder);
    assertEquals(expectedMemoryPercentage, config.jvm().heapSizeAsPercentageOfPhysicalMemory());
}
@Test
public void requireThatHeapSizeAsPercentageOfPhysicalMemoryForHostedAndNot() {
    // Defaults: hosted standalone 60, hosted combined 17, self-hosted 0;
    // an explicit percentage always wins.
    boolean hosted = true;
    boolean combined = true;
    verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, ! combined, Optional.empty(), 60);
    verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, combined, Optional.empty(), 17);
    verifyHeapSizeAsPercentageOfPhysicalMemory(! hosted, ! combined, Optional.empty(), 0);
    verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, ! combined, Optional.of(67), 67);
    verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, combined, Optional.of(68), 68);
    verifyHeapSizeAsPercentageOfPhysicalMemory(! hosted, ! combined, Optional.of(69), 69);
}
private void verifyJvmArgs(boolean isHosted, boolean hasDocproc, String expectedArgs, String jvmArgs) {
    // Hosted docproc containers get mandatory JVM flags prepended; in every
    // other case the args must match exactly.
    if (!(isHosted && hasDocproc)) {
        assertEquals(expectedArgs, jvmArgs);
        return;
    }
    String mandatoryFlags = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage";
    String expected = "".equals(expectedArgs) ? mandatoryFlags : mandatoryFlags + " " + expectedArgs;
    assertEquals(expected, jvmArgs);
}
private void verifyJvmArgs(boolean isHosted, boolean hasDocProc) {
    // Exercises the jvm-args set/prepend/append/reset lifecycle on a fresh
    // single-container cluster.
    ContainerCluster cluster = createContainerCluster(isHosted, false);
    if (hasDocProc) {
        cluster.setDocproc(new ContainerDocproc(cluster, null));
    }
    addContainer(cluster, "c1", "host-c1");
    assertEquals(1, cluster.getContainers().size());
    Container container = cluster.getContainers().get(0);
    verifyJvmArgs(isHosted, hasDocProc, "", container.getJvmArgs());
    container.setJvmArgs("initial");
    verifyJvmArgs(isHosted, hasDocProc, "initial", container.getJvmArgs());
    container.prependJvmArgs("ignored");
    verifyJvmArgs(isHosted, hasDocProc, "ignored initial", container.getJvmArgs());
    container.appendJvmArgs("override");
    verifyJvmArgs(isHosted, hasDocProc, "ignored initial override", container.getJvmArgs());
    // setting null resets the args to empty
    container.setJvmArgs(null);
    verifyJvmArgs(isHosted, hasDocProc, "", container.getJvmArgs());
}
@Test
public void testClusterControllerResourceUsage() {
    // A cluster controller must get a 512 MB heap and a 10-thread pool.
    ContainerCluster cluster = createClusterControllerCluster();
    addClusterController(cluster, "host-c1");
    assertEquals(1, cluster.getContainers().size());
    ClusterControllerContainer container = (ClusterControllerContainer) cluster.getContainers().get(0);
    QrStartConfig.Builder qrBuilder = new QrStartConfig.Builder();
    container.getConfig(qrBuilder);
    QrStartConfig qrStartConfig = new QrStartConfig(qrBuilder);
    assertEquals(512, qrStartConfig.jvm().heapsize());
    ThreadpoolConfig.Builder tpBuilder = new ThreadpoolConfig.Builder();
    container.getConfig(tpBuilder);
    ThreadpoolConfig threadpoolConfig = new ThreadpoolConfig(tpBuilder);
    assertEquals(10, threadpoolConfig.maxthreads());
}
@Test
public void testThatYouCanNotAddNonClusterControllerContainerToClusterControllerCluster() {
    ContainerCluster cluster = createClusterControllerCluster();
    addClusterController(cluster, "host-c1");
    try {
        // Adding a plain container to a cluster-controller cluster must throw.
        addContainer(cluster, "c2", "host-c2");
        assertTrue(false); // NOTE(review): prefer Assert.fail() here
    } catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().startsWith("Cluster container1 does not accept container com.yahoo.vespa.model.container.Container"));
    }
}
@Test
public void testThatLinguisticsIsExcludedForClusterControllerCluster() {
    // Cluster-controller clusters must not pull in the linguistics provider.
    ContainerCluster cluster = createClusterControllerCluster();
    addClusterController(cluster, "host-c1");
    assertFalse(contains("com.yahoo.language.provider.SimpleLinguisticsProvider", cluster.getAllComponents()));
}
@Test
public void testThatLinguisticsIsIncludedForNonClusterControllerClusters() {
    // Ordinary clusters do include the linguistics provider.
    ContainerCluster cluster = createContainerCluster(false, false);
    addClusterController(cluster, "host-c1");
    assertTrue(contains("com.yahoo.language.provider.SimpleLinguisticsProvider", cluster.getAllComponents()));
}
private static boolean contains(String componentId, Collection<Component<?, ?>> componentList) {
    // True when any component's class id name matches the given id.
    return componentList.stream()
            .anyMatch(component -> component.getClassId().toId().getName().equals(componentId));
}
@Test
public void requireThatJvmArgsControlWorksForHostedAndNot() {
    // All four hosted/docproc combinations share the same lifecycle checks.
    verifyJvmArgs(true, false);
    verifyJvmArgs(true, true);
    verifyJvmArgs(false, false);
    verifyJvmArgs(false, true);
}
@Test
public void requireThatWeCanhandleNull() {
    // Setting jvm args to null must behave exactly like setting "".
    ContainerCluster cluster = createContainerCluster(false, false);
    addContainer(cluster, "c1", "host-c1");
    Container container = cluster.getContainers().get(0);
    container.setJvmArgs("");
    String empty = container.getJvmArgs();
    container.setJvmArgs(null);
    assertEquals(empty, container.getJvmArgs());
}
@Test
public void requireThatRoutingProviderIsDisabledForNonHosted() {
DeployState state = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(false).build()).build();
MockRoot root = new MockRoot("foo", state);
ContainerCluster cluster = new ContainerCluster(root, "container0", "container1");
RoutingProviderConfig.Builder builder = new RoutingProviderConfig.Builder();
cluster.getConfig(builder);
RoutingProviderConfig config = new RoutingProviderConfig(builder);
assertFalse(config.enabled());
assertEquals(0, cluster.getAllComponents().stream().map(c -> c.getClassId().getName()).filter(c -> c.equals("com.yahoo.jdisc.http.filter.security.RoutingConfigProvider")).count());
}
private static void addContainer(ContainerCluster cluster, String name, String hostName) {
Container container = new Container(cluster, name, 0);
container.setHostResource(new HostResource(new Host(null, hostName)));
container.initService();
cluster.addContainer(container);
}
private static void addClusterController(ContainerCluster cluster, String hostName) {
Container container = new ClusterControllerContainer(cluster, 1, false);
container.setHostResource(new HostResource(new Host(null, hostName)));
container.initService();
cluster.addContainer(container);
}
private static ContainerCluster newContainerCluster() {
ContainerCluster cluster = new ContainerCluster(null, "subId", "name");
addContainer(cluster, "c1", "host-c1");
addContainer(cluster, "c2", "host-c2");
return cluster;
}
private static MetricDefaultsConfig getMetricDefaultsConfig(ContainerCluster cluster) {
MetricDefaultsConfig.Builder builder = new MetricDefaultsConfig.Builder();
cluster.getConfig(builder);
return new MetricDefaultsConfig(builder);
}
private static ClusterInfoConfig getClusterInfoConfig(ContainerCluster cluster) {
ClusterInfoConfig.Builder builder = new ClusterInfoConfig.Builder();
cluster.getConfig(builder);
return new ClusterInfoConfig(builder);
}
} | class ContainerClusterTest {
@Test
public void requireThatDefaultMetricConsumerFactoryCanBeConfigured() {
ContainerCluster cluster = newContainerCluster();
cluster.setDefaultMetricConsumerFactory(MetricDefaultsConfig.Factory.Enum.YAMAS_SCOREBOARD);
assertEquals(MetricDefaultsConfig.Factory.Enum.YAMAS_SCOREBOARD,
getMetricDefaultsConfig(cluster).factory());
}
@Test
public void requireThatDefaultMetricConsumerFactoryMatchesConfigDefault() {
ContainerCluster cluster = newContainerCluster();
assertEquals(new MetricDefaultsConfig(new MetricDefaultsConfig.Builder()).factory(),
getMetricDefaultsConfig(cluster).factory());
}
@Test
public void requireThatClusterInfoIsPopulated() {
ContainerCluster cluster = newContainerCluster();
ClusterInfoConfig config = getClusterInfoConfig(cluster);
assertEquals("name", config.clusterId());
assertEquals(2, config.nodeCount());
assertEquals(2, config.services().size());
Iterator<ClusterInfoConfig.Services> iterator = config.services().iterator();
ClusterInfoConfig.Services service = iterator.next();
assertEquals("host-c1", service.hostname());
assertEquals(0, service.index());
assertEquals(4, service.ports().size());
service = iterator.next();
assertEquals("host-c2", service.hostname());
assertEquals(1, service.index());
assertEquals(4, service.ports().size());
}
@Test
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster) {
return createContainerCluster(isHosted, isCombinedCluster, Optional.empty(), Optional.empty());
}
private ContainerCluster createClusterControllerCluster() {
return createContainerCluster(false, false, new ClusterControllerClusterVerifier());
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster, ContainerClusterVerifier extraComponents) {
return createContainerCluster(isHosted, isCombinedCluster, Optional.empty(), Optional.of(extraComponents));
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster,
Optional<Integer> memoryPercentage) {
return createContainerCluster(isHosted, isCombinedCluster, memoryPercentage, Optional.empty());
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster,
Optional<Integer> memoryPercentage, Optional<ContainerClusterVerifier> extraComponents) {
DeployState state = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(isHosted).build()).build();
MockRoot root = new MockRoot("foo", state);
ContainerCluster cluster = extraComponents.isPresent()
? new ContainerCluster(root, "container0", "container1", extraComponents.get())
: new ContainerCluster(root, "container0", "container1");
if (isCombinedCluster)
cluster.setHostClusterId("test-content-cluster");
cluster.setMemoryPercentage(memoryPercentage);
cluster.setSearch(new ContainerSearch(cluster, new SearchChains(cluster, "search-chain"), new ContainerSearch.Options()));
return cluster;
}
private void verifyHeapSizeAsPercentageOfPhysicalMemory(boolean isHosted, boolean isCombinedCluster,
Optional<Integer> explicitMemoryPercentage,
int expectedMemoryPercentage) {
ContainerCluster cluster = createContainerCluster(isHosted, isCombinedCluster, explicitMemoryPercentage);
QrStartConfig.Builder qsB = new QrStartConfig.Builder();
cluster.getSearch().getConfig(qsB);
QrStartConfig qsC= new QrStartConfig(qsB);
assertEquals(expectedMemoryPercentage, qsC.jvm().heapSizeAsPercentageOfPhysicalMemory());
}
@Test
public void requireThatHeapSizeAsPercentageOfPhysicalMemoryForHostedAndNot() {
boolean hosted = true;
boolean combined = true;
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, ! combined, Optional.empty(), 60);
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, combined, Optional.empty(), 17);
verifyHeapSizeAsPercentageOfPhysicalMemory(! hosted, ! combined, Optional.empty(), 0);
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, ! combined, Optional.of(67), 67);
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, combined, Optional.of(68), 68);
verifyHeapSizeAsPercentageOfPhysicalMemory(! hosted, ! combined, Optional.of(69), 69);
}
private void verifyJvmArgs(boolean isHosted, boolean hasDocproc, String expectedArgs, String jvmArgs) {
if (isHosted && hasDocproc) {
String defaultHostedJVMArgs = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage";
if ( ! "".equals(expectedArgs)) {
defaultHostedJVMArgs = defaultHostedJVMArgs + " ";
}
assertEquals(defaultHostedJVMArgs + expectedArgs, jvmArgs);
} else {
assertEquals(expectedArgs, jvmArgs);
}
}
private void verifyJvmArgs(boolean isHosted, boolean hasDocProc) {
ContainerCluster cluster = createContainerCluster(isHosted, false);
if (hasDocProc) {
cluster.setDocproc(new ContainerDocproc(cluster, null));
}
addContainer(cluster, "c1", "host-c1");
assertEquals(1, cluster.getContainers().size());
Container container = cluster.getContainers().get(0);
verifyJvmArgs(isHosted, hasDocProc, "", container.getJvmArgs());
container.setJvmArgs("initial");
verifyJvmArgs(isHosted, hasDocProc, "initial", container.getJvmArgs());
container.prependJvmArgs("ignored");
verifyJvmArgs(isHosted, hasDocProc, "ignored initial", container.getJvmArgs());
container.appendJvmArgs("override");
verifyJvmArgs(isHosted, hasDocProc, "ignored initial override", container.getJvmArgs());
container.setJvmArgs(null);
verifyJvmArgs(isHosted, hasDocProc, "", container.getJvmArgs());
}
@Test
public void testClusterControllerResourceUsage() {
ContainerCluster cluster = createClusterControllerCluster();
addClusterController(cluster, "host-c1");
assertEquals(1, cluster.getContainers().size());
ClusterControllerContainer container = (ClusterControllerContainer) cluster.getContainers().get(0);
QrStartConfig.Builder qrBuilder = new QrStartConfig.Builder();
container.getConfig(qrBuilder);
QrStartConfig qrStartConfig = new QrStartConfig(qrBuilder);
assertEquals(512, qrStartConfig.jvm().heapsize());
ThreadpoolConfig.Builder tpBuilder = new ThreadpoolConfig.Builder();
container.getConfig(tpBuilder);
ThreadpoolConfig threadpoolConfig = new ThreadpoolConfig(tpBuilder);
assertEquals(10, threadpoolConfig.maxthreads());
}
@Test
public void testThatYouCanNotAddNonClusterControllerContainerToClusterControllerCluster() {
ContainerCluster cluster = createClusterControllerCluster();
addClusterController(cluster, "host-c1");
try {
addContainer(cluster, "c2", "host-c2");
assertTrue(false);
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith("Cluster container1 does not accept container com.yahoo.vespa.model.container.Container"));
}
}
@Test
public void testThatLinguisticsIsExcludedForClusterControllerCluster() {
ContainerCluster cluster = createClusterControllerCluster();
addClusterController(cluster, "host-c1");
assertFalse(contains("com.yahoo.language.provider.SimpleLinguisticsProvider", cluster.getAllComponents()));
}
@Test
public void testThatLinguisticsIsIncludedForNonClusterControllerClusters() {
ContainerCluster cluster = createContainerCluster(false, false);
addClusterController(cluster, "host-c1");
assertTrue(contains("com.yahoo.language.provider.SimpleLinguisticsProvider", cluster.getAllComponents()));
}
private static boolean contains(String componentId, Collection<Component<?, ?>> componentList) {
for (Component<?, ?> component : componentList)
if (component.getClassId().toId().getName().equals(componentId))
return true;
return false;
}
@Test
public void requireThatJvmArgsControlWorksForHostedAndNot() {
verifyJvmArgs(true, false);
verifyJvmArgs(true, true);
verifyJvmArgs(false, false);
verifyJvmArgs(false, true);
}
@Test
public void requireThatWeCanhandleNull() {
ContainerCluster cluster = createContainerCluster(false, false);
addContainer(cluster, "c1", "host-c1");
Container container = cluster.getContainers().get(0);
container.setJvmArgs("");
String empty = container.getJvmArgs();
container.setJvmArgs(null);
assertEquals(empty, container.getJvmArgs());
}
@Test
public void requireThatRoutingProviderIsDisabledForNonHosted() {
DeployState state = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(false).build()).build();
MockRoot root = new MockRoot("foo", state);
ContainerCluster cluster = new ContainerCluster(root, "container0", "container1");
RoutingProviderConfig.Builder builder = new RoutingProviderConfig.Builder();
cluster.getConfig(builder);
RoutingProviderConfig config = new RoutingProviderConfig(builder);
assertFalse(config.enabled());
assertEquals(0, cluster.getAllComponents().stream().map(c -> c.getClassId().getName()).filter(c -> c.equals("com.yahoo.jdisc.http.filter.security.RoutingConfigProvider")).count());
}
private static void addContainer(ContainerCluster cluster, String name, String hostName) {
Container container = new Container(cluster, name, 0);
container.setHostResource(new HostResource(new Host(null, hostName)));
container.initService();
cluster.addContainer(container);
}
private static void addClusterController(ContainerCluster cluster, String hostName) {
Container container = new ClusterControllerContainer(cluster, 1, false);
container.setHostResource(new HostResource(new Host(null, hostName)));
container.initService();
cluster.addContainer(container);
}
private static ContainerCluster newContainerCluster() {
ContainerCluster cluster = new ContainerCluster(null, "subId", "name");
addContainer(cluster, "c1", "host-c1");
addContainer(cluster, "c2", "host-c2");
return cluster;
}
private static MetricDefaultsConfig getMetricDefaultsConfig(ContainerCluster cluster) {
MetricDefaultsConfig.Builder builder = new MetricDefaultsConfig.Builder();
cluster.getConfig(builder);
return new MetricDefaultsConfig(builder);
}
private static ClusterInfoConfig getClusterInfoConfig(ContainerCluster cluster) {
ClusterInfoConfig.Builder builder = new ClusterInfoConfig.Builder();
cluster.getConfig(builder);
return new ClusterInfoConfig(builder);
}
} |
Non intentional. Harder to get accidents with VI. Combination with accidental press of backspace or enter combined with intellij automatic indentation. | public void requreThatWeCanGetTheZoneConfig() {
DeployState state = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(true).build())
.zone(new Zone(SystemName.cd, Environment.test, RegionName.from("some-region"))).build();
MockRoot root = new MockRoot("foo", state);
ContainerCluster cluster = new ContainerCluster(root, "container0", "container1");
ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder();
cluster.getConfig(builder);
ConfigserverConfig config = new ConfigserverConfig(builder);
assertEquals(Environment.test.value(), config.environment());
assertEquals("some-region", config.region());
assertEquals("cd", config.system());
} | .zone(new Zone(SystemName.cd, Environment.test, RegionName.from("some-region"))).build(); | public void requreThatWeCanGetTheZoneConfig() {
DeployState state = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(true).build())
.zone(new Zone(SystemName.cd, Environment.test, RegionName.from("some-region")))
.build();
MockRoot root = new MockRoot("foo", state);
ContainerCluster cluster = new ContainerCluster(root, "container0", "container1");
ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder();
cluster.getConfig(builder);
ConfigserverConfig config = new ConfigserverConfig(builder);
assertEquals(Environment.test.value(), config.environment());
assertEquals("some-region", config.region());
assertEquals("cd", config.system());
} | class ContainerClusterTest {
@Test
public void requireThatDefaultMetricConsumerFactoryCanBeConfigured() {
ContainerCluster cluster = newContainerCluster();
cluster.setDefaultMetricConsumerFactory(MetricDefaultsConfig.Factory.Enum.YAMAS_SCOREBOARD);
assertEquals(MetricDefaultsConfig.Factory.Enum.YAMAS_SCOREBOARD,
getMetricDefaultsConfig(cluster).factory());
}
@Test
public void requireThatDefaultMetricConsumerFactoryMatchesConfigDefault() {
ContainerCluster cluster = newContainerCluster();
assertEquals(new MetricDefaultsConfig(new MetricDefaultsConfig.Builder()).factory(),
getMetricDefaultsConfig(cluster).factory());
}
@Test
public void requireThatClusterInfoIsPopulated() {
ContainerCluster cluster = newContainerCluster();
ClusterInfoConfig config = getClusterInfoConfig(cluster);
assertEquals("name", config.clusterId());
assertEquals(2, config.nodeCount());
assertEquals(2, config.services().size());
Iterator<ClusterInfoConfig.Services> iterator = config.services().iterator();
ClusterInfoConfig.Services service = iterator.next();
assertEquals("host-c1", service.hostname());
assertEquals(0, service.index());
assertEquals(4, service.ports().size());
service = iterator.next();
assertEquals("host-c2", service.hostname());
assertEquals(1, service.index());
assertEquals(4, service.ports().size());
}
@Test
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster) {
return createContainerCluster(isHosted, isCombinedCluster, Optional.empty(), Optional.empty());
}
private ContainerCluster createClusterControllerCluster() {
return createContainerCluster(false, false, new VerifyClusterControllerCluster());
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster, ContainerClusterVerifier extraComponents) {
return createContainerCluster(isHosted, isCombinedCluster, Optional.empty(), Optional.of(extraComponents));
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster,
Optional<Integer> memoryPercentage) {
return createContainerCluster(isHosted, isCombinedCluster, memoryPercentage, Optional.empty());
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster,
Optional<Integer> memoryPercentage, Optional<ContainerClusterVerifier> extraComponents) {
DeployState state = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(isHosted).build()).build();
MockRoot root = new MockRoot("foo", state);
ContainerCluster cluster = extraComponents.isPresent()
? new ContainerCluster(root, "container0", "container1", extraComponents.get())
: new ContainerCluster(root, "container0", "container1");
if (isCombinedCluster)
cluster.setHostClusterId("test-content-cluster");
cluster.setMemoryPercentage(memoryPercentage);
cluster.setSearch(new ContainerSearch(cluster, new SearchChains(cluster, "search-chain"), new ContainerSearch.Options()));
return cluster;
}
private void verifyHeapSizeAsPercentageOfPhysicalMemory(boolean isHosted, boolean isCombinedCluster,
Optional<Integer> explicitMemoryPercentage,
int expectedMemoryPercentage) {
ContainerCluster cluster = createContainerCluster(isHosted, isCombinedCluster, explicitMemoryPercentage);
QrStartConfig.Builder qsB = new QrStartConfig.Builder();
cluster.getSearch().getConfig(qsB);
QrStartConfig qsC= new QrStartConfig(qsB);
assertEquals(expectedMemoryPercentage, qsC.jvm().heapSizeAsPercentageOfPhysicalMemory());
}
@Test
public void requireThatHeapSizeAsPercentageOfPhysicalMemoryForHostedAndNot() {
boolean hosted = true;
boolean combined = true;
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, ! combined, Optional.empty(), 60);
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, combined, Optional.empty(), 17);
verifyHeapSizeAsPercentageOfPhysicalMemory(! hosted, ! combined, Optional.empty(), 0);
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, ! combined, Optional.of(67), 67);
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, combined, Optional.of(68), 68);
verifyHeapSizeAsPercentageOfPhysicalMemory(! hosted, ! combined, Optional.of(69), 69);
}
private void verifyJvmArgs(boolean isHosted, boolean hasDocproc, String expectedArgs, String jvmArgs) {
if (isHosted && hasDocproc) {
String defaultHostedJVMArgs = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage";
if ( ! "".equals(expectedArgs)) {
defaultHostedJVMArgs = defaultHostedJVMArgs + " ";
}
assertEquals(defaultHostedJVMArgs + expectedArgs, jvmArgs);
} else {
assertEquals(expectedArgs, jvmArgs);
}
}
private void verifyJvmArgs(boolean isHosted, boolean hasDocProc) {
ContainerCluster cluster = createContainerCluster(isHosted, false);
if (hasDocProc) {
cluster.setDocproc(new ContainerDocproc(cluster, null));
}
addContainer(cluster, "c1", "host-c1");
assertEquals(1, cluster.getContainers().size());
Container container = cluster.getContainers().get(0);
verifyJvmArgs(isHosted, hasDocProc, "", container.getJvmArgs());
container.setJvmArgs("initial");
verifyJvmArgs(isHosted, hasDocProc, "initial", container.getJvmArgs());
container.prependJvmArgs("ignored");
verifyJvmArgs(isHosted, hasDocProc, "ignored initial", container.getJvmArgs());
container.appendJvmArgs("override");
verifyJvmArgs(isHosted, hasDocProc, "ignored initial override", container.getJvmArgs());
container.setJvmArgs(null);
verifyJvmArgs(isHosted, hasDocProc, "", container.getJvmArgs());
}
@Test
public void testClusterControllerResourceUsage() {
ContainerCluster cluster = createClusterControllerCluster();
addClusterController(cluster, "host-c1");
assertEquals(1, cluster.getContainers().size());
ClusterControllerContainer container = (ClusterControllerContainer) cluster.getContainers().get(0);
QrStartConfig.Builder qrBuilder = new QrStartConfig.Builder();
container.getConfig(qrBuilder);
QrStartConfig qrStartConfig = new QrStartConfig(qrBuilder);
assertEquals(512, qrStartConfig.jvm().heapsize());
ThreadpoolConfig.Builder tpBuilder = new ThreadpoolConfig.Builder();
container.getConfig(tpBuilder);
ThreadpoolConfig threadpoolConfig = new ThreadpoolConfig(tpBuilder);
assertEquals(10, threadpoolConfig.maxthreads());
}
@Test
public void testThatYouCanNotAddNonClusterControllerContainerToClusterControllerCluster() {
ContainerCluster cluster = createClusterControllerCluster();
addClusterController(cluster, "host-c1");
try {
addContainer(cluster, "c2", "host-c2");
assertTrue(false);
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith("Cluster container1 does not accept container com.yahoo.vespa.model.container.Container"));
}
}
@Test
public void testThatLinguisticsIsExcludedForClusterControllerCluster() {
ContainerCluster cluster = createClusterControllerCluster();
addClusterController(cluster, "host-c1");
assertFalse(contains("com.yahoo.language.provider.SimpleLinguisticsProvider", cluster.getAllComponents()));
}
@Test
public void testThatLinguisticsIsIncludedForNonClusterControllerClusters() {
ContainerCluster cluster = createContainerCluster(false, false);
addClusterController(cluster, "host-c1");
assertTrue(contains("com.yahoo.language.provider.SimpleLinguisticsProvider", cluster.getAllComponents()));
}
private static boolean contains(String componentId, Collection<Component<?, ?>> componentList) {
for (Component<?, ?> component : componentList)
if (component.getClassId().toId().getName().equals(componentId))
return true;
return false;
}
@Test
public void requireThatJvmArgsControlWorksForHostedAndNot() {
verifyJvmArgs(true, false);
verifyJvmArgs(true, true);
verifyJvmArgs(false, false);
verifyJvmArgs(false, true);
}
@Test
public void requireThatWeCanhandleNull() {
ContainerCluster cluster = createContainerCluster(false, false);
addContainer(cluster, "c1", "host-c1");
Container container = cluster.getContainers().get(0);
container.setJvmArgs("");
String empty = container.getJvmArgs();
container.setJvmArgs(null);
assertEquals(empty, container.getJvmArgs());
}
@Test
public void requireThatRoutingProviderIsDisabledForNonHosted() {
DeployState state = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(false).build()).build();
MockRoot root = new MockRoot("foo", state);
ContainerCluster cluster = new ContainerCluster(root, "container0", "container1");
RoutingProviderConfig.Builder builder = new RoutingProviderConfig.Builder();
cluster.getConfig(builder);
RoutingProviderConfig config = new RoutingProviderConfig(builder);
assertFalse(config.enabled());
assertEquals(0, cluster.getAllComponents().stream().map(c -> c.getClassId().getName()).filter(c -> c.equals("com.yahoo.jdisc.http.filter.security.RoutingConfigProvider")).count());
}
private static void addContainer(ContainerCluster cluster, String name, String hostName) {
Container container = new Container(cluster, name, 0);
container.setHostResource(new HostResource(new Host(null, hostName)));
container.initService();
cluster.addContainer(container);
}
private static void addClusterController(ContainerCluster cluster, String hostName) {
Container container = new ClusterControllerContainer(cluster, 1, false);
container.setHostResource(new HostResource(new Host(null, hostName)));
container.initService();
cluster.addContainer(container);
}
private static ContainerCluster newContainerCluster() {
ContainerCluster cluster = new ContainerCluster(null, "subId", "name");
addContainer(cluster, "c1", "host-c1");
addContainer(cluster, "c2", "host-c2");
return cluster;
}
private static MetricDefaultsConfig getMetricDefaultsConfig(ContainerCluster cluster) {
MetricDefaultsConfig.Builder builder = new MetricDefaultsConfig.Builder();
cluster.getConfig(builder);
return new MetricDefaultsConfig(builder);
}
private static ClusterInfoConfig getClusterInfoConfig(ContainerCluster cluster) {
ClusterInfoConfig.Builder builder = new ClusterInfoConfig.Builder();
cluster.getConfig(builder);
return new ClusterInfoConfig(builder);
}
} | class ContainerClusterTest {
@Test
public void requireThatDefaultMetricConsumerFactoryCanBeConfigured() {
ContainerCluster cluster = newContainerCluster();
cluster.setDefaultMetricConsumerFactory(MetricDefaultsConfig.Factory.Enum.YAMAS_SCOREBOARD);
assertEquals(MetricDefaultsConfig.Factory.Enum.YAMAS_SCOREBOARD,
getMetricDefaultsConfig(cluster).factory());
}
@Test
public void requireThatDefaultMetricConsumerFactoryMatchesConfigDefault() {
ContainerCluster cluster = newContainerCluster();
assertEquals(new MetricDefaultsConfig(new MetricDefaultsConfig.Builder()).factory(),
getMetricDefaultsConfig(cluster).factory());
}
@Test
public void requireThatClusterInfoIsPopulated() {
ContainerCluster cluster = newContainerCluster();
ClusterInfoConfig config = getClusterInfoConfig(cluster);
assertEquals("name", config.clusterId());
assertEquals(2, config.nodeCount());
assertEquals(2, config.services().size());
Iterator<ClusterInfoConfig.Services> iterator = config.services().iterator();
ClusterInfoConfig.Services service = iterator.next();
assertEquals("host-c1", service.hostname());
assertEquals(0, service.index());
assertEquals(4, service.ports().size());
service = iterator.next();
assertEquals("host-c2", service.hostname());
assertEquals(1, service.index());
assertEquals(4, service.ports().size());
}
@Test
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster) {
return createContainerCluster(isHosted, isCombinedCluster, Optional.empty(), Optional.empty());
}
private ContainerCluster createClusterControllerCluster() {
return createContainerCluster(false, false, new ClusterControllerClusterVerifier());
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster, ContainerClusterVerifier extraComponents) {
return createContainerCluster(isHosted, isCombinedCluster, Optional.empty(), Optional.of(extraComponents));
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster,
Optional<Integer> memoryPercentage) {
return createContainerCluster(isHosted, isCombinedCluster, memoryPercentage, Optional.empty());
}
private ContainerCluster createContainerCluster(boolean isHosted, boolean isCombinedCluster,
Optional<Integer> memoryPercentage, Optional<ContainerClusterVerifier> extraComponents) {
DeployState state = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(isHosted).build()).build();
MockRoot root = new MockRoot("foo", state);
ContainerCluster cluster = extraComponents.isPresent()
? new ContainerCluster(root, "container0", "container1", extraComponents.get())
: new ContainerCluster(root, "container0", "container1");
if (isCombinedCluster)
cluster.setHostClusterId("test-content-cluster");
cluster.setMemoryPercentage(memoryPercentage);
cluster.setSearch(new ContainerSearch(cluster, new SearchChains(cluster, "search-chain"), new ContainerSearch.Options()));
return cluster;
}
private void verifyHeapSizeAsPercentageOfPhysicalMemory(boolean isHosted, boolean isCombinedCluster,
Optional<Integer> explicitMemoryPercentage,
int expectedMemoryPercentage) {
ContainerCluster cluster = createContainerCluster(isHosted, isCombinedCluster, explicitMemoryPercentage);
QrStartConfig.Builder qsB = new QrStartConfig.Builder();
cluster.getSearch().getConfig(qsB);
QrStartConfig qsC= new QrStartConfig(qsB);
assertEquals(expectedMemoryPercentage, qsC.jvm().heapSizeAsPercentageOfPhysicalMemory());
}
@Test
public void requireThatHeapSizeAsPercentageOfPhysicalMemoryForHostedAndNot() {
boolean hosted = true;
boolean combined = true;
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, ! combined, Optional.empty(), 60);
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, combined, Optional.empty(), 17);
verifyHeapSizeAsPercentageOfPhysicalMemory(! hosted, ! combined, Optional.empty(), 0);
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, ! combined, Optional.of(67), 67);
verifyHeapSizeAsPercentageOfPhysicalMemory( hosted, combined, Optional.of(68), 68);
verifyHeapSizeAsPercentageOfPhysicalMemory(! hosted, ! combined, Optional.of(69), 69);
}
private void verifyJvmArgs(boolean isHosted, boolean hasDocproc, String expectedArgs, String jvmArgs) {
if (isHosted && hasDocproc) {
String defaultHostedJVMArgs = "-XX:+UseOSErrorReporting -XX:+SuppressFatalErrorMessage";
if ( ! "".equals(expectedArgs)) {
defaultHostedJVMArgs = defaultHostedJVMArgs + " ";
}
assertEquals(defaultHostedJVMArgs + expectedArgs, jvmArgs);
} else {
assertEquals(expectedArgs, jvmArgs);
}
}
private void verifyJvmArgs(boolean isHosted, boolean hasDocProc) {
ContainerCluster cluster = createContainerCluster(isHosted, false);
if (hasDocProc) {
cluster.setDocproc(new ContainerDocproc(cluster, null));
}
addContainer(cluster, "c1", "host-c1");
assertEquals(1, cluster.getContainers().size());
Container container = cluster.getContainers().get(0);
verifyJvmArgs(isHosted, hasDocProc, "", container.getJvmArgs());
container.setJvmArgs("initial");
verifyJvmArgs(isHosted, hasDocProc, "initial", container.getJvmArgs());
container.prependJvmArgs("ignored");
verifyJvmArgs(isHosted, hasDocProc, "ignored initial", container.getJvmArgs());
container.appendJvmArgs("override");
verifyJvmArgs(isHosted, hasDocProc, "ignored initial override", container.getJvmArgs());
container.setJvmArgs(null);
verifyJvmArgs(isHosted, hasDocProc, "", container.getJvmArgs());
}
@Test
public void testClusterControllerResourceUsage() {
ContainerCluster cluster = createClusterControllerCluster();
addClusterController(cluster, "host-c1");
assertEquals(1, cluster.getContainers().size());
ClusterControllerContainer container = (ClusterControllerContainer) cluster.getContainers().get(0);
QrStartConfig.Builder qrBuilder = new QrStartConfig.Builder();
container.getConfig(qrBuilder);
QrStartConfig qrStartConfig = new QrStartConfig(qrBuilder);
assertEquals(512, qrStartConfig.jvm().heapsize());
ThreadpoolConfig.Builder tpBuilder = new ThreadpoolConfig.Builder();
container.getConfig(tpBuilder);
ThreadpoolConfig threadpoolConfig = new ThreadpoolConfig(tpBuilder);
assertEquals(10, threadpoolConfig.maxthreads());
}
@Test
public void testThatYouCanNotAddNonClusterControllerContainerToClusterControllerCluster() {
ContainerCluster cluster = createClusterControllerCluster();
addClusterController(cluster, "host-c1");
try {
addContainer(cluster, "c2", "host-c2");
assertTrue(false);
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith("Cluster container1 does not accept container com.yahoo.vespa.model.container.Container"));
}
}
@Test
public void testThatLinguisticsIsExcludedForClusterControllerCluster() {
ContainerCluster cluster = createClusterControllerCluster();
addClusterController(cluster, "host-c1");
assertFalse(contains("com.yahoo.language.provider.SimpleLinguisticsProvider", cluster.getAllComponents()));
}
@Test
public void testThatLinguisticsIsIncludedForNonClusterControllerClusters() {
ContainerCluster cluster = createContainerCluster(false, false);
addClusterController(cluster, "host-c1");
assertTrue(contains("com.yahoo.language.provider.SimpleLinguisticsProvider", cluster.getAllComponents()));
}
private static boolean contains(String componentId, Collection<Component<?, ?>> componentList) {
for (Component<?, ?> component : componentList)
if (component.getClassId().toId().getName().equals(componentId))
return true;
return false;
}
@Test
public void requireThatJvmArgsControlWorksForHostedAndNot() {
verifyJvmArgs(true, false);
verifyJvmArgs(true, true);
verifyJvmArgs(false, false);
verifyJvmArgs(false, true);
}
@Test
public void requireThatWeCanhandleNull() {
ContainerCluster cluster = createContainerCluster(false, false);
addContainer(cluster, "c1", "host-c1");
Container container = cluster.getContainers().get(0);
container.setJvmArgs("");
String empty = container.getJvmArgs();
container.setJvmArgs(null);
assertEquals(empty, container.getJvmArgs());
}
@Test
public void requireThatRoutingProviderIsDisabledForNonHosted() {
    // Build a cluster in a non-hosted deployment and verify the routing provider is switched off.
    DeployState nonHostedState = new DeployState.Builder().properties(new DeployProperties.Builder().hostedVespa(false).build()).build();
    ContainerCluster cluster = new ContainerCluster(new MockRoot("foo", nonHostedState), "container0", "container1");
    RoutingProviderConfig.Builder configBuilder = new RoutingProviderConfig.Builder();
    cluster.getConfig(configBuilder);
    RoutingProviderConfig routingProviderConfig = new RoutingProviderConfig(configBuilder);
    assertFalse(routingProviderConfig.enabled());
    // The RoutingConfigProvider component must not be registered either.
    long routingConfigProviders = cluster.getAllComponents().stream()
                                         .map(component -> component.getClassId().getName())
                                         .filter(className -> className.equals("com.yahoo.jdisc.http.filter.security.RoutingConfigProvider"))
                                         .count();
    assertEquals(0, routingConfigProviders);
}
// Creates a plain container with the given name, places it on the given host, and registers it with the cluster.
private static void addContainer(ContainerCluster cluster, String name, String hostName) {
    Container container = new Container(cluster, name, 0);
    container.setHostResource(new HostResource(new Host(null, hostName)));
    container.initService();
    cluster.addContainer(container);
}
// Creates a cluster controller container, places it on the given host, and registers it with the cluster.
// Note: the controller derives its own name from the index (1); hostName only decides placement.
private static void addClusterController(ContainerCluster cluster, String hostName) {
    Container container = new ClusterControllerContainer(cluster, 1, false);
    container.setHostResource(new HostResource(new Host(null, hostName)));
    container.initService();
    cluster.addContainer(container);
}
/** Creates the two-container cluster fixture used by several tests. */
private static ContainerCluster newContainerCluster() {
    ContainerCluster containerCluster = new ContainerCluster(null, "subId", "name");
    addContainer(containerCluster, "c1", "host-c1");
    addContainer(containerCluster, "c2", "host-c2");
    return containerCluster;
}
/** Builds and returns the metric defaults config produced by the given cluster. */
private static MetricDefaultsConfig getMetricDefaultsConfig(ContainerCluster cluster) {
    MetricDefaultsConfig.Builder configBuilder = new MetricDefaultsConfig.Builder();
    cluster.getConfig(configBuilder);
    return new MetricDefaultsConfig(configBuilder);
}
/** Builds and returns the cluster info config produced by the given cluster. */
private static ClusterInfoConfig getClusterInfoConfig(ContainerCluster cluster) {
    ClusterInfoConfig.Builder configBuilder = new ClusterInfoConfig.Builder();
    cluster.getConfig(configBuilder);
    return new ClusterInfoConfig(configBuilder);
}
} |
I was thinking we should also set wantedState to RESUMED and in case the node admin would also converge on RESUMED (e.g. it would sync which containers to run). Later, Chef has come around and tries another one-shot non-RESUMED, and so on. However, whether or not the node-admin have time to get to refreshing the containers to run is racy: A fast Chef client/slow node-admin would short-circuit it. Perhaps we should force at least one node admin state updater tick run too, before trying to freeze and suspend. This may be too complicated. Doing just a one-shot nodeAdmin.setFrozen(false) is a bit strange. Did you have an example where this was important? I think we may have to do it, in case the reason for suspension error is that some other node on another Dh has not converged, and it will not converge until node-agent has had a run on it, but that node-admin is in a similar situation. In any case, please document here why we need it. | private void convergeState(State wantedState) {
boolean wantFrozen = wantedState != RESUMED;
if (!nodeAdmin.setFrozen(wantFrozen)) {
throw new RuntimeException("NodeAdmin has not yet converged to " + (wantFrozen ? "frozen" : "unfrozen"));
}
if (wantedState == RESUMED) {
orchestrator.resume(dockerHostHostName);
if (wantedState == updateAndGetCurrentState(RESUMED)) return;
}
List<String> nodesInActiveState;
try {
nodesInActiveState = getNodesInActiveState();
} catch (IOException e) {
throw new RuntimeException("Failed to get nodes from node repo: " + e.getMessage());
}
if (currentState == RESUMED) {
List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState);
nodesToSuspend.add(dockerHostHostName);
try {
orchestrator.suspend(dockerHostHostName, nodesToSuspend);
} catch (RuntimeException e) {
nodeAdmin.setFrozen(false);
throw e;
}
if (wantedState == updateAndGetCurrentState(SUSPENDED_NODE_ADMIN)) return;
}
nodeAdmin.stopNodeAgentServices(nodesInActiveState);
updateAndGetCurrentState(SUSPENDED);
} | } | private void convergeState(State wantedState) {
boolean wantFrozen = wantedState != RESUMED;
if (!nodeAdmin.setFrozen(wantFrozen)) {
throw new RuntimeException("NodeAdmin has not yet converged to " + (wantFrozen ? "frozen" : "unfrozen"));
}
if (wantedState == RESUMED) {
orchestrator.resume(dockerHostHostName);
if (wantedState == updateAndGetCurrentState(RESUMED)) return;
}
List<String> nodesInActiveState;
try {
nodesInActiveState = getNodesInActiveState();
} catch (IOException e) {
throw new RuntimeException("Failed to get nodes from node repo: " + e.getMessage());
}
if (currentState == RESUMED) {
List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState);
nodesToSuspend.add(dockerHostHostName);
try {
orchestrator.suspend(dockerHostHostName, nodesToSuspend);
} catch (RuntimeException e) {
nodeAdmin.setFrozen(false);
throw e;
}
if (wantedState == updateAndGetCurrentState(SUSPENDED_NODE_ADMIN)) return;
}
nodeAdmin.stopNodeAgentServices(nodesInActiveState);
updateAndGetCurrentState(SUSPENDED);
} | class NodeAdminStateUpdater extends AbstractComponent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
// currentState, wantedState and workToDoNow are all guarded by 'monitor'.
private State currentState = SUSPENDED_NODE_ADMIN;
private State wantedState = RESUMED;
// True when the next tick should run immediately instead of waiting out the interval.
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
private final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdminStateUpdater.class);
// The single thread running the tick loop; created by start(), joined by deconstruct().
private Thread loopThread;
private final NodeRepository nodeRepository;
private final NodeAdmin nodeAdmin;
private final Clock clock;
private final Orchestrator orchestrator;
private final String dockerHostHostName;
// Default tick interval; overwritten by start()'s argument.
private long delaysBetweenEachTickMillis = 30_000;
private Instant lastTick;
/**
 * Creates an updater that keeps the given node admin converged towards the wanted {@link State}.
 * Nothing runs until {@link #start(long)} is called.
 */
public NodeAdminStateUpdater(
        final NodeRepository nodeRepository,
        final NodeAdmin nodeAdmin,
        Clock clock,
        Orchestrator orchestrator,
        String dockerHostHostName) {
    this.nodeRepository = nodeRepository;
    this.nodeAdmin = nodeAdmin;
    this.clock = clock;
    this.orchestrator = orchestrator;
    this.dockerHostHostName = dockerHostHostName;
    this.lastTick = clock.instant();
}
/** The states this updater converges towards: RESUMED (normal operation), SUSPENDED_NODE_ADMIN (node admin frozen), SUSPENDED (node agent services stopped as well). */
public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED}
/** Returns a snapshot of internal state for the debug/status page. */
public Map<String, Object> getDebugPage() {
    Map<String, Object> debug = new LinkedHashMap<>();
    synchronized (monitor) { // Hold the monitor so wanted/current state are read consistently
        debug.put("dockerHostHostName", dockerHostHostName);
        debug.put("NodeAdmin", nodeAdmin.debugInfo());
        debug.put("Wanted State: ", wantedState);
        debug.put("Current State: ", currentState);
    }
    return debug;
}
/**
 * Records the wanted state and reports whether it has already been reached.
 * A change of wanted state wakes the tick thread so convergence starts without waiting for
 * the next scheduled tick; callers are expected to poll until this returns true.
 */
public boolean setResumeStateAndCheckIfResumed(State wantedState) {
    synchronized (monitor) {
        if (this.wantedState != wantedState) {
            this.wantedState = wantedState;
            signalWorkToBeDone();
        }
        return currentState == wantedState;
    }
}
/** Wakes the tick thread so the next iteration runs immediately instead of waiting out the tick interval. */
void signalWorkToBeDone() {
    synchronized (monitor) {
        if (! workToDoNow) {
            workToDoNow = true;
            monitor.notifyAll();
        }
    }
}
/**
 * Runs one iteration of the update loop: waits out the remainder of the tick interval
 * (or until {@link #signalWorkToBeDone()} wakes it), converges towards the wanted state
 * if it differs from the current one, and finally refreshes the containers to run.
 */
void tick() {
    State wantedState = null;
    synchronized (monitor) {
        while (! workToDoNow) {
            long remainder = delaysBetweenEachTickMillis - Duration.between(lastTick, clock.instant()).toMillis();
            if (remainder > 0) {
                try {
                    monitor.wait(remainder); // Released by signalWorkToBeDone() or by timeout
                } catch (InterruptedException e) {
                    logger.error("Interrupted, but ignoring this: NodeAdminStateUpdater");
                }
            } else break;
        }
        lastTick = clock.instant();
        workToDoNow = false;
        // Snapshot the wanted state under the monitor; convergence itself runs outside the lock.
        if (currentState != this.wantedState) {
            wantedState = this.wantedState;
        }
    }
    if (wantedState != null) {
        try {
            convergeState(wantedState);
        } catch (Exception e) {
            // Convergence is retried on a later tick; just log here.
            logger.error("Failed to converge NodeAdminStateUpdater", e);
        }
    }
    fetchContainersToRunFromNodeRepository();
}
/**
 * Records the given state as the new current state (under the monitor) and returns it.
 * Despite the javadoc it previously carried, this method does no convergence work itself;
 * the actual convergence steps live in convergeState().
 */
private State updateAndGetCurrentState(State currentState) {
    synchronized (monitor) {
        this.currentState = currentState;
        return currentState;
    }
}
/**
 * Fetches the list of containers to run from the node repository and hands it to the node admin.
 * Skipped unless the current state is RESUMED (i.e. while frozen or suspended).
 * All failures are logged and swallowed so the tick loop keeps running.
 */
private void fetchContainersToRunFromNodeRepository() {
    synchronized (monitor) {
        if (currentState != RESUMED) {
            logger.info("Frozen, skipping fetching info from node repository");
            return;
        }
        final List<ContainerNodeSpec> containersToRun;
        try {
            containersToRun = nodeRepository.getContainersToRun();
        } catch (Throwable t) {
            logger.warning("Failed fetching container info from node repository", t);
            return;
        }
        if (containersToRun == null) {
            logger.warning("Got null from node repository");
            return;
        }
        try {
            nodeAdmin.refreshContainersToRun(containersToRun);
        } catch (Throwable t) {
            logger.warning("Failed updating node admin: ", t);
        }
    }
}
/** Returns the hostnames of all nodes the node repository reports as being in state 'active'. */
private List<String> getNodesInActiveState() throws IOException {
    List<String> activeHostnames = new ArrayList<>();
    for (ContainerNodeSpec nodeSpec : nodeRepository.getContainersToRun()) {
        if (nodeSpec.nodeState == Node.State.active) {
            activeHostnames.add(nodeSpec.hostname);
        }
    }
    return activeHostnames;
}
/**
 * Starts the single background thread that runs {@link #tick()} until {@link #deconstruct()}.
 *
 * @param stateConvergeInterval delay between ticks, in milliseconds
 * @throws RuntimeException if the updater has already been started
 */
public void start(long stateConvergeInterval) {
    delaysBetweenEachTickMillis = stateConvergeInterval;
    if (loopThread != null) {
        throw new RuntimeException("Can not restart NodeAdminStateUpdater");
    }
    loopThread = new Thread(() -> {
        while (! terminated.get()) tick();
    });
    loopThread.setName("tick-NodeAdminStateUpdater");
    loopThread.start();
}
/** Stops the tick thread and shuts down the node admin. May only be called once. */
@Override
public void deconstruct() {
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone(); // Wake the tick thread so it can observe 'terminated' and exit
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop NodeAdminStateUpdater tick thread");
        }
        // NOTE(review): 'scheduler' is not visibly used for scheduling in this class; confirm it is still needed.
        scheduler.shutdown();
        if (! scheduler.awaitTermination(30, TimeUnit.SECONDS)) {
            throw new RuntimeException("Could not stop NodeAdminStateUpdater fetch containers scheduler.");
        }
    } catch (InterruptedException e1) {
        logger.error("Interrupted; Could not stop NodeAdminStateUpdater thread");
    }
    nodeAdmin.shutdown();
}
} | class NodeAdminStateUpdater extends AbstractComponent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private State currentState = SUSPENDED_NODE_ADMIN;
private State wantedState = RESUMED;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
private final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdminStateUpdater.class);
private Thread loopThread;
private final NodeRepository nodeRepository;
private final NodeAdmin nodeAdmin;
private final Clock clock;
private final Orchestrator orchestrator;
private final String dockerHostHostName;
private long delaysBetweenEachTickMillis = 30_000;
private Instant lastTick;
public NodeAdminStateUpdater(
final NodeRepository nodeRepository,
final NodeAdmin nodeAdmin,
Clock clock,
Orchestrator orchestrator,
String dockerHostHostName) {
this.nodeRepository = nodeRepository;
this.nodeAdmin = nodeAdmin;
this.clock = clock;
this.orchestrator = orchestrator;
this.dockerHostHostName = dockerHostHostName;
this.lastTick = clock.instant();
}
public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED}
public Map<String, Object> getDebugPage() {
Map<String, Object> debug = new LinkedHashMap<>();
synchronized (monitor) {
debug.put("dockerHostHostName", dockerHostHostName);
debug.put("NodeAdmin", nodeAdmin.debugInfo());
debug.put("Wanted State: ", wantedState);
debug.put("Current State: ", currentState);
}
return debug;
}
public boolean setResumeStateAndCheckIfResumed(State wantedState) {
synchronized (monitor) {
if (this.wantedState != wantedState) {
this.wantedState = wantedState;
signalWorkToBeDone();
}
return currentState == wantedState;
}
}
void signalWorkToBeDone() {
synchronized (monitor) {
if (! workToDoNow) {
workToDoNow = true;
monitor.notifyAll();
}
}
}
void tick() {
State wantedState = null;
synchronized (monitor) {
while (! workToDoNow) {
long remainder = delaysBetweenEachTickMillis - Duration.between(lastTick, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: NodeAdminStateUpdater");
}
} else break;
}
lastTick = clock.instant();
workToDoNow = false;
if (currentState != this.wantedState) {
wantedState = this.wantedState;
}
}
if (wantedState != null) {
try {
convergeState(wantedState);
} catch (Exception e) {
logger.error("Failed to converge NodeAdminStateUpdater", e);
}
}
fetchContainersToRunFromNodeRepository();
}
/**
* This method attempts to converge node-admin towards one of the {@link State}
*/
private State updateAndGetCurrentState(State currentState) {
synchronized (monitor) {
this.currentState = currentState;
return currentState;
}
}
private void fetchContainersToRunFromNodeRepository() {
synchronized (monitor) {
if (currentState != RESUMED) {
logger.info("Frozen, skipping fetching info from node repository");
return;
}
final List<ContainerNodeSpec> containersToRun;
try {
containersToRun = nodeRepository.getContainersToRun();
} catch (Throwable t) {
logger.warning("Failed fetching container info from node repository", t);
return;
}
if (containersToRun == null) {
logger.warning("Got null from node repository");
return;
}
try {
nodeAdmin.refreshContainersToRun(containersToRun);
} catch (Throwable t) {
logger.warning("Failed updating node admin: ", t);
}
}
}
private List<String> getNodesInActiveState() throws IOException {
return nodeRepository.getContainersToRun()
.stream()
.filter(nodespec -> nodespec.nodeState == Node.State.active)
.map(nodespec -> nodespec.hostname)
.collect(Collectors.toList());
}
public void start(long stateConvergeInterval) {
delaysBetweenEachTickMillis = stateConvergeInterval;
if (loopThread != null) {
throw new RuntimeException("Can not restart NodeAdminStateUpdater");
}
loopThread = new Thread(() -> {
while (! terminated.get()) tick();
});
loopThread.setName("tick-NodeAdminStateUpdater");
loopThread.start();
}
@Override
public void deconstruct() {
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop NodeAdminStateUpdater tick thread");
}
scheduler.shutdown();
if (! scheduler.awaitTermination(30, TimeUnit.SECONDS)) {
throw new RuntimeException("Could not stop NodeAdminStateUpdater fetch containers scheduler.");
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop NodeAdminStateUpdater thread");
}
nodeAdmin.shutdown();
}
} |
`setFrozen(false)` vs `wantedState = RESUMED`: by setting unfrozen, node-admin will retry by itself to suspend during next `NodeAdminStateUpdater` `tick`, this means that at some point, node-admin will be able to converge to its desired state by itself and when Chef makes the request, we can instantly approve. Whereas if we set `wantedState` to `RESUMED`, Chef will have to make the retry every time, and each time it will be instantly rejected because node-admin has to freeze everything first, then ask Orchestrator. Therefore upgrade will take `avgIntervalBetweenChefRun * numberOfHosts` longer. | private void convergeState(State wantedState) {
boolean wantFrozen = wantedState != RESUMED;
if (!nodeAdmin.setFrozen(wantFrozen)) {
throw new RuntimeException("NodeAdmin has not yet converged to " + (wantFrozen ? "frozen" : "unfrozen"));
}
if (wantedState == RESUMED) {
orchestrator.resume(dockerHostHostName);
if (wantedState == updateAndGetCurrentState(RESUMED)) return;
}
List<String> nodesInActiveState;
try {
nodesInActiveState = getNodesInActiveState();
} catch (IOException e) {
throw new RuntimeException("Failed to get nodes from node repo: " + e.getMessage());
}
if (currentState == RESUMED) {
List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState);
nodesToSuspend.add(dockerHostHostName);
try {
orchestrator.suspend(dockerHostHostName, nodesToSuspend);
} catch (RuntimeException e) {
nodeAdmin.setFrozen(false);
throw e;
}
if (wantedState == updateAndGetCurrentState(SUSPENDED_NODE_ADMIN)) return;
}
nodeAdmin.stopNodeAgentServices(nodesInActiveState);
updateAndGetCurrentState(SUSPENDED);
} | } | private void convergeState(State wantedState) {
boolean wantFrozen = wantedState != RESUMED;
if (!nodeAdmin.setFrozen(wantFrozen)) {
throw new RuntimeException("NodeAdmin has not yet converged to " + (wantFrozen ? "frozen" : "unfrozen"));
}
if (wantedState == RESUMED) {
orchestrator.resume(dockerHostHostName);
if (wantedState == updateAndGetCurrentState(RESUMED)) return;
}
List<String> nodesInActiveState;
try {
nodesInActiveState = getNodesInActiveState();
} catch (IOException e) {
throw new RuntimeException("Failed to get nodes from node repo: " + e.getMessage());
}
if (currentState == RESUMED) {
List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState);
nodesToSuspend.add(dockerHostHostName);
try {
orchestrator.suspend(dockerHostHostName, nodesToSuspend);
} catch (RuntimeException e) {
nodeAdmin.setFrozen(false);
throw e;
}
if (wantedState == updateAndGetCurrentState(SUSPENDED_NODE_ADMIN)) return;
}
nodeAdmin.stopNodeAgentServices(nodesInActiveState);
updateAndGetCurrentState(SUSPENDED);
} | class NodeAdminStateUpdater extends AbstractComponent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private State currentState = SUSPENDED_NODE_ADMIN;
private State wantedState = RESUMED;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
private final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdminStateUpdater.class);
private Thread loopThread;
private final NodeRepository nodeRepository;
private final NodeAdmin nodeAdmin;
private final Clock clock;
private final Orchestrator orchestrator;
private final String dockerHostHostName;
private long delaysBetweenEachTickMillis = 30_000;
private Instant lastTick;
public NodeAdminStateUpdater(
final NodeRepository nodeRepository,
final NodeAdmin nodeAdmin,
Clock clock,
Orchestrator orchestrator,
String dockerHostHostName) {
this.nodeRepository = nodeRepository;
this.nodeAdmin = nodeAdmin;
this.clock = clock;
this.orchestrator = orchestrator;
this.dockerHostHostName = dockerHostHostName;
this.lastTick = clock.instant();
}
public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED}
public Map<String, Object> getDebugPage() {
Map<String, Object> debug = new LinkedHashMap<>();
synchronized (monitor) {
debug.put("dockerHostHostName", dockerHostHostName);
debug.put("NodeAdmin", nodeAdmin.debugInfo());
debug.put("Wanted State: ", wantedState);
debug.put("Current State: ", currentState);
}
return debug;
}
public boolean setResumeStateAndCheckIfResumed(State wantedState) {
synchronized (monitor) {
if (this.wantedState != wantedState) {
this.wantedState = wantedState;
signalWorkToBeDone();
}
return currentState == wantedState;
}
}
void signalWorkToBeDone() {
synchronized (monitor) {
if (! workToDoNow) {
workToDoNow = true;
monitor.notifyAll();
}
}
}
void tick() {
State wantedState = null;
synchronized (monitor) {
while (! workToDoNow) {
long remainder = delaysBetweenEachTickMillis - Duration.between(lastTick, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: NodeAdminStateUpdater");
}
} else break;
}
lastTick = clock.instant();
workToDoNow = false;
if (currentState != this.wantedState) {
wantedState = this.wantedState;
}
}
if (wantedState != null) {
try {
convergeState(wantedState);
} catch (Exception e) {
logger.error("Failed to converge NodeAdminStateUpdater", e);
}
}
fetchContainersToRunFromNodeRepository();
}
/**
* This method attempts to converge node-admin towards one of the {@link State}
*/
private State updateAndGetCurrentState(State currentState) {
synchronized (monitor) {
this.currentState = currentState;
return currentState;
}
}
private void fetchContainersToRunFromNodeRepository() {
synchronized (monitor) {
if (currentState != RESUMED) {
logger.info("Frozen, skipping fetching info from node repository");
return;
}
final List<ContainerNodeSpec> containersToRun;
try {
containersToRun = nodeRepository.getContainersToRun();
} catch (Throwable t) {
logger.warning("Failed fetching container info from node repository", t);
return;
}
if (containersToRun == null) {
logger.warning("Got null from node repository");
return;
}
try {
nodeAdmin.refreshContainersToRun(containersToRun);
} catch (Throwable t) {
logger.warning("Failed updating node admin: ", t);
}
}
}
private List<String> getNodesInActiveState() throws IOException {
return nodeRepository.getContainersToRun()
.stream()
.filter(nodespec -> nodespec.nodeState == Node.State.active)
.map(nodespec -> nodespec.hostname)
.collect(Collectors.toList());
}
public void start(long stateConvergeInterval) {
delaysBetweenEachTickMillis = stateConvergeInterval;
if (loopThread != null) {
throw new RuntimeException("Can not restart NodeAdminStateUpdater");
}
loopThread = new Thread(() -> {
while (! terminated.get()) tick();
});
loopThread.setName("tick-NodeAdminStateUpdater");
loopThread.start();
}
@Override
public void deconstruct() {
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop NodeAdminStateUpdater tick thread");
}
scheduler.shutdown();
if (! scheduler.awaitTermination(30, TimeUnit.SECONDS)) {
throw new RuntimeException("Could not stop NodeAdminStateUpdater fetch containers scheduler.");
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop NodeAdminStateUpdater thread");
}
nodeAdmin.shutdown();
}
} | class NodeAdminStateUpdater extends AbstractComponent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private State currentState = SUSPENDED_NODE_ADMIN;
private State wantedState = RESUMED;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
private final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdminStateUpdater.class);
private Thread loopThread;
private final NodeRepository nodeRepository;
private final NodeAdmin nodeAdmin;
private final Clock clock;
private final Orchestrator orchestrator;
private final String dockerHostHostName;
private long delaysBetweenEachTickMillis = 30_000;
private Instant lastTick;
public NodeAdminStateUpdater(
final NodeRepository nodeRepository,
final NodeAdmin nodeAdmin,
Clock clock,
Orchestrator orchestrator,
String dockerHostHostName) {
this.nodeRepository = nodeRepository;
this.nodeAdmin = nodeAdmin;
this.clock = clock;
this.orchestrator = orchestrator;
this.dockerHostHostName = dockerHostHostName;
this.lastTick = clock.instant();
}
public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED}
public Map<String, Object> getDebugPage() {
Map<String, Object> debug = new LinkedHashMap<>();
synchronized (monitor) {
debug.put("dockerHostHostName", dockerHostHostName);
debug.put("NodeAdmin", nodeAdmin.debugInfo());
debug.put("Wanted State: ", wantedState);
debug.put("Current State: ", currentState);
}
return debug;
}
public boolean setResumeStateAndCheckIfResumed(State wantedState) {
synchronized (monitor) {
if (this.wantedState != wantedState) {
this.wantedState = wantedState;
signalWorkToBeDone();
}
return currentState == wantedState;
}
}
void signalWorkToBeDone() {
synchronized (monitor) {
if (! workToDoNow) {
workToDoNow = true;
monitor.notifyAll();
}
}
}
void tick() {
State wantedState = null;
synchronized (monitor) {
while (! workToDoNow) {
long remainder = delaysBetweenEachTickMillis - Duration.between(lastTick, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: NodeAdminStateUpdater");
}
} else break;
}
lastTick = clock.instant();
workToDoNow = false;
if (currentState != this.wantedState) {
wantedState = this.wantedState;
}
}
if (wantedState != null) {
try {
convergeState(wantedState);
} catch (Exception e) {
logger.error("Failed to converge NodeAdminStateUpdater", e);
}
}
fetchContainersToRunFromNodeRepository();
}
/**
* This method attempts to converge node-admin towards one of the {@link State}
*/
private State updateAndGetCurrentState(State currentState) {
synchronized (monitor) {
this.currentState = currentState;
return currentState;
}
}
private void fetchContainersToRunFromNodeRepository() {
synchronized (monitor) {
if (currentState != RESUMED) {
logger.info("Frozen, skipping fetching info from node repository");
return;
}
final List<ContainerNodeSpec> containersToRun;
try {
containersToRun = nodeRepository.getContainersToRun();
} catch (Throwable t) {
logger.warning("Failed fetching container info from node repository", t);
return;
}
if (containersToRun == null) {
logger.warning("Got null from node repository");
return;
}
try {
nodeAdmin.refreshContainersToRun(containersToRun);
} catch (Throwable t) {
logger.warning("Failed updating node admin: ", t);
}
}
}
private List<String> getNodesInActiveState() throws IOException {
return nodeRepository.getContainersToRun()
.stream()
.filter(nodespec -> nodespec.nodeState == Node.State.active)
.map(nodespec -> nodespec.hostname)
.collect(Collectors.toList());
}
public void start(long stateConvergeInterval) {
delaysBetweenEachTickMillis = stateConvergeInterval;
if (loopThread != null) {
throw new RuntimeException("Can not restart NodeAdminStateUpdater");
}
loopThread = new Thread(() -> {
while (! terminated.get()) tick();
});
loopThread.setName("tick-NodeAdminStateUpdater");
loopThread.start();
}
@Override
public void deconstruct() {
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop NodeAdminStateUpdater tick thread");
}
scheduler.shutdown();
if (! scheduler.awaitTermination(30, TimeUnit.SECONDS)) {
throw new RuntimeException("Could not stop NodeAdminStateUpdater fetch containers scheduler.");
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop NodeAdminStateUpdater thread");
}
nodeAdmin.shutdown();
}
} |
Why "final"? | RoutingPolicy createPolicy(String name, String param) {
RoutingPolicyFactory factory = getFactory(name);
if (factory == null) {
log.log(LogLevel.ERROR, "No routing policy factory found for name '" + name + "'.");
return null;
}
final DocumentProtocolRoutingPolicy ret = factory.createPolicy(param);
if (ret == null) {
log.log(LogLevel.ERROR, "Routing policy factory " + factory.getClass().getName() + " failed to create a " +
"routing policy for parameter '" + name + "'.");
return null;
}
if (ret.getMetrics() != null) {
metrics.routingPolicyMetrics.addMetric(ret.getMetrics());
}
return ret;
} | final DocumentProtocolRoutingPolicy ret = factory.createPolicy(param); | RoutingPolicy createPolicy(String name, String param) {
RoutingPolicyFactory factory = getFactory(name);
if (factory == null) {
log.log(LogLevel.ERROR, "No routing policy factory found for name '" + name + "'.");
return null;
}
final DocumentProtocolRoutingPolicy ret = factory.createPolicy(param);
if (ret == null) {
log.log(LogLevel.ERROR, "Routing policy factory " + factory.getClass().getName() + " failed to create a " +
"routing policy for parameter '" + name + "'.");
return null;
}
if (ret.getMetrics() != null) {
metrics.routingPolicyMetrics.addMetric(ret.getMetrics());
}
return ret;
} | class RoutingPolicyRepository {
private static final Logger log = Logger.getLogger(RoutingPolicyRepository.class.getName());
// Diamond operator instead of repeating the type arguments; concurrent map since factories
// may be registered and looked up from different threads.
private final Map<String, RoutingPolicyFactory> factories = new ConcurrentHashMap<>();
private final DocumentProtocolMetricSet metrics;
/** Creates a repository whose created policies report their metrics into the given metric set. */
RoutingPolicyRepository(DocumentProtocolMetricSet metrics) {
    this.metrics = metrics;
}
/**
 * Registers a routing policy factory for a given name, replacing any factory previously
 * registered under the same name.
 *
 * @param name The name of the factory to register.
 * @param factory The factory to register.
 */
void putFactory(String name, RoutingPolicyFactory factory) {
    factories.put(name, factory);
}
/**
 * Returns the routing policy factory for a given name.
 *
 * @param name The name of the factory to return.
 * @return The routing policy factory registered under the given name, or null if none is registered.
 */
RoutingPolicyFactory getFactory(String name) {
    return factories.get(name);
}
/**
* Creates and returns a routing policy using the named factory and the given parameter.
*
* @param name The name of the factory to use.
* @param param The parameter to pass to the factory.
* @return The created policy.
*/
} | class RoutingPolicyRepository {
private static final Logger log = Logger.getLogger(RoutingPolicyRepository.class.getName());
private final Map<String, RoutingPolicyFactory> factories = new ConcurrentHashMap<String, RoutingPolicyFactory>();
private final DocumentProtocolMetricSet metrics;
RoutingPolicyRepository(DocumentProtocolMetricSet metrics) {
this.metrics = metrics;
}
/**
* Registers a routing policy factory for a given name.
*
* @param name The name of the factory to register.
* @param factory The factory to register.
*/
void putFactory(String name, RoutingPolicyFactory factory) {
factories.put(name, factory);
}
/**
* Returns the routing policy factory for a given name.
*
* @param name The name of the factory to return.
* @return The routing policy factory matching the criteria, or null.
*/
RoutingPolicyFactory getFactory(String name) {
return factories.get(name);
}
/**
* Creates and returns a routing policy using the named factory and the given parameter.
*
* @param name The name of the factory to use.
* @param param The parameter to pass to the factory.
* @return The created policy.
*/
} |
No point in letting references be mutable unless they explicitly have to be. | RoutingPolicy createPolicy(String name, String param) {
RoutingPolicyFactory factory = getFactory(name);
if (factory == null) {
log.log(LogLevel.ERROR, "No routing policy factory found for name '" + name + "'.");
return null;
}
final DocumentProtocolRoutingPolicy ret = factory.createPolicy(param);
if (ret == null) {
log.log(LogLevel.ERROR, "Routing policy factory " + factory.getClass().getName() + " failed to create a " +
"routing policy for parameter '" + name + "'.");
return null;
}
if (ret.getMetrics() != null) {
metrics.routingPolicyMetrics.addMetric(ret.getMetrics());
}
return ret;
} | final DocumentProtocolRoutingPolicy ret = factory.createPolicy(param); | RoutingPolicy createPolicy(String name, String param) {
RoutingPolicyFactory factory = getFactory(name);
if (factory == null) {
log.log(LogLevel.ERROR, "No routing policy factory found for name '" + name + "'.");
return null;
}
final DocumentProtocolRoutingPolicy ret = factory.createPolicy(param);
if (ret == null) {
log.log(LogLevel.ERROR, "Routing policy factory " + factory.getClass().getName() + " failed to create a " +
"routing policy for parameter '" + name + "'.");
return null;
}
if (ret.getMetrics() != null) {
metrics.routingPolicyMetrics.addMetric(ret.getMetrics());
}
return ret;
} | class RoutingPolicyRepository {
private static final Logger log = Logger.getLogger(RoutingPolicyRepository.class.getName());
private final Map<String, RoutingPolicyFactory> factories = new ConcurrentHashMap<String, RoutingPolicyFactory>();
private final DocumentProtocolMetricSet metrics;
RoutingPolicyRepository(DocumentProtocolMetricSet metrics) {
this.metrics = metrics;
}
/**
* Registers a routing policy factory for a given name.
*
* @param name The name of the factory to register.
* @param factory The factory to register.
*/
void putFactory(String name, RoutingPolicyFactory factory) {
factories.put(name, factory);
}
/**
* Returns the routing policy factory for a given name.
*
* @param name The name of the factory to return.
* @return The routing policy factory matching the criteria, or null.
*/
RoutingPolicyFactory getFactory(String name) {
return factories.get(name);
}
/**
* Creates and returns a routing policy using the named factory and the given parameter.
*
* @param name The name of the factory to use.
* @param param The parameter to pass to the factory.
* @return The created policy.
*/
} | class RoutingPolicyRepository {
private static final Logger log = Logger.getLogger(RoutingPolicyRepository.class.getName());
private final Map<String, RoutingPolicyFactory> factories = new ConcurrentHashMap<String, RoutingPolicyFactory>();
private final DocumentProtocolMetricSet metrics;
RoutingPolicyRepository(DocumentProtocolMetricSet metrics) {
this.metrics = metrics;
}
/**
* Registers a routing policy factory for a given name.
*
* @param name The name of the factory to register.
* @param factory The factory to register.
*/
void putFactory(String name, RoutingPolicyFactory factory) {
factories.put(name, factory);
}
/**
* Returns the routing policy factory for a given name.
*
* @param name The name of the factory to return.
* @return The routing policy factory matching the criteria, or null.
*/
RoutingPolicyFactory getFactory(String name) {
return factories.get(name);
}
/**
* Creates and returns a routing policy using the named factory and the given parameter.
*
* @param name The name of the factory to use.
* @param param The parameter to pass to the factory.
* @return The created policy.
*/
} |
Except avoiding visual clutter. | RoutingPolicy createPolicy(String name, String param) {
RoutingPolicyFactory factory = getFactory(name);
if (factory == null) {
log.log(LogLevel.ERROR, "No routing policy factory found for name '" + name + "'.");
return null;
}
final DocumentProtocolRoutingPolicy ret = factory.createPolicy(param);
if (ret == null) {
log.log(LogLevel.ERROR, "Routing policy factory " + factory.getClass().getName() + " failed to create a " +
"routing policy for parameter '" + name + "'.");
return null;
}
if (ret.getMetrics() != null) {
metrics.routingPolicyMetrics.addMetric(ret.getMetrics());
}
return ret;
} | final DocumentProtocolRoutingPolicy ret = factory.createPolicy(param); | RoutingPolicy createPolicy(String name, String param) {
RoutingPolicyFactory factory = getFactory(name);
if (factory == null) {
log.log(LogLevel.ERROR, "No routing policy factory found for name '" + name + "'.");
return null;
}
final DocumentProtocolRoutingPolicy ret = factory.createPolicy(param);
if (ret == null) {
log.log(LogLevel.ERROR, "Routing policy factory " + factory.getClass().getName() + " failed to create a " +
"routing policy for parameter '" + name + "'.");
return null;
}
if (ret.getMetrics() != null) {
metrics.routingPolicyMetrics.addMetric(ret.getMetrics());
}
return ret;
} | class RoutingPolicyRepository {
private static final Logger log = Logger.getLogger(RoutingPolicyRepository.class.getName());
private final Map<String, RoutingPolicyFactory> factories = new ConcurrentHashMap<String, RoutingPolicyFactory>();
private final DocumentProtocolMetricSet metrics;
RoutingPolicyRepository(DocumentProtocolMetricSet metrics) {
this.metrics = metrics;
}
/**
* Registers a routing policy factory for a given name.
*
* @param name The name of the factory to register.
* @param factory The factory to register.
*/
void putFactory(String name, RoutingPolicyFactory factory) {
factories.put(name, factory);
}
/**
* Returns the routing policy factory for a given name.
*
* @param name The name of the factory to return.
* @return The routing policy factory matching the criteria, or null.
*/
RoutingPolicyFactory getFactory(String name) {
return factories.get(name);
}
/**
* Creates and returns a routing policy using the named factory and the given parameter.
*
* @param name The name of the factory to use.
* @param param The parameter to pass to the factory.
* @return The created policy.
*/
} | class RoutingPolicyRepository {
private static final Logger log = Logger.getLogger(RoutingPolicyRepository.class.getName());
private final Map<String, RoutingPolicyFactory> factories = new ConcurrentHashMap<String, RoutingPolicyFactory>();
private final DocumentProtocolMetricSet metrics;
RoutingPolicyRepository(DocumentProtocolMetricSet metrics) {
this.metrics = metrics;
}
/**
* Registers a routing policy factory for a given name.
*
* @param name The name of the factory to register.
* @param factory The factory to register.
*/
void putFactory(String name, RoutingPolicyFactory factory) {
factories.put(name, factory);
}
/**
* Returns the routing policy factory for a given name.
*
* @param name The name of the factory to return.
* @return The routing policy factory matching the criteria, or null.
*/
RoutingPolicyFactory getFactory(String name) {
return factories.get(name);
}
/**
* Creates and returns a routing policy using the named factory and the given parameter.
*
* @param name The name of the factory to use.
* @param param The parameter to pass to the factory.
* @return The created policy.
*/
} |
Nit: could pull some duplicate test code out into a shared fixture or separate function | public void testAllowedToSetDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
ClusterState stateWithRetiredNode = clusterState(String.format(
"version:%d distributor:4 storage:4 .%d.s:r",
currentClusterStateVersion, nodeStorage.getIndex()));
cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex())
.setHostInfo(createHostInfoWithMetrics(currentClusterStateVersion, 0));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithRetiredNode, SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, DOWN_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
} | .setHostInfo(createHostInfoWithMetrics(currentClusterStateVersion, 0)); | public void testAllowedToSetDown() {
NodeStateChangeChecker.Result result = evaluateDownTransition(
retiredClusterStateSuffix(),
State.UP,
currentClusterStateVersion,
0);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
} | class NodeStateChangeCheckerTest {
private static final int minStorageNodesUp = 3;
private static final int requiredRedundancy = 4;
private static final int currentClusterStateVersion = 2;
private static final double minRatioOfStorageNodesUp = 0.9;
private static final Node nodeDistributor = new Node(NodeType.DISTRIBUTOR, 1);
private static final Node nodeStorage = new Node(NodeType.STORAGE, 1);
private static final NodeState UP_NODE_STATE = new NodeState(NodeType.STORAGE, State.UP);
public static final NodeState MAINTENANCE_NODE_STATE = createNodeState(State.MAINTENANCE, "Orchestrator");
public static final NodeState DOWN_NODE_STATE = createNodeState(State.DOWN, "RetireEarlyExpirer");
private static NodeState createNodeState(State state, String description) {
return new NodeState(NodeType.STORAGE, state).setDescription(description);
}
private static ClusterState clusterState(String state) {
try {
return new ClusterState(state);
} catch (ParseException e) {
throw new RuntimeException(e);
}
}
private static ClusterState defaultAllUpClusterState() {
return clusterState(String.format("version:%d distributor:4 storage:4", currentClusterStateVersion));
}
private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) {
return new NodeStateChangeChecker(minStorageNodesUp, minRatioOfStorageNodesUp, requiredRedundancy, cluster.clusterInfo());
}
private ContentCluster createCluster(Collection<ConfiguredNode> nodes) {
Distribution distribution = mock(Distribution.class);
Group group = new Group(2, "to");
when(distribution.getRootGroup()).thenReturn(group);
return new ContentCluster("Clustername", nodes, distribution, minStorageNodesUp, 0.0);
}
private StorageNodeInfo createStorageNodeInfo(int index, State state) {
Distribution distribution = mock(Distribution.class);
Group group = new Group(2, "to");
when(distribution.getRootGroup()).thenReturn(group);
String clusterName = "Clustername";
Set<ConfiguredNode> configuredNodeIndexes = new HashSet<>();
ContentCluster cluster = new ContentCluster(clusterName, configuredNodeIndexes, distribution, minStorageNodesUp, 0.0);
String rpcAddress = "";
StorageNodeInfo storageNodeInfo = new StorageNodeInfo(cluster, index, false, rpcAddress, distribution);
storageNodeInfo.setReportedState(new NodeState(NodeType.STORAGE, state), 3 /* time */);
return storageNodeInfo;
}
private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) {
return "{\n" +
" \"cluster-state-version\": 2,\n" +
" \"distributor\": {\n" +
" \"storage-nodes\": [\n" +
" {\n" +
" \"node-index\": 0,\n" +
" \"min-current-replication-factor\": " + replicationfactor1 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 1,\n" +
" \"min-current-replication-factor\": " + replicationfactor2 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 2,\n" +
" \"min-current-replication-factor\": " + replicationfactor3 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 3\n" +
" }\n" +
" ]\n" +
" }\n" +
"}\n";
}
private void markAllNodesAsReportingStateUp(ContentCluster cluster) {
final ClusterInfo clusterInfo = cluster.clusterInfo();
final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size();
for (int i = 0; i < configuredNodeCount; i++) {
clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP), 0);
clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(NodeType.STORAGE, State.UP), 0);
}
}
@Test
public void testCanUpgradeForce() {
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(createNodes(1)));
NodeState newState = new NodeState(NodeType.STORAGE, State.INITIALIZING);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeDistributor, defaultAllUpClusterState(), SetUnitStateRequest.Condition.FORCE,
UP_NODE_STATE, newState);
assertTrue(result.settingWantedStateIsAllowed());
assertTrue(!result.wantedStateAlreadySet());
}
@Test
public void testSafeSetStateDistributors() {
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(createNodes(1)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeDistributor, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), containsString("Safe-set of node state is only supported for storage nodes"));
}
@Test
public void testCanUpgradeSafeMissingStorage() {
ContentCluster cluster = createCluster(createNodes(4));
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
5 /* min storage nodes */, minRatioOfStorageNodesUp, requiredRedundancy, cluster.clusterInfo());
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("There are only 4 storage nodes up, while config requires at least 5"));
}
@Test
public void testCanUpgradeStorageSafeYes() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithNoStorageNodesDown();
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testSetUpFailsIfReportedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
MAINTENANCE_NODE_STATE, UP_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
markAllNodesAsReportingStateUp(cluster);
ClusterState stateWithNodeDown = clusterState(String.format(
"version:%d distributor:4 storage:4 .%d.s:d",
currentClusterStateVersion, nodeStorage.getIndex()));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithNodeDown, SetUnitStateRequest.Condition.SAFE,
MAINTENANCE_NODE_STATE, UP_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCannotSetUpIfUnknownOldStateAndReportedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
new NodeState(NodeType.STORAGE, State.DOWN), UP_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Refusing to set wanted state to up when it is currently in Down"));
}
@Test
public void testCanUpgradeStorageSafeNo() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Distributor 0 says storage node 1 " +
"has buckets with redundancy as low as 3, but we require at least 4"));
}
@Test
public void testCanUpgradeIfMissingMinReplicationFactor() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 3), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCanUpgradeIfStorageNodeMissingFromNodeInfo() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
String hostInfo = "{\n" +
" \"cluster-state-version\": 2,\n" +
" \"distributor\": {\n" +
" \"storage-nodes\": [\n" +
" {\n" +
" \"node-index\": 0,\n" +
" \"min-current-replication-factor\": " + requiredRedundancy + "\n" +
" }\n" +
" ]\n" +
" }\n" +
"}\n";
setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 1), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testMissingDistributorState() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(NodeType.STORAGE, State.UP), 0);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Distributor node (0) has not reported any cluster state version yet."));
}
private NodeStateChangeChecker.Result transitionToSameState(State state, String oldDescription, String newDescription) {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeState currentNodeState = createNodeState(state, oldDescription);
NodeState newNodeState = createNodeState(state, newDescription);
return nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
currentNodeState, newNodeState);
}
private NodeStateChangeChecker.Result transitionToSameState(String oldDescription, String newDescription) {
return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription);
}
@Test
public void testSettingUpWhenUpCausesAlreadySet() {
NodeStateChangeChecker.Result result = transitionToSameState(State.UP, "foo", "bar");
assertTrue(result.wantedStateAlreadySet());
}
@Test
public void testSettingAlreadySetState() {
NodeStateChangeChecker.Result result = transitionToSameState("foo", "foo");
assertFalse(result.settingWantedStateIsAllowed());
assertTrue(result.wantedStateAlreadySet());
}
@Test
public void testDifferentDescriptionImpliesAlreadySet() {
NodeStateChangeChecker.Result result = transitionToSameState("foo", "bar");
assertFalse(result.settingWantedStateIsAllowed());
assertTrue(result.wantedStateAlreadySet());
}
private NodeStateChangeChecker.Result transitionToMaintenanceWithOneStorageNodeDown(
int storageNodeIndex, boolean alternatingUpRetiredAndInitializing) {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) {
State state = State.UP;
if (alternatingUpRetiredAndInitializing) {
if (x % 3 == 1) state = State.RETIRED;
else if (x % 3 == 2) state = State.INITIALIZING;
}
cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(NodeType.DISTRIBUTOR, state), 0);
cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(NodeType.STORAGE, state), 0);
}
ClusterState clusterState = defaultAllUpClusterState();
if (storageNodeIndex >= 0) {
NodeState downNodeState = new NodeState(NodeType.STORAGE, State.DOWN);
cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */);
clusterState.setNodeState(new Node(NodeType.STORAGE, storageNodeIndex), downNodeState);
}
return nodeStateChangeChecker.evaluateTransition(
nodeStorage, clusterState, SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
}
private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) {
for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) {
State state = State.UP;
cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(NodeType.DISTRIBUTOR, state), 0);
cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo);
cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(NodeType.STORAGE, state), 0);
}
}
private NodeStateChangeChecker.Result transitionToMaintenanceWithOneStorageNodeDown(int storageNodeIndex) {
return transitionToMaintenanceWithOneStorageNodeDown(storageNodeIndex, false);
}
private NodeStateChangeChecker.Result transitionToMaintenanceWithNoStorageNodesDown() {
return transitionToMaintenanceWithOneStorageNodeDown(-1, false);
}
@Test
public void testCanUpgradeWhenAllUp() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithNoStorageNodesDown();
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCanUpgradeWhenAllUpOrRetired() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithNoStorageNodesDown();
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCanUpgradeWhenStorageIsDown() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithOneStorageNodeDown(nodeStorage.getIndex());
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCannotUpgradeWhenOtherStorageIsDown() {
int otherIndex = 2;
assertNotEquals(nodeStorage.getIndex(), otherIndex);
NodeStateChangeChecker.Result result = transitionToMaintenanceWithOneStorageNodeDown(otherIndex);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), containsString("Not enough storage nodes running"));
}
@Test
public void testNodeRatioRequirementConsidersGeneratedNodeStates() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
markAllNodesAsReportingStateUp(cluster);
ClusterState stateWithNodeDown = clusterState(String.format(
"version:%d distributor:4 storage:4 .3.s:d",
currentClusterStateVersion));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithNodeDown, SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testDisallowedByNonRetiredState() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex())
.setHostInfo(createHostInfoWithMetrics(currentClusterStateVersion, 1));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, DOWN_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason());
}
@Test
public void testDisallowedByBuckets() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
ClusterState stateWithRetiredNode = clusterState(String.format(
"version:%d distributor:4 storage:4 .%d.s:r",
currentClusterStateVersion, nodeStorage.getIndex()));
cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex())
.setHostInfo(createHostInfoWithMetrics(currentClusterStateVersion, 1));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithRetiredNode, SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, DOWN_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("The storage node manages 1 buckets", result.getReason());
}
@Test
private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) {
return HostInfo.createHostInfo(String.format("{\n" +
" \"metrics\":\n" +
" {\n" +
" \"snapshot\":\n" +
" {\n" +
" \"from\":1494940706,\n" +
" \"to\":1494940766\n" +
" },\n" +
" \"values\":\n" +
" [\n" +
" {\n" +
" \"name\":\"vds.datastored.alldisks.buckets\",\n" +
" \"description\":\"buckets managed\",\n" +
" \"values\":\n" +
" {\n" +
" \"average\":262144.0,\n" +
" \"count\":1,\n" +
" \"rate\":0.016666,\n" +
" \"min\":262144,\n" +
" \"max\":262144,\n" +
" \"last\":%d\n" +
" },\n" +
" \"dimensions\":\n" +
" {\n" +
" }\n" +
" },\n" +
" {\n" +
" \"name\":\"vds.datastored.alldisks.docs\",\n" +
" \"description\":\"documents stored\",\n" +
" \"values\":\n" +
" {\n" +
" \"average\":154689587.0,\n" +
" \"count\":1,\n" +
" \"rate\":0.016666,\n" +
" \"min\":154689587,\n" +
" \"max\":154689587,\n" +
" \"last\":154689587\n" +
" },\n" +
" \"dimensions\":\n" +
" {\n" +
" }\n" +
" }\n" +
" ]\n" +
" },\n" +
" \"cluster-state-version\":%d\n" +
"}",
lastAlldisksBuckets, clusterStateVersion));
}
private List<ConfiguredNode> createNodes(int count) {
List<ConfiguredNode> nodes = new ArrayList<>();
for (int i = 0; i < count; i++)
nodes.add(new ConfiguredNode(i, false));
return nodes;
}
} | class NodeStateChangeCheckerTest {
private static final int minStorageNodesUp = 3;
private static final int requiredRedundancy = 4;
private static final int currentClusterStateVersion = 2;
private static final double minRatioOfStorageNodesUp = 0.9;
private static final Node nodeDistributor = new Node(NodeType.DISTRIBUTOR, 1);
private static final Node nodeStorage = new Node(NodeType.STORAGE, 1);
private static final NodeState UP_NODE_STATE = new NodeState(NodeType.STORAGE, State.UP);
public static final NodeState MAINTENANCE_NODE_STATE = createNodeState(State.MAINTENANCE, "Orchestrator");
public static final NodeState DOWN_NODE_STATE = createNodeState(State.DOWN, "RetireEarlyExpirer");
private static NodeState createNodeState(State state, String description) {
return new NodeState(NodeType.STORAGE, state).setDescription(description);
}
private static ClusterState clusterState(String state) {
try {
return new ClusterState(state);
} catch (ParseException e) {
throw new RuntimeException(e);
}
}
private static ClusterState defaultAllUpClusterState() {
return clusterState(String.format("version:%d distributor:4 storage:4", currentClusterStateVersion));
}
private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) {
return new NodeStateChangeChecker(minStorageNodesUp, minRatioOfStorageNodesUp, requiredRedundancy, cluster.clusterInfo());
}
private ContentCluster createCluster(Collection<ConfiguredNode> nodes) {
Distribution distribution = mock(Distribution.class);
Group group = new Group(2, "to");
when(distribution.getRootGroup()).thenReturn(group);
return new ContentCluster("Clustername", nodes, distribution, minStorageNodesUp, 0.0);
}
private StorageNodeInfo createStorageNodeInfo(int index, State state) {
Distribution distribution = mock(Distribution.class);
Group group = new Group(2, "to");
when(distribution.getRootGroup()).thenReturn(group);
String clusterName = "Clustername";
Set<ConfiguredNode> configuredNodeIndexes = new HashSet<>();
ContentCluster cluster = new ContentCluster(clusterName, configuredNodeIndexes, distribution, minStorageNodesUp, 0.0);
String rpcAddress = "";
StorageNodeInfo storageNodeInfo = new StorageNodeInfo(cluster, index, false, rpcAddress, distribution);
storageNodeInfo.setReportedState(new NodeState(NodeType.STORAGE, state), 3 /* time */);
return storageNodeInfo;
}
private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) {
return "{\n" +
" \"cluster-state-version\": 2,\n" +
" \"distributor\": {\n" +
" \"storage-nodes\": [\n" +
" {\n" +
" \"node-index\": 0,\n" +
" \"min-current-replication-factor\": " + replicationfactor1 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 1,\n" +
" \"min-current-replication-factor\": " + replicationfactor2 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 2,\n" +
" \"min-current-replication-factor\": " + replicationfactor3 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 3\n" +
" }\n" +
" ]\n" +
" }\n" +
"}\n";
}
private void markAllNodesAsReportingStateUp(ContentCluster cluster) {
final ClusterInfo clusterInfo = cluster.clusterInfo();
final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size();
for (int i = 0; i < configuredNodeCount; i++) {
clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP), 0);
clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(NodeType.STORAGE, State.UP), 0);
}
}
@Test
public void testCanUpgradeForce() {
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(createNodes(1)));
NodeState newState = new NodeState(NodeType.STORAGE, State.INITIALIZING);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeDistributor, defaultAllUpClusterState(), SetUnitStateRequest.Condition.FORCE,
UP_NODE_STATE, newState);
assertTrue(result.settingWantedStateIsAllowed());
assertTrue(!result.wantedStateAlreadySet());
}
@Test
public void testUnknownStorageNode() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
5 /* min storage nodes */, minRatioOfStorageNodesUp, requiredRedundancy, cluster.clusterInfo());
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 10), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Unknown node storage.10"));
}
@Test
public void testSafeSetStateDistributors() {
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(createNodes(1)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeDistributor, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), containsString("Safe-set of node state is only supported for storage nodes"));
}
@Test
public void testCanUpgradeSafeMissingStorage() {
ContentCluster cluster = createCluster(createNodes(4));
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
5 /* min storage nodes */, minRatioOfStorageNodesUp, requiredRedundancy, cluster.clusterInfo());
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("There are only 4 storage nodes up, while config requires at least 5"));
}
@Test
public void testCanUpgradeStorageSafeYes() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithNoStorageNodesDown();
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testSetUpFailsIfReportedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
MAINTENANCE_NODE_STATE, UP_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
markAllNodesAsReportingStateUp(cluster);
ClusterState stateWithNodeDown = clusterState(String.format(
"version:%d distributor:4 storage:4 .%d.s:d",
currentClusterStateVersion, nodeStorage.getIndex()));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithNodeDown, SetUnitStateRequest.Condition.SAFE,
MAINTENANCE_NODE_STATE, UP_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
// Old wanted state is DOWN and the node has not reported UP; SAFE set-to-UP is refused
// with an explicit reason.
public void testCannotSetUpIfUnknownOldStateAndReportedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
new NodeState(NodeType.STORAGE, State.DOWN), UP_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Refusing to set wanted state to up when it is currently in Down"));
}
@Test
// Distributor host info reports min replication factor 3 for node 1, below the
// required redundancy of 4, so the SAFE maintenance request is denied.
public void testCanUpgradeStorageSafeNo() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Distributor 0 says storage node 1 " +
"has buckets with redundancy as low as 3, but we require at least 4"));
}
@Test
// Host info carries no min-current-replication-factor entry for storage node 3,
// so the redundancy check cannot veto and the transition is allowed.
public void testCanUpgradeIfMissingMinReplicationFactor() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 3), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
// The distributor host info only lists storage node 0; node 1 being absent from the
// host info must not block its SAFE maintenance request.
public void testCanUpgradeIfStorageNodeMissingFromNodeInfo() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
// Hand-written host info JSON containing a single storage-node entry (index 0).
String hostInfo = "{\n" +
" \"cluster-state-version\": 2,\n" +
" \"distributor\": {\n" +
" \"storage-nodes\": [\n" +
" {\n" +
" \"node-index\": 0,\n" +
" \"min-current-replication-factor\": " + requiredRedundancy + "\n" +
" }\n" +
" ]\n" +
" }\n" +
"}\n";
setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 1), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
// Only storage node 1 reports a state; distributor 0 has reported no cluster state
// version, which must block a SAFE transition with a clear reason.
public void testMissingDistributorState() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(NodeType.STORAGE, State.UP), 0);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Distributor node (0) has not reported any cluster state version yet."));
}
// Evaluates a SAFE transition where old and new wanted states share the same State
// value and differ only in description — used to probe "already set" detection.
private NodeStateChangeChecker.Result transitionToSameState(State state, String oldDescription, String newDescription) {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeState currentNodeState = createNodeState(state, oldDescription);
NodeState newNodeState = createNodeState(state, newDescription);
return nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
currentNodeState, newNodeState);
}
// Convenience overload: same-state transition in MAINTENANCE, varying only descriptions.
private NodeStateChangeChecker.Result transitionToSameState(String oldDescription, String newDescription) {
return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription);
}
@Test
// Setting UP when already UP is reported as "wanted state already set".
public void testSettingUpWhenUpCausesAlreadySet() {
NodeStateChangeChecker.Result result = transitionToSameState(State.UP, "foo", "bar");
assertTrue(result.wantedStateAlreadySet());
}
@Test
// Identical state and description: nothing to do, flagged as already set.
public void testSettingAlreadySetState() {
NodeStateChangeChecker.Result result = transitionToSameState("foo", "foo");
assertFalse(result.settingWantedStateIsAllowed());
assertTrue(result.wantedStateAlreadySet());
}
@Test
// A differing description alone does not count as a state change.
public void testDifferentDescriptionImpliesAlreadySet() {
NodeStateChangeChecker.Result result = transitionToSameState("foo", "bar");
assertFalse(result.settingWantedStateIsAllowed());
assertTrue(result.wantedStateAlreadySet());
}
// Builds a 4-node cluster, seeds reported states for every distributor/storage pair,
// optionally marks one storage node down, then evaluates a SAFE UP -> MAINTENANCE
// transition for nodeStorage.
// storageNodeIndex < 0 means "no storage node down".
// When alternatingUpRetiredAndInitializing is true, reported states cycle
// UP / RETIRED / INITIALIZING by node index; otherwise every node reports UP.
private NodeStateChangeChecker.Result transitionToMaintenanceWithOneStorageNodeDown(
int storageNodeIndex, boolean alternatingUpRetiredAndInitializing) {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) {
State state = State.UP;
if (alternatingUpRetiredAndInitializing) {
if (x % 3 == 1) state = State.RETIRED;
else if (x % 3 == 2) state = State.INITIALIZING;
}
cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(NodeType.DISTRIBUTOR, state), 0);
cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(NodeType.STORAGE, state), 0);
}
ClusterState clusterState = defaultAllUpClusterState();
// Mark the chosen storage node down both in its reported state and in the cluster state.
if (storageNodeIndex >= 0) {
NodeState downNodeState = new NodeState(NodeType.STORAGE, State.DOWN);
cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */);
clusterState.setNodeState(new Node(NodeType.STORAGE, storageNodeIndex), downNodeState);
}
return nodeStateChangeChecker.evaluateTransition(
nodeStorage, clusterState, SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
}
/** Marks every configured distributor and storage node as reporting UP, attaching the given host info to each distributor. */
private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) {
    final int nodeCount = cluster.clusterInfo().getConfiguredNodes().size();
    for (int index = 0; index < nodeCount; index++) {
        cluster.clusterInfo().getDistributorNodeInfo(index)
                .setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP), 0);
        cluster.clusterInfo().getDistributorNodeInfo(index).setHostInfo(distributorHostInfo);
        cluster.clusterInfo().getStorageNodeInfo(index)
                .setReportedState(new NodeState(NodeType.STORAGE, State.UP), 0);
    }
}
// Overload without the alternating-states option (all nodes report UP).
private NodeStateChangeChecker.Result transitionToMaintenanceWithOneStorageNodeDown(int storageNodeIndex) {
return transitionToMaintenanceWithOneStorageNodeDown(storageNodeIndex, false);
}
// All nodes up (index -1 = no storage node marked down).
private NodeStateChangeChecker.Result transitionToMaintenanceWithNoStorageNodesDown() {
return transitionToMaintenanceWithOneStorageNodeDown(-1, false);
}
@Test
// Baseline: all nodes up -> SAFE maintenance allowed.
public void testCanUpgradeWhenAllUp() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithNoStorageNodesDown();
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
// Exercises mixed UP/RETIRED/INITIALIZING reported states. The original body called
// transitionToMaintenanceWithNoStorageNodesDown(), which made this an exact duplicate
// of testCanUpgradeWhenAllUp and left the helper's alternating-states flag dead code;
// pass alternating=true so the test matches its name.
// TODO(review): confirm the checker is meant to allow the INITIALIZING reporters too.
public void testCanUpgradeWhenAllUpOrRetired() {
    NodeStateChangeChecker.Result result = transitionToMaintenanceWithOneStorageNodeDown(-1, true);
    assertTrue(result.settingWantedStateIsAllowed());
    assertFalse(result.wantedStateAlreadySet());
}
@Test
// The node being transitioned is itself down; that must not block its own maintenance.
public void testCanUpgradeWhenStorageIsDown() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithOneStorageNodeDown(nodeStorage.getIndex());
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
// A DIFFERENT storage node is down; taking this one down as well would breach
// the minimum-storage-nodes-running requirement.
public void testCannotUpgradeWhenOtherStorageIsDown() {
int otherIndex = 2;
assertNotEquals(nodeStorage.getIndex(), otherIndex);
NodeStateChangeChecker.Result result = transitionToMaintenanceWithOneStorageNodeDown(otherIndex);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), containsString("Not enough storage nodes running"));
}
@Test
// Node 3 is down only in the GENERATED cluster state (all nodes report UP); the
// up-ratio requirement must still count it as down and deny the request.
public void testNodeRatioRequirementConsidersGeneratedNodeStates() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
markAllNodesAsReportingStateUp(cluster);
ClusterState stateWithNodeDown = clusterState(String.format(
"version:%d distributor:4 storage:4 .3.s:d",
currentClusterStateVersion));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithNodeDown, SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
// SAFE set-to-DOWN is only permitted for retired nodes; an UP node is refused.
public void testDownDisallowedByNonRetiredState() {
NodeStateChangeChecker.Result result = evaluateDownTransition(
defaultAllUpClusterState(),
State.UP,
currentClusterStateVersion,
0);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason());
}
@Test
// A retired node still managing buckets (last=1 in the metrics) may not go DOWN.
public void testDownDisallowedByBuckets() {
NodeStateChangeChecker.Result result = evaluateDownTransition(
retiredClusterStateSuffix(),
State.UP,
currentClusterStateVersion,
1);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("The storage node manages 1 buckets", result.getReason());
}
@Test
// A node reporting INITIALIZING provides no bucket data, so DOWN is refused.
public void testDownDisallowedByReportedState() {
NodeStateChangeChecker.Result result = evaluateDownTransition(
retiredClusterStateSuffix(),
State.INITIALIZING,
currentClusterStateVersion,
0);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason());
}
@Test
// Host info reflects an older cluster state version than the controller's; the stale
// metrics cannot be trusted, so DOWN is refused.
public void testDownDisallowedByVersionMismatch() {
NodeStateChangeChecker.Result result = evaluateDownTransition(
retiredClusterStateSuffix(),
State.UP,
currentClusterStateVersion - 1,
0);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1",
result.getReason());
}
/**
 * Runs a SAFE-mode UP -&gt; DOWN evaluation for {@code nodeStorage} after seeding its
 * reported state and host-info metrics.
 * Fix: removed the stray {@code @Test} annotation that preceded this PRIVATE helper —
 * JUnit 4 validates annotated methods (must be public, void, no-arg) and the stray
 * annotation makes the whole test class fail initialization.
 *
 * @param clusterState cluster state the checker evaluates against
 * @param reportedState state the storage node reports for itself
 * @param hostInfoClusterStateVersion cluster-state version embedded in the node's host info
 * @param lastAlldisksBuckets "last" value of the vds.datastored.alldisks.buckets metric
 */
private NodeStateChangeChecker.Result evaluateDownTransition(
        ClusterState clusterState,
        State reportedState,
        int hostInfoClusterStateVersion,
        int lastAlldisksBuckets) {
    ContentCluster cluster = createCluster(createNodes(4));
    NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
    StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex());
    nodeInfo.setReportedState(new NodeState(NodeType.STORAGE, reportedState), 0);
    nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets));
    return nodeStateChangeChecker.evaluateTransition(
            nodeStorage, clusterState, SetUnitStateRequest.Condition.SAFE,
            UP_NODE_STATE, DOWN_NODE_STATE);
}
/** Cluster state in which this test's storage node is marked retired (".s:r"). */
private ClusterState retiredClusterStateSuffix() {
    String stateString = String.format("version:%d distributor:4 storage:4 .%d.s:r",
            currentClusterStateVersion,
            nodeStorage.getIndex());
    return clusterState(stateString);
}
// Builds host-info JSON with a metrics snapshot: the buckets metric's "last" value and
// the cluster-state-version are the two template parameters; the docs metric is fixed.
private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) {
return HostInfo.createHostInfo(String.format("{\n" +
" \"metrics\":\n" +
" {\n" +
" \"snapshot\":\n" +
" {\n" +
" \"from\":1494940706,\n" +
" \"to\":1494940766\n" +
" },\n" +
" \"values\":\n" +
" [\n" +
" {\n" +
" \"name\":\"vds.datastored.alldisks.buckets\",\n" +
" \"description\":\"buckets managed\",\n" +
" \"values\":\n" +
" {\n" +
" \"average\":262144.0,\n" +
" \"count\":1,\n" +
" \"rate\":0.016666,\n" +
" \"min\":262144,\n" +
" \"max\":262144,\n" +
" \"last\":%d\n" +
" },\n" +
" \"dimensions\":\n" +
" {\n" +
" }\n" +
" },\n" +
" {\n" +
" \"name\":\"vds.datastored.alldisks.docs\",\n" +
" \"description\":\"documents stored\",\n" +
" \"values\":\n" +
" {\n" +
" \"average\":154689587.0,\n" +
" \"count\":1,\n" +
" \"rate\":0.016666,\n" +
" \"min\":154689587,\n" +
" \"max\":154689587,\n" +
" \"last\":154689587\n" +
" },\n" +
" \"dimensions\":\n" +
" {\n" +
" }\n" +
" }\n" +
" ]\n" +
" },\n" +
" \"cluster-state-version\":%d\n" +
"}",
lastAlldisksBuckets, clusterStateVersion));
}
/** Builds {@code count} configured, non-retired nodes with indices 0..count-1. */
private List<ConfiguredNode> createNodes(int count) {
    List<ConfiguredNode> configuredNodes = new ArrayList<>(count);
    for (int index = 0; index < count; index++) {
        configuredNodes.add(new ConfiguredNode(index, false));
    }
    return configuredNodes;
}
} |
Again, this seems counter intuitive. You return deactivated containers by filtering out those that are not deactivated? | private Stream<DeactivatedContainer> deactivatedContainerStream() {
synchronized (lock) {
return activeContainers.entrySet().stream()
.filter(e -> e.getKey() != null)
.filter(e -> !e.getValue().isDeactivated())
.map(e -> new DeactivatedContainer(e.getKey(), e.getValue().timeActivated, e.getValue().timeDeactived));
}
} | .map(e -> new DeactivatedContainer(e.getKey(), e.getValue().timeActivated, e.getValue().timeDeactived)); | private Stream<DeactivatedContainer> deactivatedContainerStream() {
synchronized (lock) {
return activeContainers.entrySet().stream()
.filter(e -> e.getValue().isDeactivated())
.map(e -> new DeactivatedContainer(e.getKey(), e.getValue().timeActivated, e.getValue().timeDeactivated));
}
} | class ActiveContainerStatistics {
public interface Metrics {
String TOTAL_DEACTIVATED_CONTAINERS = "jdisc.deactivated_containers.total";
String DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES = "jdisc.deactivated_containers.with_retained_refs";
}
private static final Logger log = Logger.getLogger(ActiveContainerStatistics.class.getName());
private final WeakHashMap<ActiveContainer, ActiveContainerStats> activeContainers = new WeakHashMap<>();
private final Object lock = new Object();
/** Records that the given container was activated at the current instant. */
public void onActivated(ActiveContainer activeContainer) {
    ActiveContainerStats stats = new ActiveContainerStats(Instant.now());
    synchronized (lock) {
        activeContainers.put(activeContainer, stats);
    }
}
// Stamps the container's deactivation time; onActivated() must have been called first
// for the same container, otherwise this is a usage error.
public void onDeactivated(ActiveContainer activeContainer) {
synchronized (lock) {
ActiveContainerStats containerStats = activeContainers.get(activeContainer);
if (containerStats == null) {
throw new IllegalStateException("onActivated() has not been called for container: " + activeContainer);
}
containerStats.setTimeDeactived(Instant.now());
}
}
// Aggregates the deactivated containers (count, and how many still have retained
// references) into the two jdisc.deactivated_containers.* metrics.
public void outputMetrics(Metric metric) {
synchronized (lock) {
DeactivatedContainerMetrics metrics = deactivatedContainerStream()
.collect(
DeactivatedContainerMetrics::new,
DeactivatedContainerMetrics::aggregate,
DeactivatedContainerMetrics::merge);
metric.set(Metrics.TOTAL_DEACTIVATED_CONTAINERS, metrics.deactivatedContainerCount, null);
metric.set(Metrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, metrics.deactivatedContainersWithRetainedRefsCount, null);
}
}
// Logs a warning summary for every deactivated container still tracked; entries only
// remain while the WeakHashMap key (the container) is still strongly reachable, so
// anything listed here is a leak candidate.
public void printSummaryToLog() {
synchronized (lock) {
List<DeactivatedContainer> deactivatedContainers = deactivatedContainerStream().collect(toList());
if (deactivatedContainers.isEmpty()) {
return;
}
log.warning(
"Multiple instances of ActiveContainer leaked! " + deactivatedContainers.size() +
" instances are still present.");
deactivatedContainers.stream()
.map(c -> " - " + c.toSummaryString())
.forEach(log::warning);
}
}
// Mutable activation/deactivation timestamps for one container.
// NOTE(review): "Deactived" is a typo for "Deactivated" throughout this class.
private static class ActiveContainerStats {
public final Instant timeActivated;
// null until the container is deactivated.
public Instant timeDeactived;
public ActiveContainerStats(Instant timeActivated) {
this.timeActivated = timeActivated;
}
public void setTimeDeactived(Instant instant) {
this.timeDeactived = instant;
}
// NOTE(review): this returns true while the container is still ACTIVE
// (timeDeactived == null) — the opposite of its name. The stream that consumes it
// compensates by negating the result; rename this method or invert the check to
// '!= null' together with that caller in one coordinated change.
public boolean isDeactivated() {
return timeDeactived == null;
}
}
// Immutable snapshot of a deactivated-but-still-referenced container, used for
// metrics aggregation and log summaries. Holds a strong reference to the container.
private static class DeactivatedContainer {
public final ActiveContainer activeContainer;
public final Instant timeActivated;
// Assumed non-null: instances are only built for containers that were deactivated
// — TODO(review) confirm at the construction site.
public final Instant timeDeactivated;
public DeactivatedContainer(ActiveContainer activeContainer, Instant timeActivated, Instant timeDeactivated) {
this.activeContainer = activeContainer;
this.timeActivated = timeActivated;
this.timeDeactivated = timeDeactivated;
}
// One-line human-readable summary for the leak warning log.
public String toSummaryString() {
return String.format("%s: timeActivated=%s, timeDeactivated=%s, retainCount=%d",
activeContainer.toString(),
timeActivated.toString(),
timeDeactivated.toString(),
activeContainer.retainCount());
}
}
// Mutable accumulator used as the container for Stream.collect(supplier, accumulator,
// combiner): counts deactivated containers and those with retainCount > 0.
private static class DeactivatedContainerMetrics {
public int deactivatedContainerCount = 0;
public int deactivatedContainersWithRetainedRefsCount = 0;
// Accumulator: folds one container into the counts.
public void aggregate(DeactivatedContainer deactivatedContainer) {
++deactivatedContainerCount;
if (deactivatedContainer.activeContainer.retainCount() > 0) {
++deactivatedContainersWithRetainedRefsCount;
}
}
// Combiner for parallel collection; returns this for convenience.
public DeactivatedContainerMetrics merge(DeactivatedContainerMetrics other) {
deactivatedContainerCount += other.deactivatedContainerCount;
deactivatedContainersWithRetainedRefsCount += other.deactivatedContainersWithRetainedRefsCount;
return this;
}
}
} | class only constructible from this package
public void emitMetrics(Metric metric) {
synchronized (lock) {
DeactivatedContainerMetrics metrics = deactivatedContainerStream()
.collect(
DeactivatedContainerMetrics::new,
DeactivatedContainerMetrics::aggregate,
DeactivatedContainerMetrics::merge);
metric.set(Metrics.TOTAL_DEACTIVATED_CONTAINERS, metrics.deactivatedContainerCount, null);
metric.set(Metrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, metrics.deactivatedContainersWithRetainedRefsCount, null);
}
} |
Done | public void testAllowedToSetDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
ClusterState stateWithRetiredNode = clusterState(String.format(
"version:%d distributor:4 storage:4 .%d.s:r",
currentClusterStateVersion, nodeStorage.getIndex()));
cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex())
.setHostInfo(createHostInfoWithMetrics(currentClusterStateVersion, 0));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithRetiredNode, SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, DOWN_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
} | .setHostInfo(createHostInfoWithMetrics(currentClusterStateVersion, 0)); | public void testAllowedToSetDown() {
NodeStateChangeChecker.Result result = evaluateDownTransition(
retiredClusterStateSuffix(),
State.UP,
currentClusterStateVersion,
0);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
} | class NodeStateChangeCheckerTest {
// Checker configuration shared by all tests in this class.
private static final int minStorageNodesUp = 3;
private static final int requiredRedundancy = 4;
private static final int currentClusterStateVersion = 2;
private static final double minRatioOfStorageNodesUp = 0.9;
// The distributor/storage node (index 1) whose transitions the tests evaluate.
private static final Node nodeDistributor = new Node(NodeType.DISTRIBUTOR, 1);
private static final Node nodeStorage = new Node(NodeType.STORAGE, 1);
private static final NodeState UP_NODE_STATE = new NodeState(NodeType.STORAGE, State.UP);
public static final NodeState MAINTENANCE_NODE_STATE = createNodeState(State.MAINTENANCE, "Orchestrator");
public static final NodeState DOWN_NODE_STATE = createNodeState(State.DOWN, "RetireEarlyExpirer");
// Storage-node state with a description (used in wanted-state comparisons).
private static NodeState createNodeState(State state, String description) {
return new NodeState(NodeType.STORAGE, state).setDescription(description);
}
// Parses a cluster-state string; rethrows the checked ParseException unchecked since
// test fixtures use only literal, known-good state strings.
private static ClusterState clusterState(String state) {
try {
return new ClusterState(state);
} catch (ParseException e) {
throw new RuntimeException(e);
}
}
// 4 distributors + 4 storage nodes, all up, at the fixture's state version.
private static ClusterState defaultAllUpClusterState() {
return clusterState(String.format("version:%d distributor:4 storage:4", currentClusterStateVersion));
}
// Checker under test, built with the class-level configuration constants.
private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) {
return new NodeStateChangeChecker(minStorageNodesUp, minRatioOfStorageNodesUp, requiredRedundancy, cluster.clusterInfo());
}
// Content cluster backed by a mocked Distribution whose root group is fixed.
private ContentCluster createCluster(Collection<ConfiguredNode> nodes) {
Distribution distribution = mock(Distribution.class);
Group group = new Group(2, "to");
when(distribution.getRootGroup()).thenReturn(group);
return new ContentCluster("Clustername", nodes, distribution, minStorageNodesUp, 0.0);
}
// Builds a standalone StorageNodeInfo reporting the given state (at time 3).
// NOTE(review): the backing cluster is created with an EMPTY configured-node set and
// this helper appears unused in this excerpt — verify it is still needed.
private StorageNodeInfo createStorageNodeInfo(int index, State state) {
Distribution distribution = mock(Distribution.class);
Group group = new Group(2, "to");
when(distribution.getRootGroup()).thenReturn(group);
String clusterName = "Clustername";
Set<ConfiguredNode> configuredNodeIndexes = new HashSet<>();
ContentCluster cluster = new ContentCluster(clusterName, configuredNodeIndexes, distribution, minStorageNodesUp, 0.0);
String rpcAddress = "";
StorageNodeInfo storageNodeInfo = new StorageNodeInfo(cluster, index, false, rpcAddress, distribution);
storageNodeInfo.setReportedState(new NodeState(NodeType.STORAGE, state), 3 /* time */);
return storageNodeInfo;
}
// Distributor host-info JSON listing four storage nodes; the first three carry the
// given min-current-replication-factor values, node 3 deliberately has none.
private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) {
return "{\n" +
" \"cluster-state-version\": 2,\n" +
" \"distributor\": {\n" +
" \"storage-nodes\": [\n" +
" {\n" +
" \"node-index\": 0,\n" +
" \"min-current-replication-factor\": " + replicationfactor1 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 1,\n" +
" \"min-current-replication-factor\": " + replicationfactor2 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 2,\n" +
" \"min-current-replication-factor\": " + replicationfactor3 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 3\n" +
" }\n" +
" ]\n" +
" }\n" +
"}\n";
}
// Makes every configured distributor and storage node report UP, with a default
// (4, 5, 6) replication-factor host info on each distributor.
private void markAllNodesAsReportingStateUp(ContentCluster cluster) {
final ClusterInfo clusterInfo = cluster.clusterInfo();
final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size();
for (int i = 0; i < configuredNodeCount; i++) {
clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP), 0);
clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(NodeType.STORAGE, State.UP), 0);
}
}
@Test
// FORCE mode bypasses the safety checks, so even a transition to INITIALIZING on a
// distributor is allowed. Idiom fix: assertTrue(!x) replaced with assertFalse(x),
// matching the rest of this class and giving a clearer failure message.
public void testCanUpgradeForce() {
    NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(createNodes(1)));
    NodeState newState = new NodeState(NodeType.STORAGE, State.INITIALIZING);
    NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
            nodeDistributor, defaultAllUpClusterState(), SetUnitStateRequest.Condition.FORCE,
            UP_NODE_STATE, newState);
    assertTrue(result.settingWantedStateIsAllowed());
    assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testSafeSetStateDistributors() {
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(createNodes(1)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeDistributor, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), containsString("Safe-set of node state is only supported for storage nodes"));
}
@Test
public void testCanUpgradeSafeMissingStorage() {
ContentCluster cluster = createCluster(createNodes(4));
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
5 /* min storage nodes */, minRatioOfStorageNodesUp, requiredRedundancy, cluster.clusterInfo());
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("There are only 4 storage nodes up, while config requires at least 5"));
}
@Test
public void testCanUpgradeStorageSafeYes() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithNoStorageNodesDown();
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testSetUpFailsIfReportedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
MAINTENANCE_NODE_STATE, UP_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
markAllNodesAsReportingStateUp(cluster);
ClusterState stateWithNodeDown = clusterState(String.format(
"version:%d distributor:4 storage:4 .%d.s:d",
currentClusterStateVersion, nodeStorage.getIndex()));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithNodeDown, SetUnitStateRequest.Condition.SAFE,
MAINTENANCE_NODE_STATE, UP_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCannotSetUpIfUnknownOldStateAndReportedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
new NodeState(NodeType.STORAGE, State.DOWN), UP_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Refusing to set wanted state to up when it is currently in Down"));
}
@Test
public void testCanUpgradeStorageSafeNo() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Distributor 0 says storage node 1 " +
"has buckets with redundancy as low as 3, but we require at least 4"));
}
@Test
public void testCanUpgradeIfMissingMinReplicationFactor() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 3), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCanUpgradeIfStorageNodeMissingFromNodeInfo() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
String hostInfo = "{\n" +
" \"cluster-state-version\": 2,\n" +
" \"distributor\": {\n" +
" \"storage-nodes\": [\n" +
" {\n" +
" \"node-index\": 0,\n" +
" \"min-current-replication-factor\": " + requiredRedundancy + "\n" +
" }\n" +
" ]\n" +
" }\n" +
"}\n";
setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 1), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testMissingDistributorState() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(NodeType.STORAGE, State.UP), 0);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Distributor node (0) has not reported any cluster state version yet."));
}
private NodeStateChangeChecker.Result transitionToSameState(State state, String oldDescription, String newDescription) {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeState currentNodeState = createNodeState(state, oldDescription);
NodeState newNodeState = createNodeState(state, newDescription);
return nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
currentNodeState, newNodeState);
}
private NodeStateChangeChecker.Result transitionToSameState(String oldDescription, String newDescription) {
return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription);
}
@Test
public void testSettingUpWhenUpCausesAlreadySet() {
NodeStateChangeChecker.Result result = transitionToSameState(State.UP, "foo", "bar");
assertTrue(result.wantedStateAlreadySet());
}
@Test
public void testSettingAlreadySetState() {
NodeStateChangeChecker.Result result = transitionToSameState("foo", "foo");
assertFalse(result.settingWantedStateIsAllowed());
assertTrue(result.wantedStateAlreadySet());
}
@Test
public void testDifferentDescriptionImpliesAlreadySet() {
NodeStateChangeChecker.Result result = transitionToSameState("foo", "bar");
assertFalse(result.settingWantedStateIsAllowed());
assertTrue(result.wantedStateAlreadySet());
}
private NodeStateChangeChecker.Result transitionToMaintenanceWithOneStorageNodeDown(
int storageNodeIndex, boolean alternatingUpRetiredAndInitializing) {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) {
State state = State.UP;
if (alternatingUpRetiredAndInitializing) {
if (x % 3 == 1) state = State.RETIRED;
else if (x % 3 == 2) state = State.INITIALIZING;
}
cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(NodeType.DISTRIBUTOR, state), 0);
cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(NodeType.STORAGE, state), 0);
}
ClusterState clusterState = defaultAllUpClusterState();
if (storageNodeIndex >= 0) {
NodeState downNodeState = new NodeState(NodeType.STORAGE, State.DOWN);
cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */);
clusterState.setNodeState(new Node(NodeType.STORAGE, storageNodeIndex), downNodeState);
}
return nodeStateChangeChecker.evaluateTransition(
nodeStorage, clusterState, SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
}
private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) {
for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) {
State state = State.UP;
cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(NodeType.DISTRIBUTOR, state), 0);
cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo);
cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(NodeType.STORAGE, state), 0);
}
}
private NodeStateChangeChecker.Result transitionToMaintenanceWithOneStorageNodeDown(int storageNodeIndex) {
return transitionToMaintenanceWithOneStorageNodeDown(storageNodeIndex, false);
}
private NodeStateChangeChecker.Result transitionToMaintenanceWithNoStorageNodesDown() {
return transitionToMaintenanceWithOneStorageNodeDown(-1, false);
}
@Test
public void testCanUpgradeWhenAllUp() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithNoStorageNodesDown();
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCanUpgradeWhenAllUpOrRetired() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithNoStorageNodesDown();
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCanUpgradeWhenStorageIsDown() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithOneStorageNodeDown(nodeStorage.getIndex());
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCannotUpgradeWhenOtherStorageIsDown() {
int otherIndex = 2;
assertNotEquals(nodeStorage.getIndex(), otherIndex);
NodeStateChangeChecker.Result result = transitionToMaintenanceWithOneStorageNodeDown(otherIndex);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), containsString("Not enough storage nodes running"));
}
@Test
public void testNodeRatioRequirementConsidersGeneratedNodeStates() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
markAllNodesAsReportingStateUp(cluster);
ClusterState stateWithNodeDown = clusterState(String.format(
"version:%d distributor:4 storage:4 .3.s:d",
currentClusterStateVersion));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithNodeDown, SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testDisallowedByNonRetiredState() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex())
.setHostInfo(createHostInfoWithMetrics(currentClusterStateVersion, 1));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, DOWN_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason());
}
@Test
public void testDisallowedByBuckets() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
ClusterState stateWithRetiredNode = clusterState(String.format(
"version:%d distributor:4 storage:4 .%d.s:r",
currentClusterStateVersion, nodeStorage.getIndex()));
cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex())
.setHostInfo(createHostInfoWithMetrics(currentClusterStateVersion, 1));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithRetiredNode, SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, DOWN_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("The storage node manages 1 buckets", result.getReason());
}
@Test
private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) {
return HostInfo.createHostInfo(String.format("{\n" +
" \"metrics\":\n" +
" {\n" +
" \"snapshot\":\n" +
" {\n" +
" \"from\":1494940706,\n" +
" \"to\":1494940766\n" +
" },\n" +
" \"values\":\n" +
" [\n" +
" {\n" +
" \"name\":\"vds.datastored.alldisks.buckets\",\n" +
" \"description\":\"buckets managed\",\n" +
" \"values\":\n" +
" {\n" +
" \"average\":262144.0,\n" +
" \"count\":1,\n" +
" \"rate\":0.016666,\n" +
" \"min\":262144,\n" +
" \"max\":262144,\n" +
" \"last\":%d\n" +
" },\n" +
" \"dimensions\":\n" +
" {\n" +
" }\n" +
" },\n" +
" {\n" +
" \"name\":\"vds.datastored.alldisks.docs\",\n" +
" \"description\":\"documents stored\",\n" +
" \"values\":\n" +
" {\n" +
" \"average\":154689587.0,\n" +
" \"count\":1,\n" +
" \"rate\":0.016666,\n" +
" \"min\":154689587,\n" +
" \"max\":154689587,\n" +
" \"last\":154689587\n" +
" },\n" +
" \"dimensions\":\n" +
" {\n" +
" }\n" +
" }\n" +
" ]\n" +
" },\n" +
" \"cluster-state-version\":%d\n" +
"}",
lastAlldisksBuckets, clusterStateVersion));
}
private List<ConfiguredNode> createNodes(int count) {
List<ConfiguredNode> nodes = new ArrayList<>();
for (int i = 0; i < count; i++)
nodes.add(new ConfiguredNode(i, false));
return nodes;
}
} | class NodeStateChangeCheckerTest {
private static final int minStorageNodesUp = 3;
private static final int requiredRedundancy = 4;
private static final int currentClusterStateVersion = 2;
private static final double minRatioOfStorageNodesUp = 0.9;
private static final Node nodeDistributor = new Node(NodeType.DISTRIBUTOR, 1);
private static final Node nodeStorage = new Node(NodeType.STORAGE, 1);
private static final NodeState UP_NODE_STATE = new NodeState(NodeType.STORAGE, State.UP);
public static final NodeState MAINTENANCE_NODE_STATE = createNodeState(State.MAINTENANCE, "Orchestrator");
public static final NodeState DOWN_NODE_STATE = createNodeState(State.DOWN, "RetireEarlyExpirer");
private static NodeState createNodeState(State state, String description) {
return new NodeState(NodeType.STORAGE, state).setDescription(description);
}
private static ClusterState clusterState(String state) {
try {
return new ClusterState(state);
} catch (ParseException e) {
throw new RuntimeException(e);
}
}
private static ClusterState defaultAllUpClusterState() {
return clusterState(String.format("version:%d distributor:4 storage:4", currentClusterStateVersion));
}
private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) {
return new NodeStateChangeChecker(minStorageNodesUp, minRatioOfStorageNodesUp, requiredRedundancy, cluster.clusterInfo());
}
private ContentCluster createCluster(Collection<ConfiguredNode> nodes) {
Distribution distribution = mock(Distribution.class);
Group group = new Group(2, "to");
when(distribution.getRootGroup()).thenReturn(group);
return new ContentCluster("Clustername", nodes, distribution, minStorageNodesUp, 0.0);
}
private StorageNodeInfo createStorageNodeInfo(int index, State state) {
Distribution distribution = mock(Distribution.class);
Group group = new Group(2, "to");
when(distribution.getRootGroup()).thenReturn(group);
String clusterName = "Clustername";
Set<ConfiguredNode> configuredNodeIndexes = new HashSet<>();
ContentCluster cluster = new ContentCluster(clusterName, configuredNodeIndexes, distribution, minStorageNodesUp, 0.0);
String rpcAddress = "";
StorageNodeInfo storageNodeInfo = new StorageNodeInfo(cluster, index, false, rpcAddress, distribution);
storageNodeInfo.setReportedState(new NodeState(NodeType.STORAGE, state), 3 /* time */);
return storageNodeInfo;
}
private String createDistributorHostInfo(int replicationfactor1, int replicationfactor2, int replicationfactor3) {
return "{\n" +
" \"cluster-state-version\": 2,\n" +
" \"distributor\": {\n" +
" \"storage-nodes\": [\n" +
" {\n" +
" \"node-index\": 0,\n" +
" \"min-current-replication-factor\": " + replicationfactor1 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 1,\n" +
" \"min-current-replication-factor\": " + replicationfactor2 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 2,\n" +
" \"min-current-replication-factor\": " + replicationfactor3 + "\n" +
" },\n" +
" {\n" +
" \"node-index\": 3\n" +
" }\n" +
" ]\n" +
" }\n" +
"}\n";
}
private void markAllNodesAsReportingStateUp(ContentCluster cluster) {
final ClusterInfo clusterInfo = cluster.clusterInfo();
final int configuredNodeCount = cluster.clusterInfo().getConfiguredNodes().size();
for (int i = 0; i < configuredNodeCount; i++) {
clusterInfo.getDistributorNodeInfo(i).setReportedState(new NodeState(NodeType.DISTRIBUTOR, State.UP), 0);
clusterInfo.getDistributorNodeInfo(i).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
clusterInfo.getStorageNodeInfo(i).setReportedState(new NodeState(NodeType.STORAGE, State.UP), 0);
}
}
@Test
public void testCanUpgradeForce() {
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(createNodes(1)));
NodeState newState = new NodeState(NodeType.STORAGE, State.INITIALIZING);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeDistributor, defaultAllUpClusterState(), SetUnitStateRequest.Condition.FORCE,
UP_NODE_STATE, newState);
assertTrue(result.settingWantedStateIsAllowed());
assertTrue(!result.wantedStateAlreadySet());
}
@Test
public void testUnknownStorageNode() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
5 /* min storage nodes */, minRatioOfStorageNodesUp, requiredRedundancy, cluster.clusterInfo());
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 10), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Unknown node storage.10"));
}
@Test
public void testSafeSetStateDistributors() {
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(createNodes(1)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeDistributor, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), containsString("Safe-set of node state is only supported for storage nodes"));
}
@Test
public void testCanUpgradeSafeMissingStorage() {
ContentCluster cluster = createCluster(createNodes(4));
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
5 /* min storage nodes */, minRatioOfStorageNodesUp, requiredRedundancy, cluster.clusterInfo());
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("There are only 4 storage nodes up, while config requires at least 5"));
}
@Test
public void testCanUpgradeStorageSafeYes() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithNoStorageNodesDown();
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testSetUpFailsIfReportedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
MAINTENANCE_NODE_STATE, UP_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testSetUpSucceedsIfReportedIsUpButGeneratedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
markAllNodesAsReportingStateUp(cluster);
ClusterState stateWithNodeDown = clusterState(String.format(
"version:%d distributor:4 storage:4 .%d.s:d",
currentClusterStateVersion, nodeStorage.getIndex()));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithNodeDown, SetUnitStateRequest.Condition.SAFE,
MAINTENANCE_NODE_STATE, UP_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCannotSetUpIfUnknownOldStateAndReportedIsDown() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
new NodeState(NodeType.STORAGE, State.DOWN), UP_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Refusing to set wanted state to up when it is currently in Down"));
}
@Test
public void testCanUpgradeStorageSafeNo() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Distributor 0 says storage node 1 " +
"has buckets with redundancy as low as 3, but we require at least 4"));
}
@Test
public void testCanUpgradeIfMissingMinReplicationFactor() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
setAllNodesUp(cluster, HostInfo.createHostInfo(createDistributorHostInfo(4, 3, 6)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 3), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCanUpgradeIfStorageNodeMissingFromNodeInfo() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
String hostInfo = "{\n" +
" \"cluster-state-version\": 2,\n" +
" \"distributor\": {\n" +
" \"storage-nodes\": [\n" +
" {\n" +
" \"node-index\": 0,\n" +
" \"min-current-replication-factor\": " + requiredRedundancy + "\n" +
" }\n" +
" ]\n" +
" }\n" +
"}\n";
setAllNodesUp(cluster, HostInfo.createHostInfo(hostInfo));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 1), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testMissingDistributorState() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
cluster.clusterInfo().getStorageNodeInfo(1).setReportedState(new NodeState(NodeType.STORAGE, State.UP), 0);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), is("Distributor node (0) has not reported any cluster state version yet."));
}
private NodeStateChangeChecker.Result transitionToSameState(State state, String oldDescription, String newDescription) {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
NodeState currentNodeState = createNodeState(state, oldDescription);
NodeState newNodeState = createNodeState(state, newDescription);
return nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
currentNodeState, newNodeState);
}
private NodeStateChangeChecker.Result transitionToSameState(String oldDescription, String newDescription) {
return transitionToSameState(State.MAINTENANCE, oldDescription, newDescription);
}
@Test
public void testSettingUpWhenUpCausesAlreadySet() {
NodeStateChangeChecker.Result result = transitionToSameState(State.UP, "foo", "bar");
assertTrue(result.wantedStateAlreadySet());
}
@Test
public void testSettingAlreadySetState() {
NodeStateChangeChecker.Result result = transitionToSameState("foo", "foo");
assertFalse(result.settingWantedStateIsAllowed());
assertTrue(result.wantedStateAlreadySet());
}
@Test
public void testDifferentDescriptionImpliesAlreadySet() {
NodeStateChangeChecker.Result result = transitionToSameState("foo", "bar");
assertFalse(result.settingWantedStateIsAllowed());
assertTrue(result.wantedStateAlreadySet());
}
private NodeStateChangeChecker.Result transitionToMaintenanceWithOneStorageNodeDown(
int storageNodeIndex, boolean alternatingUpRetiredAndInitializing) {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) {
State state = State.UP;
if (alternatingUpRetiredAndInitializing) {
if (x % 3 == 1) state = State.RETIRED;
else if (x % 3 == 2) state = State.INITIALIZING;
}
cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(NodeType.DISTRIBUTOR, state), 0);
cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(HostInfo.createHostInfo(createDistributorHostInfo(4, 5, 6)));
cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(NodeType.STORAGE, state), 0);
}
ClusterState clusterState = defaultAllUpClusterState();
if (storageNodeIndex >= 0) {
NodeState downNodeState = new NodeState(NodeType.STORAGE, State.DOWN);
cluster.clusterInfo().getStorageNodeInfo(storageNodeIndex).setReportedState(downNodeState, 4 /* time */);
clusterState.setNodeState(new Node(NodeType.STORAGE, storageNodeIndex), downNodeState);
}
return nodeStateChangeChecker.evaluateTransition(
nodeStorage, clusterState, SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
}
private void setAllNodesUp(ContentCluster cluster, HostInfo distributorHostInfo) {
for (int x = 0; x < cluster.clusterInfo().getConfiguredNodes().size(); x++) {
State state = State.UP;
cluster.clusterInfo().getDistributorNodeInfo(x).setReportedState(new NodeState(NodeType.DISTRIBUTOR, state), 0);
cluster.clusterInfo().getDistributorNodeInfo(x).setHostInfo(distributorHostInfo);
cluster.clusterInfo().getStorageNodeInfo(x).setReportedState(new NodeState(NodeType.STORAGE, state), 0);
}
}
private NodeStateChangeChecker.Result transitionToMaintenanceWithOneStorageNodeDown(int storageNodeIndex) {
return transitionToMaintenanceWithOneStorageNodeDown(storageNodeIndex, false);
}
private NodeStateChangeChecker.Result transitionToMaintenanceWithNoStorageNodesDown() {
return transitionToMaintenanceWithOneStorageNodeDown(-1, false);
}
@Test
public void testCanUpgradeWhenAllUp() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithNoStorageNodesDown();
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCanUpgradeWhenAllUpOrRetired() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithNoStorageNodesDown();
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCanUpgradeWhenStorageIsDown() {
NodeStateChangeChecker.Result result = transitionToMaintenanceWithOneStorageNodeDown(nodeStorage.getIndex());
assertTrue(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testCannotUpgradeWhenOtherStorageIsDown() {
int otherIndex = 2;
assertNotEquals(nodeStorage.getIndex(), otherIndex);
NodeStateChangeChecker.Result result = transitionToMaintenanceWithOneStorageNodeDown(otherIndex);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertThat(result.getReason(), containsString("Not enough storage nodes running"));
}
@Test
public void testNodeRatioRequirementConsidersGeneratedNodeStates() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
markAllNodesAsReportingStateUp(cluster);
ClusterState stateWithNodeDown = clusterState(String.format(
"version:%d distributor:4 storage:4 .3.s:d",
currentClusterStateVersion));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithNodeDown, SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
}
@Test
public void testDownDisallowedByNonRetiredState() {
NodeStateChangeChecker.Result result = evaluateDownTransition(
defaultAllUpClusterState(),
State.UP,
currentClusterStateVersion,
0);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason());
}
@Test
public void testDownDisallowedByBuckets() {
NodeStateChangeChecker.Result result = evaluateDownTransition(
retiredClusterStateSuffix(),
State.UP,
currentClusterStateVersion,
1);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("The storage node manages 1 buckets", result.getReason());
}
@Test
public void testDownDisallowedByReportedState() {
NodeStateChangeChecker.Result result = evaluateDownTransition(
retiredClusterStateSuffix(),
State.INITIALIZING,
currentClusterStateVersion,
0);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason());
}
@Test
public void testDownDisallowedByVersionMismatch() {
NodeStateChangeChecker.Result result = evaluateDownTransition(
retiredClusterStateSuffix(),
State.UP,
currentClusterStateVersion - 1,
0);
assertFalse(result.settingWantedStateIsAllowed());
assertFalse(result.wantedStateAlreadySet());
assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1",
result.getReason());
}
@Test
private NodeStateChangeChecker.Result evaluateDownTransition(
ClusterState clusterState,
State reportedState,
int hostInfoClusterStateVersion,
int lastAlldisksBuckets) {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(cluster);
StorageNodeInfo nodeInfo = cluster.clusterInfo().getStorageNodeInfo(nodeStorage.getIndex());
nodeInfo.setReportedState(new NodeState(NodeType.STORAGE, reportedState), 0);
nodeInfo.setHostInfo(createHostInfoWithMetrics(hostInfoClusterStateVersion, lastAlldisksBuckets));
return nodeStateChangeChecker.evaluateTransition(
nodeStorage, clusterState, SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, DOWN_NODE_STATE);
}
private ClusterState retiredClusterStateSuffix() {
return clusterState(String.format("version:%d distributor:4 storage:4 .%d.s:r",
currentClusterStateVersion,
nodeStorage.getIndex()));
}
private static HostInfo createHostInfoWithMetrics(int clusterStateVersion, int lastAlldisksBuckets) {
return HostInfo.createHostInfo(String.format("{\n" +
" \"metrics\":\n" +
" {\n" +
" \"snapshot\":\n" +
" {\n" +
" \"from\":1494940706,\n" +
" \"to\":1494940766\n" +
" },\n" +
" \"values\":\n" +
" [\n" +
" {\n" +
" \"name\":\"vds.datastored.alldisks.buckets\",\n" +
" \"description\":\"buckets managed\",\n" +
" \"values\":\n" +
" {\n" +
" \"average\":262144.0,\n" +
" \"count\":1,\n" +
" \"rate\":0.016666,\n" +
" \"min\":262144,\n" +
" \"max\":262144,\n" +
" \"last\":%d\n" +
" },\n" +
" \"dimensions\":\n" +
" {\n" +
" }\n" +
" },\n" +
" {\n" +
" \"name\":\"vds.datastored.alldisks.docs\",\n" +
" \"description\":\"documents stored\",\n" +
" \"values\":\n" +
" {\n" +
" \"average\":154689587.0,\n" +
" \"count\":1,\n" +
" \"rate\":0.016666,\n" +
" \"min\":154689587,\n" +
" \"max\":154689587,\n" +
" \"last\":154689587\n" +
" },\n" +
" \"dimensions\":\n" +
" {\n" +
" }\n" +
" }\n" +
" ]\n" +
" },\n" +
" \"cluster-state-version\":%d\n" +
"}",
lastAlldisksBuckets, clusterStateVersion));
}
private List<ConfiguredNode> createNodes(int count) {
List<ConfiguredNode> nodes = new ArrayList<>();
for (int i = 0; i < count; i++)
nodes.add(new ConfiguredNode(i, false));
return nodes;
}
} |
The way I read how interrupts works: Calling interrupt() will cause another InterruptedException at a later time, which is not what we want. Calling interrupted() clears the interrupted status, but it will already be cleared at the time the InterruptedException was thrown. So just remove this statement. | private void storeClusterStateVersionToZooKeeper(ClusterState state) {
try {
database.saveLatestSystemStateVersion(databaseContext, state.getVersion());
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("ZooKeeper write interrupted", e);
}
} | Thread.currentThread().interrupt(); | private void storeClusterStateVersionToZooKeeper(ClusterState state) {
try {
database.saveLatestSystemStateVersion(databaseContext, state.getVersion());
} catch (InterruptedException e) {
throw new RuntimeException("ZooKeeper write interrupted", e);
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static Logger log = Logger.getLogger(FleetController.class.getName());
private final Timer timer;
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
private Thread runner = null;
private boolean running = true;
private FleetControllerOptions options;
private FleetControllerOptions nextOptions;
private final List<SystemStateListener> systemStateListeners = new LinkedList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false;
private long cycleCount = 0;
private long nextStateSendTime = 0;
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>();
private long configGeneration = -1;
private long nextConfigGeneration = -1;
private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
private boolean isMaster = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); }
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
public long getConfigGeneration() { return configGeneration; }
@Override
public ContentCluster getCluster() { return cluster; }
};
public FleetController(Timer timer,
EventLog eventLog,
ContentCluster cluster,
NodeStateGatherer nodeStateGatherer,
Communicator communicator,
StatusPageServerInterface statusPage,
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
StateChangeHandler stateChangeHandler,
SystemStateBroadcaster systemStateBroadcaster,
MasterElectionHandler masterElectionHandler,
MetricUpdater metricUpdater,
FleetControllerOptions options) throws Exception
{
log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
this.timer = timer;
this.monitor = timer;
this.eventLog = eventLog;
this.options = options;
this.nodeLookup = nodeLookup;
this.cluster = cluster;
this.communicator = communicator;
this.database = database;
this.stateGatherer = nodeStateGatherer;
this.stateChangeHandler = stateChangeHandler;
this.systemStateBroadcaster = systemStateBroadcaster;
this.stateVersionTracker = new StateVersionTracker(metricUpdater);
this.metricUpdater = metricUpdater;
this.statusPageServer = statusPage;
this.rpcServer = server;
this.masterElectionHandler = masterElectionHandler;
this.statusRequestRouter.addHandler(
"^/node=([a-z]+)\\.(\\d+)$",
new LegacyNodePageRequestHandler(timer, eventLog, cluster));
this.statusRequestRouter.addHandler(
"^/state.*",
new NodeHealthRequestHandler(dataExtractor));
this.statusRequestRouter.addHandler(
"^/clusterstate",
new ClusterStateRequestHandler(stateVersionTracker));
this.statusRequestRouter.addHandler(
"^/$",
new LegacyIndexPageRequestHandler(
timer, options.showLocalSystemStatesInEventLog, cluster,
masterElectionHandler, stateVersionTracker,
eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
propagateOptions();
}
public static FleetController createForContainer(FleetControllerOptions options,
StatusPageServerInterface statusPageServer,
MetricReporter metricReporter) throws Exception {
Timer timer = new RealTimer();
return create(options, timer, statusPageServer, null, metricReporter);
}
public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception {
Timer timer = new RealTimer();
RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy);
StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort);
return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter());
}
private static FleetController create(FleetControllerOptions options,
Timer timer,
StatusPageServerInterface statusPageServer,
RpcServer rpcServer,
MetricReporter metricReporter) throws Exception
{
MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
EventLog log = new EventLog(timer, metricUpdater);
ContentCluster cluster = new ContentCluster(
options.clusterName,
options.nodes,
options.storageDistribution,
options.minStorageNodesUp,
options.minRatioOfStorageNodesUp);
NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
Communicator communicator = new RPCCommunicator(
timer,
options.fleetControllerIndex,
options.nodeStateRequestTimeoutMS,
options.nodeStateRequestTimeoutEarliestPercentage,
options.nodeStateRequestTimeoutLatestPercentage,
options.nodeStateRequestRoundTripTimeMaxSeconds);
DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
NodeLookup lookUp = new SlobrokClient(timer);
StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater);
SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
FleetController controller = new FleetController(
timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
controller.start();
return controller;
}
/** Starts the controller's event loop (see run()) on a new thread. */
public void start() {
    runner = new Thread(this);
    runner.start();
}
/** Returns the monitor object used for cross-thread synchronization within this controller. */
public Object getMonitor() { return monitor; }

/** Returns whether the event loop is (still) running. */
public boolean isRunning() {
    synchronized(monitor) {
        return running;
    }
}

/** Returns whether this controller currently holds mastership according to the election handler. */
public boolean isMaster() {
    synchronized (monitor) {
        return masterElectionHandler.isMaster();
    }
}

/** Returns the cluster state as known by the system state broadcaster. */
public ClusterState getClusterState() {
    synchronized (monitor) {
        return systemStateBroadcaster.getClusterState();
    }
}
/** Queues a remote task; the controller's own event thread executes it during a later tick. */
public void schedule(RemoteClusterControllerTask task) {
    synchronized (monitor) {
        log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
        remoteTasks.add(task);
    }
}
/**
 * Used for unit testing. Registers a listener and immediately feeds it the current system
 * state, which must be non-null by the time listeners are added.
 */
public void addSystemStateListener(SystemStateListener listener) {
    synchronized (systemStateListeners) {
        systemStateListeners.add(listener);
        com.yahoo.vdslib.state.ClusterState state = getSystemState();
        if (state == null) throw new NullPointerException("Cluster state should never be null at this point");
        listener.handleNewSystemState(state);
    }
}
/** Returns a defensive copy of the currently active options. */
public FleetControllerOptions getOptions() {
    synchronized(monitor) {
        return options.clone();
    }
}
/**
 * Returns the state last reported by the given node.
 *
 * @throws IllegalStateException if the node is not part of the cluster
 */
public NodeState getReportedNodeState(Node n) {
    synchronized (monitor) {
        NodeInfo info = cluster.getNodeInfo(n);
        if (info != null) {
            return info.getReportedState();
        }
        throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
    }
}
/** Returns the wanted (operator-set) state of the given node. */
public NodeState getWantedNodeState(Node n) {
    synchronized(monitor) {
        return cluster.getNodeInfo(n).getWantedState();
    }
}

/** Returns the currently versioned (published) cluster state. */
public com.yahoo.vdslib.state.ClusterState getSystemState() {
    synchronized(monitor) {
        return stateVersionTracker.getVersionedClusterState();
    }
}

/** Returns the port the status page server listens on. */
public int getHttpPort() { return statusPageServer.getPort(); }

/** Returns the port the RPC server listens on. */
public int getRpcPort() { return rpcServer.getPort(); }
/**
 * Stops the event thread (if still running), then shuts down the database, status page
 * server, RPC server, communicator and node lookup, in that order.
 */
public void shutdown() throws InterruptedException, java.io.IOException {
    boolean stillRunning;
    synchronized (monitor) {
        stillRunning = running;
    }
    if (stillRunning && runner != null) {
        log.log(LogLevel.INFO, "Joining event thread.");
        running = false;
        runner.interrupt();
        runner.join();
    }
    log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread.");
    // From here on the shutting-down thread acts as the controller thread.
    controllerThreadId = Thread.currentThread().getId();
    database.shutdown(this);
    if (statusPageServer != null) {
        statusPageServer.shutdown();
    }
    if (rpcServer != null) {
        rpcServer.shutdown();
    }
    communicator.shutdown();
    nodeLookup.shutdown();
}
/**
 * Hands a new set of options to the controller; the event thread picks them up and applies
 * them (via switchToNewConfig) at the start of its next cycle.
 */
public void updateOptions(FleetControllerOptions options, long configGeneration) {
    synchronized(monitor) {
        assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
        log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
        nextOptions = options.clone();
        nextConfigGeneration = configGeneration;
        monitor.notifyAll();
    }
}
/**
 * Asserts that the caller runs on the controller's own event thread. A no-op until the
 * controller thread id has been recorded.
 */
private void verifyInControllerThread() {
    Long ownerId = controllerThreadId;
    if (ownerId == null || ownerId == Thread.currentThread().getId()) {
        return;
    }
    throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
}
/** Returns the most recent candidate (not yet versioned/published) cluster state. */
private ClusterState latestCandidateClusterState() {
    return stateVersionTracker.getLatestCandidateState().getClusterState();
}
/** Forwards a node's newly reported state to the state change handler. Event-thread only. */
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}

/** Marks the wanted state as changed (so it is persisted later) and proposes it. Event-thread only. */
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    wantedStateChanged = true;
    stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}

/** Feeds updated host info into the state version tracker. Event-thread only. */
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
    verifyInControllerThread();
    stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo);
}

/** Notifies the state change handler of a newly discovered node. Event-thread only. */
@Override
public void handleNewNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewNode(node);
}

/** Notifies the state change handler that a node disappeared. Event-thread only. */
@Override
public void handleMissingNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}

/** Notifies the state change handler that a node got a new RPC address. Event-thread only. */
@Override
public void handleNewRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewRpcAddress(node);
}

/** Notifies the state change handler that a node's RPC address reappeared. Event-thread only. */
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleReturnedRpcAddress(node);
}
/**
 * Accepts a newly generated system state: queues it for listener propagation, updates
 * metrics, hands it to the broadcaster, and (if master) persists its version to ZooKeeper.
 * Event-thread only.
 */
public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) {
    verifyInControllerThread();
    newStates.add(state);
    metricUpdater.updateClusterStateMetrics(cluster, state);
    systemStateBroadcaster.handleNewSystemState(state);
    if (masterElectionHandler.isMaster()) {
        storeClusterStateVersionToZooKeeper(state);
    }
}
/**
 * Receives the current master election data. The keys in the given map are indexes of
 * fleet controllers; each value is the index of the controller that controller wants as
 * master.
 *
 * If more than half of the fleet controllers want a node to be master, and that node also
 * wants itself as master, that node is the single master. If this condition is not met,
 * there is currently no master.
 */
public void handleFleetData(Map<Integer, Integer> data) {
    verifyInControllerThread();
    log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler");
    metricUpdater.updateMasterElectionMetrics(data);
    masterElectionHandler.handleFleetData(data);
}
/** Called when we can no longer contact the database; informs the master election handler. */
public void lostDatabaseConnection() {
    verifyInControllerThread();
    masterElectionHandler.lostDatabaseConnection();
}
/** Called when all distributors have acked the newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
    Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
    stateChangeHandler.handleAllDistributorsInSync(
            stateVersionTracker.getVersionedClusterState(), nodes, database, context);
}
/**
 * Returns whether the given node collection differs from the cluster's currently
 * configured node set, either in membership or in any node's retired flag.
 */
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    if (newNodes.size() != cluster.getConfiguredNodes().size()
            || ! cluster.getConfiguredNodes().values().containsAll(newNodes)) {
        return true;
    }
    // Same membership; check whether any node's retired flag flipped.
    for (ConfiguredNode candidate : newNodes) {
        ConfiguredNode existing = cluster.getConfiguredNodes().get(candidate.index());
        if (existing.retired() != candidate.retired()) {
            return true;
        }
    }
    return false;
}
/**
 * This is called when the options field has been set to a new set of options.
 * Pushes the new options into every collaborator (communicator, lookup, event log, cluster
 * model, database, gatherer, state change handler, election handler, RPC and status
 * servers) and records the new config generation. Must run on the controller thread.
 */
private void propagateOptions() throws java.io.IOException, ListenFailedException {
    verifyInControllerThread();
    // A changed node set invalidates the slobrok generation tracking.
    if (changesConfiguredNodeSet(options.nodes)) {
        cluster.setSlobrokGenerationCount(0);
    }
    communicator.propagateOptions(options);
    if (nodeLookup instanceof SlobrokClient)
        ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
    eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
    cluster.setPollingFrequency(options.statePollingFrequency);
    cluster.setDistribution(options.storageDistribution);
    cluster.setNodes(options.nodes);
    cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp);
    cluster.setMinStorageNodesUp(options.minStorageNodesUp);
    database.setZooKeeperAddress(options.zooKeeperServerAddress);
    database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
    stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
    stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
    stateChangeHandler.reconfigureFromOptions(options);
    stateChangeHandler.setStateChangedFlag();
    masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
    masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
    // Port (re)binds may legitimately fail during reconfiguration; log and carry on.
    if (rpcServer != null) {
        rpcServer.setMasterElectionHandler(masterElectionHandler);
        try{
            rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
        } catch (ListenFailedException e) {
            log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        } catch (Exception e) {
            log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
        }
    }
    if (statusPageServer != null) {
        try{
            statusPageServer.setPort(options.httpPort);
        } catch (Exception e) {
            log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        }
    }
    long currentTime = timer.getCurrentTimeInMillis();
    // Never delay the next state broadcast beyond the configured minimum interval.
    nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
    configGeneration = nextConfigGeneration;
    nextConfigGeneration = -1;
}
/**
 * Resolves and serves a status page request. If no handler matches, or a handler throws,
 * an HTML error page is built instead; unexpected stack traces are embedded only as hidden
 * content in the page. Event-thread only.
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode responseCode;
    String message;
    String hiddenMessage = "";
    try {
        StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
        if (handler == null) {
            throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
        }
        return handler.handle(httpRequest);
    } catch (FileNotFoundException e) {
        responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
        message = e.getMessage();
    } catch (Exception e) {
        responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        message = "Internal Server Error";
        hiddenMessage = ExceptionUtils.getStackTrace(e);
        log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() +
                ": " + hiddenMessage);
    }
    // Build the error page.
    TimeZone tz = TimeZone.getTimeZone("UTC");
    long currentTime = timer.getCurrentTimeInMillis();
    StatusPageResponse response = new StatusPageResponse();
    StringBuilder content = new StringBuilder();
    response.setContentType("text/html");
    response.setResponseCode(responseCode);
    content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
    content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
    response.writeHtmlHeader(content, message);
    response.writeHtmlFooter(content, hiddenMessage);
    response.writeContent(content.toString());
    return response;
}
/**
 * Runs one iteration of the controller's main loop: database work, master election and
 * leadership transitions, node state gathering, state broadcasting, status page and RPC
 * request handling, and queued remote tasks. Tracks whether any work was done; when fully
 * idle, waits on the monitor instead of busy-looping. Pending option changes are applied
 * at the start of the next cycle.
 */
public void tick() throws Exception {
    synchronized (monitor) {
        boolean didWork;
        didWork = database.doNextZooKeeperTask(databaseContext);
        didWork |= updateMasterElectionState();
        didWork |= handleLeadershipEdgeTransitions();
        stateChangeHandler.setMaster(isMaster);
        didWork |= stateGatherer.processResponses(this);
        // Only the first N election candidates gather node states; everyone else steps down.
        if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
            didWork |= resyncLocallyCachedState();
        } else {
            stepDownAsStateGatherer();
        }
        didWork |= systemStateBroadcaster.processResponses();
        if (masterElectionHandler.isMaster()) {
            didWork |= broadcastClusterStateToEligibleNodes();
        }
        didWork |= processAnyPendingStatusPageRequest();
        if (rpcServer != null) {
            didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
        }
        didWork |= processNextQueuedRemoteTask();
        processingCycle = false;
        ++cycleCount;
        long tickStopTime = timer.getCurrentTimeInMillis();
        if (tickStopTime >= tickStartTime)
            metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
        // Idle and nobody waiting for a cycle to complete: block until notified or timeout.
        if ( ! didWork && ! waitingForCycle)
            monitor.wait(options.cycleWaitTime);
        tickStartTime = timer.getCurrentTimeInMillis();
        processingCycle = true;
        if (nextOptions != null) {
            switchToNewConfig();
        }
    }
    // Done outside the monitor to avoid calling listeners while holding the lock.
    propagateNewStatesToListeners();
}
/**
 * Lets the master election handler observe the election state via the database.
 *
 * @return true if the election handler reported that it did work, false otherwise or if
 *         watching failed with a non-interrupt exception (which is logged and swallowed)
 * @throws InterruptedException if the underlying watch was interrupted
 */
private boolean updateMasterElectionState() throws InterruptedException {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        throw (InterruptedException) new InterruptedException("Interrupted").initCause(e);
    } catch (Exception e) {
        // Pass the throwable to the logger so the full stack trace is preserved instead of
        // being flattened to toString().
        log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString(), e);
    }
    return false;
}
/**
 * Leaves the state-gatherer role: clears cached node states and logs an event, but only
 * if this node was actually a gatherer.
 */
private void stepDownAsStateGatherer() {
    if ( ! isStateGatherer) {
        return;
    }
    cluster.clearStates();
    eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
    isStateGatherer = false;
}
/** Swaps in the pending options and applies them; failures are logged, not rethrown. */
private void switchToNewConfig() {
    options = nextOptions;
    nextOptions = null;
    try {
        propagateOptions();
    } catch (Exception e) {
        log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e);
    }
}
/**
 * Answers at most one pending status page request.
 *
 * @return true if a request was served
 */
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer == null) {
        return false;
    }
    StatusPageServer.HttpRequest pendingRequest = statusPageServer.getCurrentHttpRequest();
    if (pendingRequest == null) {
        return false;
    }
    statusPageServer.answerCurrentStatusRequest(fetchStatusPage(pendingRequest));
    return true;
}
/**
 * Broadcasts the newest cluster state if allowed: either the initial grace period has
 * passed (firstAllowedStateBroadcast) or all nodes have already reported their state, and
 * the minimum interval since the previous broadcast has elapsed.
 *
 * @return true if a state was sent to any node
 */
private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException {
    boolean sentAny = false;
    long currentTime = timer.getCurrentTimeInMillis();
    if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
        && currentTime >= nextStateSendTime)
    {
        if (currentTime < firstAllowedStateBroadcast) {
            log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
            // Allow earlier broadcasts from now on, since all nodes have reported.
            firstAllowedStateBroadcast = currentTime;
        }
        sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this);
        if (sentAny) {
            nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
        }
    }
    return sentAny;
}
/** Delivers every queued new cluster state to all registered listeners, then clears the queue. */
private void propagateNewStatesToListeners() {
    if (newStates.isEmpty()) {
        return;
    }
    synchronized (systemStateListeners) {
        for (ClusterState state : newStates) {
            for (SystemStateListener listener : systemStateListeners) {
                listener.handleNewSystemState(state);
            }
        }
        newStates.clear();
    }
}
/**
 * Executes at most one queued remote task on this (controller) thread.
 *
 * @return true if a task was executed
 */
private boolean processNextQueuedRemoteTask() {
    if (remoteTasks.isEmpty()) {
        return false;
    }
    final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext();
    final RemoteClusterControllerTask nextTask = remoteTasks.poll();
    log.finest("Processing remote task " + nextTask.getClass().getName());
    nextTask.doRemoteFleetControllerTask(taskContext);
    nextTask.notifyCompleted();
    log.finest("Done processing remote task " + nextTask.getClass().getName());
    return true;
}
/** Builds a task context exposing this controller's cluster model, consolidated state and listeners. */
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentState = consolidatedClusterState();
    context.masterInfo = masterElectionHandler;
    context.nodeStateOrHostInfoChangeHandler = this;
    context.nodeAddedOrRemovedListener = this;
    return context;
}
/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
    if (publishedState.getClusterState() == State.UP) {
        return publishedState;
    }
    // Cluster is down: use the latest candidate's per-node info, but keep the published version.
    final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    current.setVersion(publishedState.getVersion());
    return current;
}
/*
System test observations:
- a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
- long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
/**
 * Refreshes locally cached cluster state: periodically reloads persisted wanted states and
 * start timestamps when not master, updates node info from slobrok, sends node state
 * requests, runs timer-driven state checks and recomputes the cluster state if needed.
 * Also flips this node into the state-gatherer role on first entry.
 *
 * @return true if any work was done
 */
private boolean resyncLocallyCachedState() throws InterruptedException {
    boolean didWork = false;
    // Non-masters only refresh persisted wanted states / start timestamps every 100th cycle.
    if ( ! isMaster && cycleCount % 100 == 0) {
        didWork = database.loadWantedStates(databaseContext);
        didWork |= database.loadStartTimestamps(cluster);
    }
    didWork |= nodeLookup.updateCluster(cluster, this);
    didWork |= stateGatherer.sendMessages(cluster, communicator, this);
    didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this);
    didWork |= recomputeClusterStateIfRequired();
    if ( ! isStateGatherer) {
        // NOTE(review): the "became gatherer" event and version refresh only happen when not
        // already master — confirm the master path is covered by handleLeadershipEdgeTransitions.
        if ( ! isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
        }
    }
    isStateGatherer = true;
    return didWork;
}
/**
 * Recomputes the candidate cluster state when something may have changed; promotes it to a
 * new versioned (published) state only if it differs enough from the current state or a
 * newer version was read from ZooKeeper. Emits events describing the state transition.
 *
 * @return true if a new versioned state was published
 */
private boolean recomputeClusterStateIfRequired() {
    if (mustRecomputeCandidateClusterState()) {
        stateChangeHandler.unsetStateChangedFlag();
        final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
        stateVersionTracker.updateLatestCandidateState(candidate);
        if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
            || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())
        {
            final long timeNowMs = timer.getCurrentTimeInMillis();
            // Capture the old state before promotion so the event diff sees both sides.
            final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState();
            stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
            emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs);
            handleNewSystemState(stateVersionTracker.getVersionedClusterState());
            return true;
        }
    }
    return false;
}
/** Generates an annotated cluster state from current options, time, cluster contents and the lowest observed distribution bit count. */
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
    params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
            .cluster(cluster)
            .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(params);
}
/**
 * Computes the event diff between two annotated states, logs each delta event, then logs
 * the generic "state applied" events.
 */
private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState,
                                            final AnnotatedClusterState toState,
                                            final long timeNowMs) {
    final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs));
    for (Event event : deltaEvents) {
        eventLog.add(event, isMaster);
    }
    emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState());
}
/**
 * Logs an event for the new cluster state version (with the textual diff from the old
 * state), plus a separate event if the distribution bit count changed.
 */
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    eventLog.add(new ClusterEvent(
            ClusterEvent.Type.SYSTEMSTATE,
            "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
                    fromClusterState.getTextualDifference(toClusterState),
            timeNowMs), isMaster);
    if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "Altering distribution bits in system from "
                        + fromClusterState.getDistributionBitCount() + " to " +
                        toClusterState.getDistributionBitCount(),
                timeNowMs), isMaster);
    }
}
/** Returns whether a candidate state recomputation is warranted (local change observed or new version from ZooKeeper). */
private boolean mustRecomputeCandidateClusterState() {
    return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper();
}
/**
 * Handles gaining or losing mastership. On becoming master: refresh the state version
 * from ZooKeeper, reload persisted timestamps/wanted states, log an event and set the
 * initial broadcast grace period. While master: persist wanted-state changes. On losing
 * mastership: log an event, reset the grace period and drop pending wanted-state changes.
 *
 * @return true if any database work was done
 */
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
    boolean didWork = false;
    if (masterElectionHandler.isMaster()) {
        if ( ! isMaster) {
            // Edge: just became master.
            metricUpdater.becameMaster();
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            didWork = database.loadStartTimestamps(cluster);
            didWork |= database.loadWantedStates(databaseContext);
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                    + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
            long currentTime = timer.getCurrentTimeInMillis();
            firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
            log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be "
                    + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
        }
        isMaster = true;
        if (wantedStateChanged) {
            database.saveWantedStates(databaseContext);
            wantedStateChanged = false;
        }
    } else {
        if (isMaster) {
            // Edge: just lost mastership.
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
            firstAllowedStateBroadcast = Long.MAX_VALUE;
            metricUpdater.noLongerMaster();
        }
        wantedStateChanged = false;
        isMaster = false;
    }
    return didWork;
}
/**
 * Main event loop: records this thread as the controller thread and runs tick() until
 * stopped. Interrupts stop the loop quietly; any other throwable is logged as fatal and
 * terminates the process.
 */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        while (running) {
            tick();
        }
    } catch (InterruptedException e) {
        log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        // Report through the logger only; printStackTrace() duplicated the output and
        // bypassed any configured log handlers.
        log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running = false; }
        System.exit(1);
    }
}
/** Context giving the database handler callbacks back into this controller and its cluster model. */
public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
    @Override
    public ContentCluster getCluster() { return cluster; }
    @Override
    public FleetController getFleetController() { return FleetController.this; }
    @Override
    public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
    @Override
    public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
/**
 * Test helper: blocks until the event thread has completed at least one full cycle after
 * this call, or throws IllegalStateException on timeout or if the controller has stopped.
 */
public void waitForCompleteCycle(long timeoutMS) {
    long endTime = System.currentTimeMillis() + timeoutMS;
    synchronized (monitor) {
        // If a cycle is currently in progress we need it plus one more full cycle.
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true;
        try{
            while (cycleCount < wantedCycle) {
                if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                if (!running) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                // NOTE(review): the interrupt is deliberately swallowed by this polling loop,
                // but the interrupt flag is not restored — confirm callers never rely on it.
                try{ monitor.wait(100); } catch (InterruptedException e) {}
            }
        } finally {
            waitingForCycle = false;
        }
    }
}
/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 *
 * Blocks until at least nodeCount nodes have acknowledged the given (or a newer) system
 * state version, or throws IllegalStateException after the given timeout (milliseconds).
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeout;
    synchronized (monitor) {
        while (true) {
            int ackedNodes = 0;
            for (NodeInfo node : cluster.getNodeInfo()) {
                if (node.getSystemStateVersionAcknowledged() >= version) {
                    ++ackedNodes;
                }
            }
            if (ackedNodes >= nodeCount) {
                log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
            }
            monitor.wait(10);
        }
    }
}
/**
 * Test helper: blocks until exactly the given numbers of distributors and storage nodes
 * have current (non-outdated) RPC addresses in slobrok, or throws IllegalStateException
 * after the given timeout (milliseconds).
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeoutMillis;
    synchronized (monitor) {
        while (true) {
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                        + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
                        + distCount + " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}
/** Returns whether the ZooKeeper database connection is currently open. */
public boolean hasZookeeperConnection() { return !database.isClosed(); }

/** Returns the number of slobrok mirror updates seen. Assumes the node lookup is a SlobrokClient. */
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }

/** Returns the content cluster model owned by this controller. */
public ContentCluster getCluster() { return cluster; }

/** Returns the logged events for the given node. */
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }

/** Returns the event log. */
public EventLog getEventLog() {
    return eventLog;
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static Logger log = Logger.getLogger(FleetController.class.getName());
private final Timer timer;
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
private Thread runner = null;
private boolean running = true;
private FleetControllerOptions options;
private FleetControllerOptions nextOptions;
private final List<SystemStateListener> systemStateListeners = new LinkedList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false;
private long cycleCount = 0;
private long nextStateSendTime = 0;
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>();
private long configGeneration = -1;
private long nextConfigGeneration = -1;
private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
private boolean isMaster = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); }
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
public long getConfigGeneration() { return configGeneration; }
@Override
public ContentCluster getCluster() { return cluster; }
};
public FleetController(Timer timer,
EventLog eventLog,
ContentCluster cluster,
NodeStateGatherer nodeStateGatherer,
Communicator communicator,
StatusPageServerInterface statusPage,
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
StateChangeHandler stateChangeHandler,
SystemStateBroadcaster systemStateBroadcaster,
MasterElectionHandler masterElectionHandler,
MetricUpdater metricUpdater,
FleetControllerOptions options) throws Exception
{
log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
this.timer = timer;
this.monitor = timer;
this.eventLog = eventLog;
this.options = options;
this.nodeLookup = nodeLookup;
this.cluster = cluster;
this.communicator = communicator;
this.database = database;
this.stateGatherer = nodeStateGatherer;
this.stateChangeHandler = stateChangeHandler;
this.systemStateBroadcaster = systemStateBroadcaster;
this.stateVersionTracker = new StateVersionTracker(metricUpdater);
this.metricUpdater = metricUpdater;
this.statusPageServer = statusPage;
this.rpcServer = server;
this.masterElectionHandler = masterElectionHandler;
this.statusRequestRouter.addHandler(
"^/node=([a-z]+)\\.(\\d+)$",
new LegacyNodePageRequestHandler(timer, eventLog, cluster));
this.statusRequestRouter.addHandler(
"^/state.*",
new NodeHealthRequestHandler(dataExtractor));
this.statusRequestRouter.addHandler(
"^/clusterstate",
new ClusterStateRequestHandler(stateVersionTracker));
this.statusRequestRouter.addHandler(
"^/$",
new LegacyIndexPageRequestHandler(
timer, options.showLocalSystemStatesInEventLog, cluster,
masterElectionHandler, stateVersionTracker,
eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
propagateOptions();
}
public static FleetController createForContainer(FleetControllerOptions options,
StatusPageServerInterface statusPageServer,
MetricReporter metricReporter) throws Exception {
Timer timer = new RealTimer();
return create(options, timer, statusPageServer, null, metricReporter);
}
public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception {
Timer timer = new RealTimer();
RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy);
StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort);
return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter());
}
private static FleetController create(FleetControllerOptions options,
Timer timer,
StatusPageServerInterface statusPageServer,
RpcServer rpcServer,
MetricReporter metricReporter) throws Exception
{
MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
EventLog log = new EventLog(timer, metricUpdater);
ContentCluster cluster = new ContentCluster(
options.clusterName,
options.nodes,
options.storageDistribution,
options.minStorageNodesUp,
options.minRatioOfStorageNodesUp);
NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
Communicator communicator = new RPCCommunicator(
timer,
options.fleetControllerIndex,
options.nodeStateRequestTimeoutMS,
options.nodeStateRequestTimeoutEarliestPercentage,
options.nodeStateRequestTimeoutLatestPercentage,
options.nodeStateRequestRoundTripTimeMaxSeconds);
DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
NodeLookup lookUp = new SlobrokClient(timer);
StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater);
SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
FleetController controller = new FleetController(
timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
controller.start();
return controller;
}
public void start() {
runner = new Thread(this);
runner.start();
}
public Object getMonitor() { return monitor; }
public boolean isRunning() {
synchronized(monitor) {
return running;
}
}
public boolean isMaster() {
synchronized (monitor) {
return masterElectionHandler.isMaster();
}
}
public ClusterState getClusterState() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterState();
}
}
public void schedule(RemoteClusterControllerTask task) {
synchronized (monitor) {
log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
remoteTasks.add(task);
}
}
/** Used for unit testing. */
public void addSystemStateListener(SystemStateListener listener) {
synchronized (systemStateListeners) {
systemStateListeners.add(listener);
com.yahoo.vdslib.state.ClusterState state = getSystemState();
if (state == null) throw new NullPointerException("Cluster state should never be null at this point");
listener.handleNewSystemState(state);
}
}
public FleetControllerOptions getOptions() {
synchronized(monitor) {
return options.clone();
}
}
public NodeState getReportedNodeState(Node n) {
synchronized(monitor) {
NodeInfo node = cluster.getNodeInfo(n);
if (node == null) {
throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
}
return node.getReportedState();
}
}
public NodeState getWantedNodeState(Node n) {
synchronized(monitor) {
return cluster.getNodeInfo(n).getWantedState();
}
}
public com.yahoo.vdslib.state.ClusterState getSystemState() {
synchronized(monitor) {
return stateVersionTracker.getVersionedClusterState();
}
}
public int getHttpPort() { return statusPageServer.getPort(); }
public int getRpcPort() { return rpcServer.getPort(); }
/**
 * Stops the event thread and shuts down the database session, servers and lookup.
 * Joins the event thread, so this should be called from a different thread.
 */
public void shutdown() throws InterruptedException, java.io.IOException {
boolean isStillRunning = false;
synchronized(monitor) {
if (running) {
isStillRunning = true;
}
}
if (runner != null && isStillRunning) {
log.log(LogLevel.INFO, "Joining event thread.");
running = false;
runner.interrupt();
runner.join();
}
log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread.");
// Take over the controller-thread role so verifyInControllerThread() keeps passing below.
controllerThreadId = Thread.currentThread().getId();
database.shutdown(this);
if (statusPageServer != null) {
statusPageServer.shutdown();
}
if (rpcServer != null) {
rpcServer.shutdown();
}
communicator.shutdown();
nodeLookup.shutdown();
}
/**
 * Stages a new configuration; the controller thread applies it at the start of its next tick.
 *
 * @param options the new options (must target the same fleet controller index)
 * @param configGeneration generation number to expose once the config takes effect
 */
public void updateOptions(FleetControllerOptions options, long configGeneration) {
synchronized(monitor) {
assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
nextOptions = options.clone();
nextConfigGeneration = configGeneration;
monitor.notifyAll();
}
}
/** Asserts that the caller runs on the controller thread (no-op before the thread id is known). */
private void verifyInControllerThread() {
    long callerId = Thread.currentThread().getId();
    if (controllerThreadId != null && controllerThreadId != callerId) {
        throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
    }
}
/** Returns the most recent candidate (not yet published) cluster state. */
private ClusterState latestCandidateClusterState() {
return stateVersionTracker.getLatestCandidateState().getClusterState();
}
/** Forwards a freshly reported node state to the state change handler. Controller thread only. */
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}
/** Records a wanted-state change; the flag makes the next tick persist wanted states if master. */
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
wantedStateChanged = true;
stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
/** Passes updated host info for a node on to the state version tracker. Controller thread only. */
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
verifyInControllerThread();
stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo);
}
/** Notifies the state change handler that a new node appeared. Controller thread only. */
@Override
public void handleNewNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewNode(node);
}
/** Notifies the state change handler that a node disappeared. Controller thread only. */
@Override
public void handleMissingNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
/** Notifies the state change handler that a node got a new RPC address. Controller thread only. */
@Override
public void handleNewRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewRpcAddress(node);
}
/** Notifies the state change handler that a node's RPC address reappeared. Controller thread only. */
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleReturnedRpcAddress(node);
}
/**
 * Accepts a newly generated cluster state: queues it for listeners, updates metrics,
 * hands it to the broadcaster, and (when master) persists its version to ZooKeeper.
 */
public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) {
verifyInControllerThread();
newStates.add(state);
metricUpdater.updateClusterStateMetrics(cluster, state);
systemStateBroadcaster.handleNewSystemState(state);
if (masterElectionHandler.isMaster()) {
storeClusterStateVersionToZooKeeper(state);
}
}
/**
 * Feeds master election data to the election handler and updates election metrics.
 *
 * The keys of the given map are fleet controller indexes; each value is the index
 * of the fleet controller that controller wants as master.
 *
 * If more than half of the fleet controllers want a given node to be master, and
 * that node also wants itself as master, that node is the single master.
 * If this condition is not met, there is currently no master.
 */
public void handleFleetData(Map<Integer, Integer> data) {
verifyInControllerThread();
log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler");
metricUpdater.updateMasterElectionMetrics(data);
masterElectionHandler.handleFleetData(data);
}
/**
 * Called when we can no longer contact the ZooKeeper database; informs the
 * election handler so mastership can be re-evaluated.
 */
public void lostDatabaseConnection() {
verifyInControllerThread();
masterElectionHandler.lostDatabaseConnection();
}
/** Called when all distributors have acked the newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
stateChangeHandler.handleAllDistributorsInSync(
stateVersionTracker.getVersionedClusterState(), nodes, database, context);
}
/** Returns true if newNodes differs from the configured set in membership or retired flags. */
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    if (cluster.getConfiguredNodes().size() != newNodes.size()) {
        return true;
    }
    if (!cluster.getConfiguredNodes().values().containsAll(newNodes)) {
        return true;
    }
    for (ConfiguredNode candidate : newNodes) {
        boolean currentlyRetired = cluster.getConfiguredNodes().get(candidate.index()).retired();
        if (currentlyRetired != candidate.retired()) {
            return true;
        }
    }
    return false;
}
/**
 * Applies the current options to every collaborating component. Called on the
 * controller thread whenever the options field has been set to a new set of options.
 */
private void propagateOptions() throws java.io.IOException, ListenFailedException {
verifyInControllerThread();
if (changesConfiguredNodeSet(options.nodes)) {
// Node set changed — force a fresh slobrok resync by resetting the generation count.
cluster.setSlobrokGenerationCount(0);
}
communicator.propagateOptions(options);
if (nodeLookup instanceof SlobrokClient)
((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
cluster.setPollingFrequency(options.statePollingFrequency);
cluster.setDistribution(options.storageDistribution);
cluster.setNodes(options.nodes);
cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp);
cluster.setMinStorageNodesUp(options.minStorageNodesUp);
database.setZooKeeperAddress(options.zooKeeperServerAddress);
database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
stateChangeHandler.reconfigureFromOptions(options);
// Make sure a new cluster state gets computed against the new config.
stateChangeHandler.setStateChangedFlag();
masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
if (rpcServer != null) {
rpcServer.setMasterElectionHandler(masterElectionHandler);
try{
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
} catch (ListenFailedException e) {
log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
} catch (Exception e) {
log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
}
}
if (statusPageServer != null) {
try{
statusPageServer.setPort(options.httpPort);
} catch (Exception e) {
log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
}
}
long currentTime = timer.getCurrentTimeInMillis();
// Never postpone an already scheduled state broadcast because of a config change.
nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
configGeneration = nextConfigGeneration;
nextConfigGeneration = -1;
}
/**
 * Resolves and runs the status page handler for the request; on failure builds an
 * HTML error page (404 for unknown paths, 500 with a hidden stack trace otherwise).
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
verifyInControllerThread();
StatusPageResponse.ResponseCode responseCode;
String message;
String hiddenMessage = "";
try {
StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
if (handler == null) {
throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
}
return handler.handle(httpRequest);
} catch (FileNotFoundException e) {
responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
message = e.getMessage();
} catch (Exception e) {
responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
message = "Internal Server Error";
hiddenMessage = ExceptionUtils.getStackTrace(e);
log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() +
": " + hiddenMessage);
}
// Error path: build a minimal HTML response describing the failure.
TimeZone tz = TimeZone.getTimeZone("UTC");
long currentTime = timer.getCurrentTimeInMillis();
StatusPageResponse response = new StatusPageResponse();
StringBuilder content = new StringBuilder();
response.setContentType("text/html");
response.setResponseCode(responseCode);
content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
response.writeHtmlHeader(content, message);
response.writeHtmlFooter(content, hiddenMessage);
response.writeContent(content.toString());
return response;
}
/**
 * Runs one iteration of the controller event loop: ZooKeeper work, master election,
 * node state gathering, state broadcasting, status/RPC requests and queued tasks.
 * Sleeps briefly when nothing was done. Listener callbacks run outside the monitor.
 */
public void tick() throws Exception {
synchronized (monitor) {
boolean didWork;
didWork = database.doNextZooKeeperTask(databaseContext);
didWork |= updateMasterElectionState();
didWork |= handleLeadershipEdgeTransitions();
stateChangeHandler.setMaster(isMaster);
didWork |= stateGatherer.processResponses(this);
if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
didWork |= resyncLocallyCachedState();
} else {
stepDownAsStateGatherer();
}
didWork |= systemStateBroadcaster.processResponses();
if (masterElectionHandler.isMaster()) {
didWork |= broadcastClusterStateToEligibleNodes();
}
didWork |= processAnyPendingStatusPageRequest();
if (rpcServer != null) {
didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
}
didWork |= processNextQueuedRemoteTask();
processingCycle = false;
++cycleCount;
long tickStopTime = timer.getCurrentTimeInMillis();
if (tickStopTime >= tickStartTime)
metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
// Idle back-off: only wait when no work happened and nobody is waiting on cycle completion.
if ( ! didWork && ! waitingForCycle)
monitor.wait(options.cycleWaitTime);
tickStartTime = timer.getCurrentTimeInMillis();
processingCycle = true;
if (nextOptions != null) {
switchToNewConfig();
}
}
// Outside the monitor to avoid calling listeners while holding the controller lock.
propagateNewStatesToListeners();
}
/**
 * Polls ZooKeeper for changes in the master election state.
 *
 * @return true if any election work was done this tick, false otherwise
 * @throws InterruptedException if interrupted while talking to the database
 */
private boolean updateMasterElectionState() throws InterruptedException {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        // Propagate interruption unchanged; re-wrapping it only obscured the original stack trace.
        throw e;
    } catch (Exception e) {
        // Best effort by design: a transient ZooKeeper failure must not kill the tick loop.
        log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString());
    }
    return false;
}
/** Leaves the state gatherer role; clears cached node states only on the edge transition. */
private void stepDownAsStateGatherer() {
    if (!isStateGatherer) {
        return;
    }
    cluster.clearStates();
    eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION,
            "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
    isStateGatherer = false;
}
/** Applies a configuration staged by updateOptions(). Runs on the controller thread. */
private void switchToNewConfig() {
options = nextOptions;
nextOptions = null;
try {
propagateOptions();
} catch (Exception e) {
log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e);
}
}
/** Answers one pending HTTP status request, if any. Returns true if a request was handled. */
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer == null) {
        return false;
    }
    StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
    if (statusRequest == null) {
        return false;
    }
    statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
    return true;
}
/**
 * Broadcasts the newest cluster state when permitted: the initial grace period has
 * passed (or all nodes have reported), and the minimum interval between broadcasts
 * has elapsed.
 *
 * @return true if a state was actually sent
 */
private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException {
boolean sentAny = false;
long currentTime = timer.getCurrentTimeInMillis();
if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
&& currentTime >= nextStateSendTime)
{
if (currentTime < firstAllowedStateBroadcast) {
log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
firstAllowedStateBroadcast = currentTime;
}
sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this);
if (sentAny) {
nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
}
}
return sentAny;
}
/** Delivers any cluster states produced this tick to all registered listeners, then forgets them. */
private void propagateNewStatesToListeners() {
    if (newStates.isEmpty()) {
        return;
    }
    synchronized (systemStateListeners) {
        for (ClusterState pendingState : newStates) {
            for (SystemStateListener listener : systemStateListeners) {
                listener.handleNewSystemState(pendingState);
            }
        }
        newStates.clear();
    }
}
/** Executes at most one queued remote cluster controller task. Returns true if one ran. */
private boolean processNextQueuedRemoteTask() {
    if (remoteTasks.isEmpty()) {
        return false;
    }
    final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
    final RemoteClusterControllerTask task = remoteTasks.poll();
    log.finest("Processing remote task " + task.getClass().getName());
    task.doRemoteFleetControllerTask(context);
    task.notifyCompleted();
    log.finest("Done processing remote task " + task.getClass().getName());
    return true;
}
/** Builds the context a remote task needs: cluster, consolidated state, and this controller's listeners. */
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
context.cluster = cluster;
context.currentState = consolidatedClusterState();
context.masterInfo = masterElectionHandler;
context.nodeStateOrHostInfoChangeHandler = this;
context.nodeAddedOrRemovedListener = this;
return context;
}
/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 *
 * @return the published state while the cluster is UP, otherwise the newest candidate
 *         state stamped with the published version number
 */
ClusterState consolidatedClusterState() {
final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
if (publishedState.getClusterState() == State.UP) {
return publishedState;
}
final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
current.setVersion(publishedState.getVersion());
return current;
}
/*
System test observations:
- a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
- long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
/**
 * Keeps the local view of the cluster fresh while this node is a state gatherer:
 * periodically reloads wanted states/timestamps when not master, syncs slobrok info,
 * sends state requests, fires timer events and recomputes the cluster state if needed.
 *
 * @return true if any work was done this tick
 */
private boolean resyncLocallyCachedState() throws InterruptedException {
boolean didWork = false;
// Slaves only refresh persisted wanted state every 100th tick; the master writes it itself.
if ( ! isMaster && cycleCount % 100 == 0) {
didWork = database.loadWantedStates(databaseContext);
didWork |= database.loadStartTimestamps(cluster);
}
didWork |= nodeLookup.updateCluster(cluster, this);
didWork |= stateGatherer.sendMessages(cluster, communicator, this);
didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this);
didWork |= recomputeClusterStateIfRequired();
// Edge transition into the state gatherer role.
if ( ! isStateGatherer) {
if ( ! isMaster) {
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
}
}
isStateGatherer = true;
return didWork;
}
/**
 * Recomputes the candidate cluster state if anything may have changed, and publishes
 * it as a new versioned state when the change is significant enough (or a newer
 * version was observed in ZooKeeper).
 *
 * @return true if a new versioned state was published
 */
private boolean recomputeClusterStateIfRequired() {
if (mustRecomputeCandidateClusterState()) {
stateChangeHandler.unsetStateChangedFlag();
final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
stateVersionTracker.updateLatestCandidateState(candidate);
if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
|| stateVersionTracker.hasReceivedNewVersionFromZooKeeper())
{
final long timeNowMs = timer.getCurrentTimeInMillis();
final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState();
stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs);
handleNewSystemState(stateVersionTracker.getVersionedClusterState());
return true;
}
}
return false;
}
/** Generates an annotated cluster state from the current options, cluster view and time. */
private AnnotatedClusterState computeCurrentAnnotatedState() {
ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
.cluster(cluster)
.lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
return ClusterStateGenerator.generatedStateFrom(params);
}
/**
 * Logs the per-node/cluster events implied by the difference between the previous
 * and the newly published state, followed by the state-applied summary events.
 */
private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState,
final AnnotatedClusterState toState,
final long timeNowMs) {
final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
EventDiffCalculator.params()
.cluster(cluster)
.fromState(fromState)
.toState(toState)
.currentTimeMs(timeNowMs));
for (Event event : deltaEvents) {
eventLog.add(event, isMaster);
}
emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState());
}
/**
 * Logs a summary event for the new cluster state version, plus a dedicated event
 * if the distribution bit count changed between the two states.
 */
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
fromClusterState.getTextualDifference(toClusterState),
timeNowMs), isMaster);
if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"Altering distribution bits in system from "
+ fromClusterState.getDistributionBitCount() + " to " +
toClusterState.getDistributionBitCount(),
timeNowMs), isMaster);
}
}
/** Returns true when node events or a newer ZooKeeper version require recomputing the candidate state. */
private boolean mustRecomputeCandidateClusterState() {
return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper();
}
/**
 * Synchronizes the local isMaster flag with the election result, doing the one-time
 * work on each edge: on gaining mastership, reload persisted state and delay the
 * first broadcast; on losing it, reset the broadcast gate and metrics. While master,
 * persists wanted states whenever they changed.
 *
 * @return true if any database work was done
 */
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
boolean didWork = false;
if (masterElectionHandler.isMaster()) {
if ( ! isMaster) {
// Edge: just became master.
metricUpdater.becameMaster();
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
didWork = database.loadStartTimestamps(cluster);
didWork |= database.loadWantedStates(databaseContext);
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
+ stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
long currentTime = timer.getCurrentTimeInMillis();
firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be "
+ options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
}
isMaster = true;
if (wantedStateChanged) {
database.saveWantedStates(databaseContext);
wantedStateChanged = false;
}
} else {
if (isMaster) {
// Edge: just lost mastership.
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
firstAllowedStateBroadcast = Long.MAX_VALUE;
metricUpdater.noLongerMaster();
}
wantedStateChanged = false;
isMaster = false;
}
return didWork;
}
/**
 * Event loop entry point: ticks until stopped or interrupted.
 * A fatal error is logged and terminates the process, since the controller
 * cannot safely continue in an unknown state.
 */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        while (running) {
            tick();
        }
    } catch (InterruptedException e) {
        log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        // The logger records the full stack trace; no need to duplicate it on stderr.
        log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running = false; }
        System.exit(1);
    }
}
/** Callback context handed to the DatabaseHandler, exposing this controller's cluster and listeners. */
public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
@Override
public ContentCluster getCluster() { return cluster; }
@Override
public FleetController getFleetController() { return FleetController.this; }
@Override
public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
@Override
public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
/**
 * Blocks until the event thread has completed at least one full cycle after this call.
 *
 * @param timeoutMS maximum time to wait before throwing IllegalStateException
 */
public void waitForCompleteCycle(long timeoutMS) {
long endTime = System.currentTimeMillis() + timeoutMS;
synchronized (monitor) {
// If a cycle is in progress it may have observed stale state, so require one extra cycle.
long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
waitingForCycle = true;
try{
while (cycleCount < wantedCycle) {
if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
if (!running) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
try{ monitor.wait(100); } catch (InterruptedException e) {}
}
} finally {
waitingForCycle = false;
}
}
}
/**
 * Blocks until at least nodeCount nodes have acknowledged system state version
 * {@code version} or higher, or throws IllegalStateException on timeout.
 *
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeout;
synchronized (monitor) {
while (true) {
int ackedNodes = 0;
for (NodeInfo node : cluster.getNodeInfo()) {
if (node.getSystemStateVersionAcknowledged() >= version) {
++ackedNodes;
}
}
if (ackedNodes >= nodeCount) {
log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
return;
}
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
}
monitor.wait(10);
}
}
}
/**
 * Blocks until exactly the given numbers of distributors and storage nodes have
 * current slobrok registrations, or throws IllegalStateException on timeout.
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeoutMillis;
synchronized (monitor) {
while (true) {
int distCount = 0, storCount = 0;
for (NodeInfo info : cluster.getNodeInfo()) {
if (!info.isRpcAddressOutdated()) {
if (info.isDistributor()) ++distCount;
else ++storCount;
}
}
if (distCount == distNodeCount && storCount == storNodeCount) return;
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
+ " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
+ distCount + " distributors and " + storCount + " storage nodes)");
}
monitor.wait(10);
}
}
}
/** Returns true while the ZooKeeper database session is open. */
public boolean hasZookeeperConnection() { return !database.isClosed(); }
/** Returns the number of slobrok mirror updates seen; assumes nodeLookup is a SlobrokClient. */
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
/** Returns the content cluster this controller manages. */
public ContentCluster getCluster() { return cluster; }
/** Returns the logged events for the given node. */
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
/** Returns the controller's event log. */
public EventLog getEventLog() {
return eventLog;
}
} |
The best place to handle ZK-related exceptions would probably be in ZooKeeperDatabase.java. First of all, ZooKeeperDatabase.java seems to wrap InterruptedException totally unnecessarily. InterruptedException could be made RuntimeException there and 'throws InterruptedException' could be removed everywhere. But more importantly, I think we should handle InterruptedException and ConnectionLossException identically. Aligning InterruptedException with ConnectionLossException unfortunately means logging on failures, at best. I wonder if this could be problematic during upgrade, since connection loss may then happen naturally (or is the connection only to localhost ZK?). | private void storeClusterStateVersionToZooKeeper(ClusterState state) {
try {
database.saveLatestSystemStateVersion(databaseContext, state.getVersion());
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("ZooKeeper write interrupted", e);
}
} | throw new RuntimeException("ZooKeeper write interrupted", e); | private void storeClusterStateVersionToZooKeeper(ClusterState state) {
try {
database.saveLatestSystemStateVersion(databaseContext, state.getVersion());
} catch (InterruptedException e) {
throw new RuntimeException("ZooKeeper write interrupted", e);
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static Logger log = Logger.getLogger(FleetController.class.getName());
private final Timer timer;
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
private Thread runner = null;
private boolean running = true;
private FleetControllerOptions options;
private FleetControllerOptions nextOptions;
private final List<SystemStateListener> systemStateListeners = new LinkedList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false;
private long cycleCount = 0;
private long nextStateSendTime = 0;
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>();
private long configGeneration = -1;
private long nextConfigGeneration = -1;
private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
private boolean isMaster = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); }
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
public long getConfigGeneration() { return configGeneration; }
@Override
public ContentCluster getCluster() { return cluster; }
};
public FleetController(Timer timer,
EventLog eventLog,
ContentCluster cluster,
NodeStateGatherer nodeStateGatherer,
Communicator communicator,
StatusPageServerInterface statusPage,
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
StateChangeHandler stateChangeHandler,
SystemStateBroadcaster systemStateBroadcaster,
MasterElectionHandler masterElectionHandler,
MetricUpdater metricUpdater,
FleetControllerOptions options) throws Exception
{
log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
this.timer = timer;
this.monitor = timer;
this.eventLog = eventLog;
this.options = options;
this.nodeLookup = nodeLookup;
this.cluster = cluster;
this.communicator = communicator;
this.database = database;
this.stateGatherer = nodeStateGatherer;
this.stateChangeHandler = stateChangeHandler;
this.systemStateBroadcaster = systemStateBroadcaster;
this.stateVersionTracker = new StateVersionTracker(metricUpdater);
this.metricUpdater = metricUpdater;
this.statusPageServer = statusPage;
this.rpcServer = server;
this.masterElectionHandler = masterElectionHandler;
this.statusRequestRouter.addHandler(
"^/node=([a-z]+)\\.(\\d+)$",
new LegacyNodePageRequestHandler(timer, eventLog, cluster));
this.statusRequestRouter.addHandler(
"^/state.*",
new NodeHealthRequestHandler(dataExtractor));
this.statusRequestRouter.addHandler(
"^/clusterstate",
new ClusterStateRequestHandler(stateVersionTracker));
this.statusRequestRouter.addHandler(
"^/$",
new LegacyIndexPageRequestHandler(
timer, options.showLocalSystemStatesInEventLog, cluster,
masterElectionHandler, stateVersionTracker,
eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
propagateOptions();
}
public static FleetController createForContainer(FleetControllerOptions options,
StatusPageServerInterface statusPageServer,
MetricReporter metricReporter) throws Exception {
Timer timer = new RealTimer();
return create(options, timer, statusPageServer, null, metricReporter);
}
public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception {
Timer timer = new RealTimer();
RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy);
StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort);
return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter());
}
private static FleetController create(FleetControllerOptions options,
Timer timer,
StatusPageServerInterface statusPageServer,
RpcServer rpcServer,
MetricReporter metricReporter) throws Exception
{
MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
EventLog log = new EventLog(timer, metricUpdater);
ContentCluster cluster = new ContentCluster(
options.clusterName,
options.nodes,
options.storageDistribution,
options.minStorageNodesUp,
options.minRatioOfStorageNodesUp);
NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
Communicator communicator = new RPCCommunicator(
timer,
options.fleetControllerIndex,
options.nodeStateRequestTimeoutMS,
options.nodeStateRequestTimeoutEarliestPercentage,
options.nodeStateRequestTimeoutLatestPercentage,
options.nodeStateRequestRoundTripTimeMaxSeconds);
DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
NodeLookup lookUp = new SlobrokClient(timer);
StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater);
SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
FleetController controller = new FleetController(
timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
controller.start();
return controller;
}
public void start() {
runner = new Thread(this);
runner.start();
}
public Object getMonitor() { return monitor; }
public boolean isRunning() {
synchronized(monitor) {
return running;
}
}
public boolean isMaster() {
synchronized (monitor) {
return masterElectionHandler.isMaster();
}
}
public ClusterState getClusterState() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterState();
}
}
public void schedule(RemoteClusterControllerTask task) {
synchronized (monitor) {
log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
remoteTasks.add(task);
}
}
/** Used for unit testing. */
public void addSystemStateListener(SystemStateListener listener) {
synchronized (systemStateListeners) {
systemStateListeners.add(listener);
com.yahoo.vdslib.state.ClusterState state = getSystemState();
if (state == null) throw new NullPointerException("Cluster state should never be null at this point");
listener.handleNewSystemState(state);
}
}
public FleetControllerOptions getOptions() {
synchronized(monitor) {
return options.clone();
}
}
public NodeState getReportedNodeState(Node n) {
synchronized(monitor) {
NodeInfo node = cluster.getNodeInfo(n);
if (node == null) {
throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
}
return node.getReportedState();
}
}
public NodeState getWantedNodeState(Node n) {
synchronized(monitor) {
return cluster.getNodeInfo(n).getWantedState();
}
}
public com.yahoo.vdslib.state.ClusterState getSystemState() {
synchronized(monitor) {
return stateVersionTracker.getVersionedClusterState();
}
}
public int getHttpPort() { return statusPageServer.getPort(); }
public int getRpcPort() { return rpcServer.getPort(); }
public void shutdown() throws InterruptedException, java.io.IOException {
boolean isStillRunning = false;
synchronized(monitor) {
if (running) {
isStillRunning = true;
}
}
if (runner != null && isStillRunning) {
log.log(LogLevel.INFO, "Joining event thread.");
running = false;
runner.interrupt();
runner.join();
}
log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread.");
controllerThreadId = Thread.currentThread().getId();
database.shutdown(this);
if (statusPageServer != null) {
statusPageServer.shutdown();
}
if (rpcServer != null) {
rpcServer.shutdown();
}
communicator.shutdown();
nodeLookup.shutdown();
}
public void updateOptions(FleetControllerOptions options, long configGeneration) {
synchronized(monitor) {
assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
nextOptions = options.clone();
nextConfigGeneration = configGeneration;
monitor.notifyAll();
}
}
private void verifyInControllerThread() {
if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
}
}
private ClusterState latestCandidateClusterState() {
return stateVersionTracker.getLatestCandidateState().getClusterState();
}
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
wantedStateChanged = true;
stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
verifyInControllerThread();
stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo);
}
@Override
public void handleNewNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewNode(node);
}
@Override
public void handleMissingNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
@Override
public void handleNewRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewRpcAddress(node);
}
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleReturnedRpcAddress(node);
}
public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) {
verifyInControllerThread();
newStates.add(state);
metricUpdater.updateClusterStateMetrics(cluster, state);
systemStateBroadcaster.handleNewSystemState(state);
if (masterElectionHandler.isMaster()) {
storeClusterStateVersionToZooKeeper(state);
}
}
/**
* This function gives data of the current state in master election.
* The keys in the given map are indexes of fleet controllers.
* The values are what fleetcontroller that fleetcontroller wants to
* become master.
*
* If more than half the fleetcontrollers want a node to be master and
* that node also wants itself as master, that node is the single master.
* If this condition is not met, there is currently no master.
*/
public void handleFleetData(Map<Integer, Integer> data) {
verifyInControllerThread();
log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler");
metricUpdater.updateMasterElectionMetrics(data);
masterElectionHandler.handleFleetData(data);
}
/**
* Called when we can no longer contact database.
*/
public void lostDatabaseConnection() {
verifyInControllerThread();
masterElectionHandler.lostDatabaseConnection();
}
/** Called when all distributors have acked newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
stateChangeHandler.handleAllDistributorsInSync(
stateVersionTracker.getVersionedClusterState(), nodes, database, context);
}
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
for (ConfiguredNode node : newNodes) {
if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired())
return true;
}
return false;
}
/** This is called when the options field has been set to a new set of options */
private void propagateOptions() throws java.io.IOException, ListenFailedException {
verifyInControllerThread();
if (changesConfiguredNodeSet(options.nodes)) {
cluster.setSlobrokGenerationCount(0);
}
communicator.propagateOptions(options);
if (nodeLookup instanceof SlobrokClient)
((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
cluster.setPollingFrequency(options.statePollingFrequency);
cluster.setDistribution(options.storageDistribution);
cluster.setNodes(options.nodes);
cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp);
cluster.setMinStorageNodesUp(options.minStorageNodesUp);
database.setZooKeeperAddress(options.zooKeeperServerAddress);
database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
stateChangeHandler.reconfigureFromOptions(options);
stateChangeHandler.setStateChangedFlag();
masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
if (rpcServer != null) {
rpcServer.setMasterElectionHandler(masterElectionHandler);
try{
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
} catch (ListenFailedException e) {
log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
} catch (Exception e) {
log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
}
}
if (statusPageServer != null) {
try{
statusPageServer.setPort(options.httpPort);
} catch (Exception e) {
log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
}
}
long currentTime = timer.getCurrentTimeInMillis();
nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
configGeneration = nextConfigGeneration;
nextConfigGeneration = -1;
}
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
verifyInControllerThread();
StatusPageResponse.ResponseCode responseCode;
String message;
String hiddenMessage = "";
try {
StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
if (handler == null) {
throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
}
return handler.handle(httpRequest);
} catch (FileNotFoundException e) {
responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
message = e.getMessage();
} catch (Exception e) {
responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
message = "Internal Server Error";
hiddenMessage = ExceptionUtils.getStackTrace(e);
log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() +
": " + hiddenMessage);
}
TimeZone tz = TimeZone.getTimeZone("UTC");
long currentTime = timer.getCurrentTimeInMillis();
StatusPageResponse response = new StatusPageResponse();
StringBuilder content = new StringBuilder();
response.setContentType("text/html");
response.setResponseCode(responseCode);
content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
response.writeHtmlHeader(content, message);
response.writeHtmlFooter(content, hiddenMessage);
response.writeContent(content.toString());
return response;
}
public void tick() throws Exception {
synchronized (monitor) {
boolean didWork;
didWork = database.doNextZooKeeperTask(databaseContext);
didWork |= updateMasterElectionState();
didWork |= handleLeadershipEdgeTransitions();
stateChangeHandler.setMaster(isMaster);
didWork |= stateGatherer.processResponses(this);
if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
didWork |= resyncLocallyCachedState();
} else {
stepDownAsStateGatherer();
}
didWork |= systemStateBroadcaster.processResponses();
if (masterElectionHandler.isMaster()) {
didWork |= broadcastClusterStateToEligibleNodes();
}
didWork |= processAnyPendingStatusPageRequest();
if (rpcServer != null) {
didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
}
didWork |= processNextQueuedRemoteTask();
processingCycle = false;
++cycleCount;
long tickStopTime = timer.getCurrentTimeInMillis();
if (tickStopTime >= tickStartTime)
metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
if ( ! didWork && ! waitingForCycle)
monitor.wait(options.cycleWaitTime);
tickStartTime = timer.getCurrentTimeInMillis();
processingCycle = true;
if (nextOptions != null) {
switchToNewConfig();
}
}
propagateNewStatesToListeners();
}
private boolean updateMasterElectionState() throws InterruptedException {
try {
return masterElectionHandler.watchMasterElection(database, databaseContext);
} catch (InterruptedException e) {
throw (InterruptedException) new InterruptedException("Interrupted").initCause(e);
} catch (Exception e) {
log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString());
}
return false;
}
private void stepDownAsStateGatherer() {
if (isStateGatherer) {
cluster.clearStates();
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
}
isStateGatherer = false;
}
private void switchToNewConfig() {
options = nextOptions;
nextOptions = null;
try {
propagateOptions();
} catch (Exception e) {
log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e);
}
}
private boolean processAnyPendingStatusPageRequest() {
if (statusPageServer != null) {
StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
if (statusRequest != null) {
statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
return true;
}
}
return false;
}
private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException {
boolean sentAny = false;
long currentTime = timer.getCurrentTimeInMillis();
if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
&& currentTime >= nextStateSendTime)
{
if (currentTime < firstAllowedStateBroadcast) {
log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
firstAllowedStateBroadcast = currentTime;
}
sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this);
if (sentAny) {
nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
}
}
return sentAny;
}
private void propagateNewStatesToListeners() {
if ( ! newStates.isEmpty()) {
synchronized (systemStateListeners) {
for (ClusterState state : newStates) {
for(SystemStateListener listener : systemStateListeners) {
listener.handleNewSystemState(state);
}
}
newStates.clear();
}
}
}
private boolean processNextQueuedRemoteTask() {
if ( ! remoteTasks.isEmpty()) {
final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
final RemoteClusterControllerTask task = remoteTasks.poll();
log.finest("Processing remote task " + task.getClass().getName());
task.doRemoteFleetControllerTask(context);
task.notifyCompleted();
log.finest("Done processing remote task " + task.getClass().getName());
return true;
}
return false;
}
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
context.cluster = cluster;
context.currentState = consolidatedClusterState();
context.masterInfo = masterElectionHandler;
context.nodeStateOrHostInfoChangeHandler = this;
context.nodeAddedOrRemovedListener = this;
return context;
}
/**
* A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
* up or down even when the whole cluster is down. The regular, published cluster state is not
* normally updated to reflect node events when the cluster is down.
*/
ClusterState consolidatedClusterState() {
final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
if (publishedState.getClusterState() == State.UP) {
return publishedState;
}
final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
current.setVersion(publishedState.getVersion());
return current;
}
/*
System test observations:
- a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
- long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
private boolean resyncLocallyCachedState() throws InterruptedException {
boolean didWork = false;
if ( ! isMaster && cycleCount % 100 == 0) {
didWork = database.loadWantedStates(databaseContext);
didWork |= database.loadStartTimestamps(cluster);
}
didWork |= nodeLookup.updateCluster(cluster, this);
didWork |= stateGatherer.sendMessages(cluster, communicator, this);
didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this);
didWork |= recomputeClusterStateIfRequired();
if ( ! isStateGatherer) {
if ( ! isMaster) {
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
}
}
isStateGatherer = true;
return didWork;
}
private boolean recomputeClusterStateIfRequired() {
if (mustRecomputeCandidateClusterState()) {
stateChangeHandler.unsetStateChangedFlag();
final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
stateVersionTracker.updateLatestCandidateState(candidate);
if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
|| stateVersionTracker.hasReceivedNewVersionFromZooKeeper())
{
final long timeNowMs = timer.getCurrentTimeInMillis();
final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState();
stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs);
handleNewSystemState(stateVersionTracker.getVersionedClusterState());
return true;
}
}
return false;
}
private AnnotatedClusterState computeCurrentAnnotatedState() {
ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
.cluster(cluster)
.lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
return ClusterStateGenerator.generatedStateFrom(params);
}
private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState,
final AnnotatedClusterState toState,
final long timeNowMs) {
final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
EventDiffCalculator.params()
.cluster(cluster)
.fromState(fromState)
.toState(toState)
.currentTimeMs(timeNowMs));
for (Event event : deltaEvents) {
eventLog.add(event, isMaster);
}
emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState());
}
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
fromClusterState.getTextualDifference(toClusterState),
timeNowMs), isMaster);
if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"Altering distribution bits in system from "
+ fromClusterState.getDistributionBitCount() + " to " +
toClusterState.getDistributionBitCount(),
timeNowMs), isMaster);
}
}
private boolean mustRecomputeCandidateClusterState() {
return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper();
}
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
boolean didWork = false;
if (masterElectionHandler.isMaster()) {
if ( ! isMaster) {
metricUpdater.becameMaster();
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
didWork = database.loadStartTimestamps(cluster);
didWork |= database.loadWantedStates(databaseContext);
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
+ stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
long currentTime = timer.getCurrentTimeInMillis();
firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be "
+ options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
}
isMaster = true;
if (wantedStateChanged) {
database.saveWantedStates(databaseContext);
wantedStateChanged = false;
}
} else {
if (isMaster) {
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
firstAllowedStateBroadcast = Long.MAX_VALUE;
metricUpdater.noLongerMaster();
}
wantedStateChanged = false;
isMaster = false;
}
return didWork;
}
public void run() {
controllerThreadId = Thread.currentThread().getId();
try {
processingCycle = true;
while(running)
tick();
} catch (InterruptedException e) {
log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
} catch (Throwable t) {
t.printStackTrace();
log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
synchronized (monitor) { running = false; }
System.exit(1);
}
}
public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
@Override
public ContentCluster getCluster() { return cluster; }
@Override
public FleetController getFleetController() { return FleetController.this; }
@Override
public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
@Override
public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
public void waitForCompleteCycle(long timeoutMS) {
long endTime = System.currentTimeMillis() + timeoutMS;
synchronized (monitor) {
long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
waitingForCycle = true;
try{
while (cycleCount < wantedCycle) {
if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
if (!running) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
try{ monitor.wait(100); } catch (InterruptedException e) {}
}
} finally {
waitingForCycle = false;
}
}
}
/**
* This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
* But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
* live performance to remove a non-problem.
*/
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeout;
synchronized (monitor) {
while (true) {
int ackedNodes = 0;
for (NodeInfo node : cluster.getNodeInfo()) {
if (node.getSystemStateVersionAcknowledged() >= version) {
++ackedNodes;
}
}
if (ackedNodes >= nodeCount) {
log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
return;
}
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
}
monitor.wait(10);
}
}
}
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeoutMillis;
synchronized (monitor) {
while (true) {
int distCount = 0, storCount = 0;
for (NodeInfo info : cluster.getNodeInfo()) {
if (!info.isRpcAddressOutdated()) {
if (info.isDistributor()) ++distCount;
else ++storCount;
}
}
if (distCount == distNodeCount && storCount == storNodeCount) return;
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
+ " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
+ distCount + " distributors and " + storCount + " storage nodes)");
}
monitor.wait(10);
}
}
}
public boolean hasZookeeperConnection() { return !database.isClosed(); }
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
public ContentCluster getCluster() { return cluster; }
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
public EventLog getEventLog() {
return eventLog;
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static Logger log = Logger.getLogger(FleetController.class.getName());
private final Timer timer;
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
private Thread runner = null;
private boolean running = true;
private FleetControllerOptions options;
private FleetControllerOptions nextOptions;
private final List<SystemStateListener> systemStateListeners = new LinkedList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false;
private long cycleCount = 0;
private long nextStateSendTime = 0;
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>();
private long configGeneration = -1;
private long nextConfigGeneration = -1;
private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
private boolean isMaster = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); }
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
public long getConfigGeneration() { return configGeneration; }
@Override
public ContentCluster getCluster() { return cluster; }
};
public FleetController(Timer timer,
EventLog eventLog,
ContentCluster cluster,
NodeStateGatherer nodeStateGatherer,
Communicator communicator,
StatusPageServerInterface statusPage,
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
StateChangeHandler stateChangeHandler,
SystemStateBroadcaster systemStateBroadcaster,
MasterElectionHandler masterElectionHandler,
MetricUpdater metricUpdater,
FleetControllerOptions options) throws Exception
{
log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
this.timer = timer;
this.monitor = timer;
this.eventLog = eventLog;
this.options = options;
this.nodeLookup = nodeLookup;
this.cluster = cluster;
this.communicator = communicator;
this.database = database;
this.stateGatherer = nodeStateGatherer;
this.stateChangeHandler = stateChangeHandler;
this.systemStateBroadcaster = systemStateBroadcaster;
this.stateVersionTracker = new StateVersionTracker(metricUpdater);
this.metricUpdater = metricUpdater;
this.statusPageServer = statusPage;
this.rpcServer = server;
this.masterElectionHandler = masterElectionHandler;
this.statusRequestRouter.addHandler(
"^/node=([a-z]+)\\.(\\d+)$",
new LegacyNodePageRequestHandler(timer, eventLog, cluster));
this.statusRequestRouter.addHandler(
"^/state.*",
new NodeHealthRequestHandler(dataExtractor));
this.statusRequestRouter.addHandler(
"^/clusterstate",
new ClusterStateRequestHandler(stateVersionTracker));
this.statusRequestRouter.addHandler(
"^/$",
new LegacyIndexPageRequestHandler(
timer, options.showLocalSystemStatesInEventLog, cluster,
masterElectionHandler, stateVersionTracker,
eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
propagateOptions();
}
public static FleetController createForContainer(FleetControllerOptions options,
StatusPageServerInterface statusPageServer,
MetricReporter metricReporter) throws Exception {
Timer timer = new RealTimer();
return create(options, timer, statusPageServer, null, metricReporter);
}
public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception {
Timer timer = new RealTimer();
RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy);
StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort);
return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter());
}
private static FleetController create(FleetControllerOptions options,
Timer timer,
StatusPageServerInterface statusPageServer,
RpcServer rpcServer,
MetricReporter metricReporter) throws Exception
{
MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
EventLog log = new EventLog(timer, metricUpdater);
ContentCluster cluster = new ContentCluster(
options.clusterName,
options.nodes,
options.storageDistribution,
options.minStorageNodesUp,
options.minRatioOfStorageNodesUp);
NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
Communicator communicator = new RPCCommunicator(
timer,
options.fleetControllerIndex,
options.nodeStateRequestTimeoutMS,
options.nodeStateRequestTimeoutEarliestPercentage,
options.nodeStateRequestTimeoutLatestPercentage,
options.nodeStateRequestRoundTripTimeMaxSeconds);
DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
NodeLookup lookUp = new SlobrokClient(timer);
StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater);
SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
FleetController controller = new FleetController(
timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
controller.start();
return controller;
}
public void start() {
runner = new Thread(this);
runner.start();
}
public Object getMonitor() { return monitor; }
public boolean isRunning() {
synchronized(monitor) {
return running;
}
}
public boolean isMaster() {
synchronized (monitor) {
return masterElectionHandler.isMaster();
}
}
public ClusterState getClusterState() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterState();
}
}
public void schedule(RemoteClusterControllerTask task) {
synchronized (monitor) {
log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
remoteTasks.add(task);
}
}
/** Used for unit testing. */
public void addSystemStateListener(SystemStateListener listener) {
synchronized (systemStateListeners) {
systemStateListeners.add(listener);
com.yahoo.vdslib.state.ClusterState state = getSystemState();
if (state == null) throw new NullPointerException("Cluster state should never be null at this point");
listener.handleNewSystemState(state);
}
}
public FleetControllerOptions getOptions() {
synchronized(monitor) {
return options.clone();
}
}
public NodeState getReportedNodeState(Node n) {
synchronized(monitor) {
NodeInfo node = cluster.getNodeInfo(n);
if (node == null) {
throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
}
return node.getReportedState();
}
}
public NodeState getWantedNodeState(Node n) {
synchronized(monitor) {
return cluster.getNodeInfo(n).getWantedState();
}
}
public com.yahoo.vdslib.state.ClusterState getSystemState() {
synchronized(monitor) {
return stateVersionTracker.getVersionedClusterState();
}
}
public int getHttpPort() { return statusPageServer.getPort(); }
public int getRpcPort() { return rpcServer.getPort(); }
public void shutdown() throws InterruptedException, java.io.IOException {
boolean isStillRunning = false;
synchronized(monitor) {
if (running) {
isStillRunning = true;
}
}
if (runner != null && isStillRunning) {
log.log(LogLevel.INFO, "Joining event thread.");
running = false;
runner.interrupt();
runner.join();
}
log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread.");
controllerThreadId = Thread.currentThread().getId();
database.shutdown(this);
if (statusPageServer != null) {
statusPageServer.shutdown();
}
if (rpcServer != null) {
rpcServer.shutdown();
}
communicator.shutdown();
nodeLookup.shutdown();
}
public void updateOptions(FleetControllerOptions options, long configGeneration) {
synchronized(monitor) {
assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
nextOptions = options.clone();
nextConfigGeneration = configGeneration;
monitor.notifyAll();
}
}
private void verifyInControllerThread() {
if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
}
}
private ClusterState latestCandidateClusterState() {
return stateVersionTracker.getLatestCandidateState().getClusterState();
}
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}
/** Records a wanted-state proposal and flags wanted state as dirty for later persistence. */
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    wantedStateChanged = true;
    stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
/** Passes fresh host info for a node on to the state version tracker. Controller thread only. */
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
    verifyInControllerThread();
    stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo);
}
/** Notifies the state change handler that a node appeared. Controller thread only. */
@Override
public void handleNewNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewNode(node);
}
/** Notifies the state change handler that a node disappeared. Controller thread only. */
@Override
public void handleMissingNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
/** Notifies the state change handler that a node got a new RPC address. Controller thread only. */
@Override
public void handleNewRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewRpcAddress(node);
}
/** Notifies the state change handler that a node's RPC address reappeared. Controller thread only. */
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleReturnedRpcAddress(node);
}
/**
 * Accepts a freshly generated cluster state: queues it for listener delivery, updates
 * metrics, hands it to the broadcaster, and (when master) persists its version to ZooKeeper.
 */
public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) {
    verifyInControllerThread();
    newStates.add(state);
    metricUpdater.updateClusterStateMetrics(cluster, state);
    systemStateBroadcaster.handleNewSystemState(state);
    if (masterElectionHandler.isMaster()) {
        storeClusterStateVersionToZooKeeper(state);
    }
}
/**
 * Receives master election data. The map is keyed on fleet controller index; each value
 * is the index that controller votes for as master. A node becomes the single master
 * when more than half of the controllers vote for it and it also votes for itself;
 * otherwise there is currently no master.
 */
public void handleFleetData(Map<Integer, Integer> data) {
    verifyInControllerThread();
    log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler");
    metricUpdater.updateMasterElectionMetrics(data);
    masterElectionHandler.handleFleetData(data);
}
/** Invoked when the (ZooKeeper) database connection is lost; informs the election handler. */
public void lostDatabaseConnection() {
    verifyInControllerThread();
    masterElectionHandler.lostDatabaseConnection();
}
/** Called once every distributor has acked the newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
    Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
    stateChangeHandler.handleAllDistributorsInSync(
            stateVersionTracker.getVersionedClusterState(), nodes, database, context);
}
/** True when the given node set differs from the configured one in membership or retired flag. */
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
    if ( ! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
    // Same membership; a change in any node's retired flag still counts as a change.
    return newNodes.stream()
            .anyMatch(node -> node.retired() != cluster.getConfiguredNodes().get(node.index()).retired());
}
/**
 * Pushes the freshly installed options to every subsystem (communicator, lookup, event
 * log, cluster model, database, state gatherer/handler, election handler, servers) and
 * finalizes the config generation switch. Called from the controller thread only.
 */
private void propagateOptions() throws java.io.IOException, ListenFailedException {
    verifyInControllerThread();
    // A changed node set invalidates the cached slobrok generation bookkeeping.
    if (changesConfiguredNodeSet(options.nodes)) {
        cluster.setSlobrokGenerationCount(0);
    }
    communicator.propagateOptions(options);
    if (nodeLookup instanceof SlobrokClient)
        ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
    eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
    cluster.setPollingFrequency(options.statePollingFrequency);
    cluster.setDistribution(options.storageDistribution);
    cluster.setNodes(options.nodes);
    cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp);
    cluster.setMinStorageNodesUp(options.minStorageNodesUp);
    database.setZooKeeperAddress(options.zooKeeperServerAddress);
    database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
    stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
    stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
    stateChangeHandler.reconfigureFromOptions(options);
    stateChangeHandler.setStateChangedFlag(); // always force a state recomputation after reconfig
    masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
    masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
    if (rpcServer != null) {
        rpcServer.setMasterElectionHandler(masterElectionHandler);
        try{
            rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
        } catch (ListenFailedException e) {
            // Bind failures are tolerated: the port may legitimately be taken over by other services.
            log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        } catch (Exception e) {
            log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
        }
    }
    if (statusPageServer != null) {
        try{
            statusPageServer.setPort(options.httpPort);
        } catch (Exception e) {
            log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        }
    }
    long currentTime = timer.getCurrentTimeInMillis();
    // Never postpone an already-scheduled state broadcast because of a reconfig.
    nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
    configGeneration = nextConfigGeneration;
    nextConfigGeneration = -1;
}
/**
 * Renders a status page for the given HTTP request. Requests are routed through
 * statusRequestRouter; unmatched paths produce a 404 page, and unexpected failures a
 * 500 page whose stack trace is embedded only as hidden content.
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode responseCode;
    String message;
    String hiddenMessage = "";
    try {
        StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
        if (handler == null) {
            throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
        }
        return handler.handle(httpRequest);
    } catch (FileNotFoundException e) {
        responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
        message = e.getMessage();
    } catch (Exception e) {
        responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        message = "Internal Server Error";
        hiddenMessage = ExceptionUtils.getStackTrace(e);
        log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() +
                ": " + hiddenMessage);
    }
    // Only reached on error: build a minimal HTML error page.
    TimeZone tz = TimeZone.getTimeZone("UTC");
    long currentTime = timer.getCurrentTimeInMillis();
    StatusPageResponse response = new StatusPageResponse();
    StringBuilder content = new StringBuilder();
    response.setContentType("text/html");
    response.setResponseCode(responseCode);
    content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
    content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
    response.writeHtmlHeader(content, message);
    response.writeHtmlFooter(content, hiddenMessage);
    response.writeContent(content.toString());
    return response;
}
/**
 * One iteration of the controller event loop: database work, master election and role
 * edges, state gathering/recomputation, broadcasting, status page and RPC serving, and
 * queued remote tasks. Waits on the monitor when the iteration did no work.
 */
public void tick() throws Exception {
    synchronized (monitor) {
        boolean didWork;
        didWork = database.doNextZooKeeperTask(databaseContext);
        didWork |= updateMasterElectionState();
        didWork |= handleLeadershipEdgeTransitions();
        stateChangeHandler.setMaster(isMaster);
        didWork |= stateGatherer.processResponses(this);
        // Only the first stateGatherCount election candidates gather node state.
        if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
            didWork |= resyncLocallyCachedState();
        } else {
            stepDownAsStateGatherer();
        }
        didWork |= systemStateBroadcaster.processResponses();
        if (masterElectionHandler.isMaster()) {
            didWork |= broadcastClusterStateToEligibleNodes();
        }
        didWork |= processAnyPendingStatusPageRequest();
        if (rpcServer != null) {
            didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
        }
        didWork |= processNextQueuedRemoteTask();
        processingCycle = false;
        ++cycleCount;
        long tickStopTime = timer.getCurrentTimeInMillis();
        if (tickStopTime >= tickStartTime)
            metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
        // Idle wait only when nothing happened and no test is blocked in waitForCompleteCycle.
        if ( ! didWork && ! waitingForCycle)
            monitor.wait(options.cycleWaitTime);
        tickStartTime = timer.getCurrentTimeInMillis();
        processingCycle = true;
        if (nextOptions != null) {
            switchToNewConfig();
        }
    }
    // Listener callbacks happen outside the monitor (listeners synchronize separately).
    propagateNewStatesToListeners();
}
/**
 * Lets the master election handler observe the current election state.
 * Interruption propagates to the caller; any other failure is logged and retried on
 * the next tick.
 *
 * @return true if the handler did any work
 * @throws InterruptedException if the watch was interrupted
 */
private boolean updateMasterElectionState() throws InterruptedException {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        // Rethrow the original exception instead of wrapping it in a new
        // InterruptedException("Interrupted"); this preserves the original stack
        // trace and message for the caller. The explicit catch is still needed so
        // the broad Exception handler below cannot swallow interruption.
        throw e;
    } catch (Exception e) {
        log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString());
    }
    return false;
}
/** Drops the state-gatherer role, clearing cached node states if this node held it. */
private void stepDownAsStateGatherer() {
    if (isStateGatherer) {
        cluster.clearStates();
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
    }
    isStateGatherer = false;
}
/** Installs the staged options and propagates them; failures are logged, not thrown. */
private void switchToNewConfig() {
    options = nextOptions;
    nextOptions = null;
    try {
        propagateOptions();
    } catch (Exception e) {
        log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e);
    }
}
/** Serves at most one queued status page request. Returns true if any work was done. */
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer == null) {
        return false;
    }
    StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
    if (statusRequest == null) {
        return false;
    }
    statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
    return true;
}
/**
 * Broadcasts the pending cluster state, but not before the initial hold-off period has
 * passed (unless every node has already reported its state), and never more often than
 * minTimeBetweenNewSystemStates. Returns true if anything was sent.
 */
private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException {
    boolean sentAny = false;
    long currentTime = timer.getCurrentTimeInMillis();
    if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
        && currentTime >= nextStateSendTime)
    {
        if (currentTime < firstAllowedStateBroadcast) {
            log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
            // All nodes reported in, so the hold-off no longer serves a purpose.
            firstAllowedStateBroadcast = currentTime;
        }
        sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this);
        if (sentAny) {
            nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
        }
    }
    return sentAny;
}
/** Delivers every queued cluster state to all registered listeners, then clears the queue. */
private void propagateNewStatesToListeners() {
    if (newStates.isEmpty()) {
        return;
    }
    synchronized (systemStateListeners) {
        for (ClusterState state : newStates) {
            for (SystemStateListener listener : systemStateListeners) {
                listener.handleNewSystemState(state);
            }
        }
        newStates.clear();
    }
}
/** Runs the oldest queued remote task, if any. Returns true if a task was processed. */
private boolean processNextQueuedRemoteTask() {
    if (remoteTasks.isEmpty()) {
        return false;
    }
    final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
    final RemoteClusterControllerTask task = remoteTasks.poll();
    log.finest("Processing remote task " + task.getClass().getName());
    task.doRemoteFleetControllerTask(context);
    task.notifyCompleted();
    log.finest("Done processing remote task " + task.getClass().getName());
    return true;
}
/** Builds the context handed to remote cluster controller tasks. */
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    RemoteClusterControllerTask.Context ctx = new RemoteClusterControllerTask.Context();
    ctx.cluster = cluster;
    ctx.currentState = consolidatedClusterState();
    ctx.masterInfo = masterElectionHandler;
    ctx.nodeStateOrHostInfoChangeHandler = this;
    ctx.nodeAddedOrRemovedListener = this;
    return ctx;
}
/**
 * Returns a cluster state whose node availability is up to date even while the cluster
 * itself is down. The published state is not normally updated for node events in that
 * situation, so the latest candidate state is used instead, stamped with the published
 * version number.
 */
ClusterState consolidatedClusterState() {
    final ClusterState published = stateVersionTracker.getVersionedClusterState();
    if (published.getClusterState() == State.UP) {
        return published;
    }
    ClusterState candidate = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    candidate.setVersion(published.getVersion());
    return candidate;
}
/*
System test observations:
- a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
- long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
/**
 * Refreshes this node's cached view of the cluster: ZooKeeper-persisted wanted states and
 * start timestamps (periodically, for non-masters), slobrok lookup, node state polling,
 * timer-driven transitions and, if needed, cluster state regeneration. Also takes the
 * state-gatherer role. Returns true if any work was done.
 */
private boolean resyncLocallyCachedState() throws InterruptedException {
    boolean didWork = false;
    // Non-masters re-read master-owned ZK data only every 100 cycles — presumably to limit
    // ZooKeeper load; TODO confirm.
    if ( ! isMaster && cycleCount % 100 == 0) {
        didWork = database.loadWantedStates(databaseContext);
        didWork |= database.loadStartTimestamps(cluster);
    }
    didWork |= nodeLookup.updateCluster(cluster, this);
    didWork |= stateGatherer.sendMessages(cluster, communicator, this);
    didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this);
    didWork |= recomputeClusterStateIfRequired();
    if ( ! isStateGatherer) {
        if ( ! isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
        }
    }
    isStateGatherer = true;
    return didWork;
}
/**
 * Regenerates the candidate cluster state when flagged, and promotes it to a new
 * versioned state if it differs enough from the current one or a newer version was
 * observed in ZooKeeper. Returns true if a new state was published.
 */
private boolean recomputeClusterStateIfRequired() {
    if (mustRecomputeCandidateClusterState()) {
        stateChangeHandler.unsetStateChangedFlag();
        final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
        stateVersionTracker.updateLatestCandidateState(candidate);
        if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
            || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())
        {
            final long timeNowMs = timer.getCurrentTimeInMillis();
            // Snapshot the pre-promotion state so the event diff compares old vs. new.
            final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState();
            stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
            emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs);
            handleNewSystemState(stateVersionTracker.getVersionedClusterState());
            return true;
        }
    }
    return false;
}
/** Derives a fresh annotated cluster state from current options, time and cluster contents. */
private AnnotatedClusterState computeCurrentAnnotatedState() {
    final ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
    params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
          .cluster(cluster)
          .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(params);
}
/** Logs the per-node/cluster events describing the delta between two annotated states. */
private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState,
                                            final AnnotatedClusterState toState,
                                            final long timeNowMs) {
    final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs));
    deltaEvents.forEach(event -> eventLog.add(event, isMaster));
    emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState());
}
/**
 * Logs the "new cluster state version" event, plus a separate event when the
 * distribution bit count changed between the two states.
 */
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    eventLog.add(new ClusterEvent(
            ClusterEvent.Type.SYSTEMSTATE,
            "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
            fromClusterState.getTextualDifference(toClusterState),
            timeNowMs), isMaster);
    if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "Altering distribution bits in system from "
                + fromClusterState.getDistributionBitCount() + " to " +
                toClusterState.getDistributionBitCount(),
                timeNowMs), isMaster);
    }
}
/** Whether node events or a new ZooKeeper version require regenerating the candidate state. */
private boolean mustRecomputeCandidateClusterState() {
    return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper();
}
/**
 * Tracks master-role edges. On becoming master: reload ZooKeeper-persisted state and set
 * up the initial broadcast hold-off. While master: persist changed wanted states. On
 * losing mastership: log the event and disable broadcasting. Returns true if work was done.
 */
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
    boolean didWork = false;
    if (masterElectionHandler.isMaster()) {
        if ( ! isMaster) {
            // Edge: just won the election.
            metricUpdater.becameMaster();
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            didWork = database.loadStartTimestamps(cluster);
            didWork |= database.loadWantedStates(databaseContext);
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
            long currentTime = timer.getCurrentTimeInMillis();
            firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
            log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be "
                + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
        }
        isMaster = true;
        if (wantedStateChanged) {
            database.saveWantedStates(databaseContext);
            wantedStateChanged = false;
        }
    } else {
        if (isMaster) {
            // Edge: just lost the master role.
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
            firstAllowedStateBroadcast = Long.MAX_VALUE;
            metricUpdater.noLongerMaster();
        }
        wantedStateChanged = false;
        isMaster = false;
    }
    return didWork;
}
/**
 * Event loop entry point: ticks until stopped. Interruption stops the loop quietly;
 * any other throwable is fatal and terminates the process with exit code 1.
 */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        while (running)
            tick();
    } catch (InterruptedException e) {
        log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        // The logger call records the full stack trace via the throwable argument, so
        // the previous t.printStackTrace() duplicated it on raw stderr and is removed.
        log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running = false; }
        System.exit(1);
    }
}
/** Adapter exposing this controller and its cluster model to the database layer. */
public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
    @Override
    public ContentCluster getCluster() { return cluster; }
    @Override
    public FleetController getFleetController() { return FleetController.this; }
    @Override
    public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
    @Override
    public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
/**
 * Blocks until the controller finishes at least one full tick cycle started after this
 * call. Intended for tests.
 *
 * @throws IllegalStateException on timeout or if the controller is not running
 */
public void waitForCompleteCycle(long timeoutMS) {
    long deadline = System.currentTimeMillis() + timeoutMS;
    synchronized (monitor) {
        // If a cycle is in progress we must see it finish plus one complete cycle after it.
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true;
        try {
            while (cycleCount < wantedCycle) {
                if (System.currentTimeMillis() > deadline) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                if (!running) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                // Best-effort poll; interruption is deliberately ignored and polling continues.
                try { monitor.wait(100); } catch (InterruptedException ignored) {}
            }
        } finally {
            waitingForCycle = false;
        }
    }
}
/**
 * Test helper: blocks until at least {@code nodeCount} nodes have acknowledged cluster
 * state {@code version} (or newer). Not fully thread-safe against concurrent cluster
 * mutation, but only used from unit tests where that is not an issue, and locking here
 * would cost live performance for a non-problem.
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeout;
    synchronized (monitor) {
        while (true) {
            long ackedNodes = cluster.getNodeInfo().stream()
                    .filter(node -> node.getSystemStateVersionAcknowledged() >= version)
                    .count();
            if (ackedNodes >= nodeCount) {
                log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            if (maxTime - System.currentTimeMillis() <= 0) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
            }
            monitor.wait(10);
        }
    }
}
/**
 * Test helper: blocks until exactly the expected number of distributors and storage
 * nodes have current (non-outdated) RPC addresses registered in slobrok.
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeoutMillis;
    synchronized (monitor) {
        while (true) {
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                    + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
                    + distCount + " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}
/** True while the database (ZooKeeper) connection is open. */
public boolean hasZookeeperConnection() { return !database.isClosed(); }
/** Test helper. NOTE(review): assumes nodeLookup is a SlobrokClient — verify for other lookups. */
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
/** Returns the cluster model this controller manages. */
public ContentCluster getCluster() { return cluster; }
/** Returns the logged events for the given node. */
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
/** Returns the shared event log. */
public EventLog getEventLog() {
    return eventLog;
}
} |
Agreed on all counts, though altering `ZooKeeperDatabase` is enough work that it should be its own task. I suggest deferring it and getting this fix in first. PTAL. | private void storeClusterStateVersionToZooKeeper(ClusterState state) {
try {
database.saveLatestSystemStateVersion(databaseContext, state.getVersion());
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("ZooKeeper write interrupted", e);
}
} | throw new RuntimeException("ZooKeeper write interrupted", e); | private void storeClusterStateVersionToZooKeeper(ClusterState state) {
    try {
        database.saveLatestSystemStateVersion(databaseContext, state.getVersion());
    } catch (InterruptedException e) {
        // Fix: restore the thread's interrupt status before translating to an unchecked
        // exception, so the controller loop still observes the interruption and can shut
        // down instead of silently losing the signal.
        Thread.currentThread().interrupt();
        throw new RuntimeException("ZooKeeper write interrupted", e);
    }
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static Logger log = Logger.getLogger(FleetController.class.getName());
// Collaborators, wired in the constructor and immutable thereafter.
private final Timer timer;
private final Object monitor;
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
// Mutable controller state. NOTE(review): most of these appear to be guarded by
// 'monitor' or confined to the controller thread — confirm before relying on it.
private Thread runner = null;
private boolean running = true;
private FleetControllerOptions options;
private FleetControllerOptions nextOptions; // staged config, installed on next tick
private final List<SystemStateListener> systemStateListeners = new LinkedList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false; // wanted states need saving to ZooKeeper
private long cycleCount = 0;
private long nextStateSendTime = 0;
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); // pending listener delivery
private long configGeneration = -1;
private long nextConfigGeneration = -1;
private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
private boolean isMaster = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE; // hold-off before first broadcast
private long tickStartTime = Long.MAX_VALUE;
// Read-only view of controller state for status page handlers.
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
    @Override
    public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); }
    @Override
    public FleetControllerOptions getOptions() { return options; }
    @Override
    public long getConfigGeneration() { return configGeneration; }
    @Override
    public ContentCluster getCluster() { return cluster; }
};
/**
 * Wires a fleet controller from externally created collaborators, registers the status
 * page handlers, and applies the initial options. Does not start the event thread; see
 * {@code start()}.
 */
public FleetController(Timer timer,
                       EventLog eventLog,
                       ContentCluster cluster,
                       NodeStateGatherer nodeStateGatherer,
                       Communicator communicator,
                       StatusPageServerInterface statusPage,
                       RpcServer server,
                       NodeLookup nodeLookup,
                       DatabaseHandler database,
                       StateChangeHandler stateChangeHandler,
                       SystemStateBroadcaster systemStateBroadcaster,
                       MasterElectionHandler masterElectionHandler,
                       MetricUpdater metricUpdater,
                       FleetControllerOptions options) throws Exception
{
    log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
    this.timer = timer;
    this.monitor = timer; // the timer object doubles as the controller's global lock
    this.eventLog = eventLog;
    this.options = options;
    this.nodeLookup = nodeLookup;
    this.cluster = cluster;
    this.communicator = communicator;
    this.database = database;
    this.stateGatherer = nodeStateGatherer;
    this.stateChangeHandler = stateChangeHandler;
    this.systemStateBroadcaster = systemStateBroadcaster;
    this.stateVersionTracker = new StateVersionTracker(metricUpdater);
    this.metricUpdater = metricUpdater;
    this.statusPageServer = statusPage;
    this.rpcServer = server;
    this.masterElectionHandler = masterElectionHandler;
    // Status page routes: per-node page, health state, raw cluster state, and index.
    this.statusRequestRouter.addHandler(
            "^/node=([a-z]+)\\.(\\d+)$",
            new LegacyNodePageRequestHandler(timer, eventLog, cluster));
    this.statusRequestRouter.addHandler(
            "^/state.*",
            new NodeHealthRequestHandler(dataExtractor));
    this.statusRequestRouter.addHandler(
            "^/clusterstate",
            new ClusterStateRequestHandler(stateVersionTracker));
    this.statusRequestRouter.addHandler(
            "^/$",
            new LegacyIndexPageRequestHandler(
                    timer, options.showLocalSystemStatesInEventLog, cluster,
                    masterElectionHandler, stateVersionTracker,
                    eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
    // Push the initial options to all subsystems.
    propagateOptions();
}
/** Creates and starts a fleet controller for running inside a container (no RPC server). */
public static FleetController createForContainer(FleetControllerOptions options,
                                                 StatusPageServerInterface statusPageServer,
                                                 MetricReporter metricReporter) throws Exception {
    return create(options, new RealTimer(), statusPageServer, null, metricReporter);
}
/** Creates and starts a stand-alone fleet controller with its own RPC and status servers. */
public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception {
    Timer timer = new RealTimer();
    RpcServer rpc = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy);
    StatusPageServer statusServer = new StatusPageServer(timer, timer, options.httpPort);
    return create(options, timer, statusServer, rpc, new NoMetricReporter());
}
/**
 * Builds the full collaborator graph (metrics, event log, cluster model, communicator,
 * database, lookup, state handling, broadcasting, election), constructs the controller
 * and starts its event thread.
 */
private static FleetController create(FleetControllerOptions options,
                                      Timer timer,
                                      StatusPageServerInterface statusPageServer,
                                      RpcServer rpcServer,
                                      MetricReporter metricReporter) throws Exception
{
    MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
    EventLog log = new EventLog(timer, metricUpdater);
    ContentCluster cluster = new ContentCluster(
            options.clusterName,
            options.nodes,
            options.storageDistribution,
            options.minStorageNodesUp,
            options.minRatioOfStorageNodesUp);
    NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
    Communicator communicator = new RPCCommunicator(
            timer,
            options.fleetControllerIndex,
            options.nodeStateRequestTimeoutMS,
            options.nodeStateRequestTimeoutEarliestPercentage,
            options.nodeStateRequestTimeoutLatestPercentage,
            options.nodeStateRequestRoundTripTimeMaxSeconds);
    DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
    NodeLookup lookUp = new SlobrokClient(timer);
    StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater);
    SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
    MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
    FleetController controller = new FleetController(
            timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
    controller.start();
    return controller;
}
/** Spawns the controller event thread, which ticks until shutdown. */
public void start() {
    runner = new Thread(this);
    runner.start();
}
/** Exposes the controller's global lock object, mainly for tests. */
public Object getMonitor() { return monitor; }
/** Whether the event thread is (still) supposed to run. */
public boolean isRunning() {
    synchronized (monitor) {
        return running;
    }
}
/** Whether this controller currently holds the master role. */
public boolean isMaster() {
    synchronized (monitor) {
        return masterElectionHandler.isMaster();
    }
}
/** Returns the cluster state most recently handed to the broadcaster. */
public ClusterState getClusterState() {
    synchronized (monitor) {
        return systemStateBroadcaster.getClusterState();
    }
}
/** Queues a remote task for execution on the controller thread during a later tick. */
public void schedule(RemoteClusterControllerTask task) {
    synchronized (monitor) {
        log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
        remoteTasks.add(task);
    }
}
/** Used for unit testing. Registers a listener and immediately feeds it the current state. */
public void addSystemStateListener(SystemStateListener listener) {
    synchronized (systemStateListeners) {
        systemStateListeners.add(listener);
        com.yahoo.vdslib.state.ClusterState state = getSystemState();
        if (state == null) throw new NullPointerException("Cluster state should never be null at this point");
        listener.handleNewSystemState(state);
    }
}
/** Returns a defensive copy of the currently active options. */
public FleetControllerOptions getOptions() {
    synchronized (monitor) {
        return options.clone();
    }
}
/** Returns the state the given node last reported, failing if the node is unknown. */
public NodeState getReportedNodeState(Node n) {
    synchronized (monitor) {
        NodeInfo info = cluster.getNodeInfo(n);
        if (info == null) {
            throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
        }
        return info.getReportedState();
    }
}
public NodeState getWantedNodeState(Node n) {
synchronized(monitor) {
return cluster.getNodeInfo(n).getWantedState();
}
}
public com.yahoo.vdslib.state.ClusterState getSystemState() {
synchronized(monitor) {
return stateVersionTracker.getVersionedClusterState();
}
}
public int getHttpPort() { return statusPageServer.getPort(); }
public int getRpcPort() { return rpcServer.getPort(); }
public void shutdown() throws InterruptedException, java.io.IOException {
boolean isStillRunning = false;
synchronized(monitor) {
if (running) {
isStillRunning = true;
}
}
if (runner != null && isStillRunning) {
log.log(LogLevel.INFO, "Joining event thread.");
running = false;
runner.interrupt();
runner.join();
}
log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread.");
controllerThreadId = Thread.currentThread().getId();
database.shutdown(this);
if (statusPageServer != null) {
statusPageServer.shutdown();
}
if (rpcServer != null) {
rpcServer.shutdown();
}
communicator.shutdown();
nodeLookup.shutdown();
}
public void updateOptions(FleetControllerOptions options, long configGeneration) {
synchronized(monitor) {
assert(this.options.fleetControllerIndex == options.fleetControllerIndex);
log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
nextOptions = options.clone();
nextConfigGeneration = configGeneration;
monitor.notifyAll();
}
}
private void verifyInControllerThread() {
if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
}
}
// Returns the cluster state of the latest (not yet published) candidate state.
private ClusterState latestCandidateClusterState() {
return stateVersionTracker.getLatestCandidateState().getClusterState();
}
// Invoked when a node reports a new state. Controller thread only.
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}
// Invoked when a new wanted state is proposed for a node; flags the wanted states
// as changed so they get persisted while this node is master.
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
wantedStateChanged = true;
stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
// Invoked when a node has reported updated host info.
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
verifyInControllerThread();
stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo);
}
// Invoked when a new node appears in the cluster.
@Override
public void handleNewNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewNode(node);
}
// Invoked when a previously known node is no longer present.
@Override
public void handleMissingNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
// Invoked when a node has gotten a (new) RPC address.
@Override
public void handleNewRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewRpcAddress(node);
}
// Invoked when a node's previously seen RPC address has returned.
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleReturnedRpcAddress(node);
}
// Registers a newly generated cluster state: queues it for listener propagation,
// updates metrics, hands it to the broadcaster, and — when master — stores its
// version via ZooKeeper.
public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) {
verifyInControllerThread();
newStates.add(state);
metricUpdater.updateClusterStateMetrics(cluster, state);
systemStateBroadcaster.handleNewSystemState(state);
if (masterElectionHandler.isMaster()) {
storeClusterStateVersionToZooKeeper(state);
}
}
/**
 * Receives the current master election data.
 * The keys in the given map are the indexes of the fleet controllers;
 * the value for each key is the index of the fleet controller that the
 * keyed controller wants as master.
 *
 * If more than half the fleet controllers want a node to be master and
 * that node also wants itself as master, that node is the single master.
 * If this condition is not met, there is currently no master.
 */
public void handleFleetData(Map<Integer, Integer> data) {
verifyInControllerThread();
log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler");
metricUpdater.updateMasterElectionMetrics(data);
masterElectionHandler.handleFleetData(data);
}
/**
 * Called when we can no longer contact the database.
 */
public void lostDatabaseConnection() {
verifyInControllerThread();
masterElectionHandler.lostDatabaseConnection();
}
/** Called when all distributors have acked newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
// NOTE(review): unlike the other handlers, no verifyInControllerThread() here — confirm intentional.
Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
stateChangeHandler.handleAllDistributorsInSync(
stateVersionTracker.getVersionedClusterState(), nodes, database, context);
}
/**
 * Returns true if the given nodes differ from the currently configured node set,
 * either in membership or in some node's retired flag.
 */
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    if (newNodes.size() != cluster.getConfiguredNodes().size()
            || ! cluster.getConfiguredNodes().values().containsAll(newNodes)) {
        return true;
    }
    // Same membership; a change in any node's retired flag still counts as a change.
    for (ConfiguredNode newNode : newNodes) {
        ConfiguredNode currentNode = cluster.getConfiguredNodes().get(newNode.index());
        if (newNode.retired() != currentNode.retired()) {
            return true;
        }
    }
    return false;
}
/**
 * This is called when the options field has been set to a new set of options:
 * pushes the new configuration out to every collaborating component.
 * Steps are applied in a fixed order; keep additions order-aware.
 */
private void propagateOptions() throws java.io.IOException, ListenFailedException {
verifyInControllerThread();
// A changed node set invalidates the slobrok generation so node presence is re-resolved.
if (changesConfiguredNodeSet(options.nodes)) {
cluster.setSlobrokGenerationCount(0);
}
communicator.propagateOptions(options);
if (nodeLookup instanceof SlobrokClient)
((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
cluster.setPollingFrequency(options.statePollingFrequency);
cluster.setDistribution(options.storageDistribution);
cluster.setNodes(options.nodes);
cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp);
cluster.setMinStorageNodesUp(options.minStorageNodesUp);
database.setZooKeeperAddress(options.zooKeeperServerAddress);
database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
stateChangeHandler.reconfigureFromOptions(options);
// Force a cluster state recomputation on the next tick.
stateChangeHandler.setStateChangedFlag();
masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
if (rpcServer != null) {
rpcServer.setMasterElectionHandler(masterElectionHandler);
// Bind failures are logged but deliberately non-fatal: the service layout on this
// node may legitimately have changed.
try{
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
} catch (ListenFailedException e) {
log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
} catch (Exception e) {
log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
}
}
if (statusPageServer != null) {
try{
statusPageServer.setPort(options.httpPort);
} catch (Exception e) {
log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
}
}
// Possibly allow the next state broadcast earlier under the (new) rate limit,
// and record the now-active config generation.
long currentTime = timer.getCurrentTimeInMillis();
nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
configGeneration = nextConfigGeneration;
nextConfigGeneration = -1;
}
/**
 * Resolves a handler for the given HTTP request and serves its status page.
 * On failure an HTML error page is produced instead; stack traces are only
 * embedded as hidden content, never shown as the visible message.
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
verifyInControllerThread();
StatusPageResponse.ResponseCode responseCode;
String message;
String hiddenMessage = "";
try {
StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
if (handler == null) {
throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
}
return handler.handle(httpRequest);
} catch (FileNotFoundException e) {
responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
message = e.getMessage();
} catch (Exception e) {
responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
message = "Internal Server Error";
hiddenMessage = ExceptionUtils.getStackTrace(e);
log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() +
": " + hiddenMessage);
}
// Error path: build a minimal HTML page describing the failure.
TimeZone tz = TimeZone.getTimeZone("UTC");
long currentTime = timer.getCurrentTimeInMillis();
StatusPageResponse response = new StatusPageResponse();
StringBuilder content = new StringBuilder();
response.setContentType("text/html");
response.setResponseCode(responseCode);
// NOTE(review): the raw request string is embedded unescaped inside an HTML comment —
// confirm it cannot contain "-->" and break out of the comment.
content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
response.writeHtmlHeader(content, message);
response.writeHtmlFooter(content, hiddenMessage);
response.writeContent(content.toString());
return response;
}
/**
 * Runs one iteration of the controller's main loop: database upkeep, master
 * election, leadership edge handling, node state gathering, state broadcasting,
 * status page and RPC serving, and remote task processing. Waits briefly when
 * no work was done, and applies any pending configuration at the end of the cycle.
 */
public void tick() throws Exception {
synchronized (monitor) {
boolean didWork;
didWork = database.doNextZooKeeperTask(databaseContext);
didWork |= updateMasterElectionState();
didWork |= handleLeadershipEdgeTransitions();
stateChangeHandler.setMaster(isMaster);
didWork |= stateGatherer.processResponses(this);
// Only the first N master election candidates gather node state; others step down.
if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
didWork |= resyncLocallyCachedState();
} else {
stepDownAsStateGatherer();
}
didWork |= systemStateBroadcaster.processResponses();
if (masterElectionHandler.isMaster()) {
didWork |= broadcastClusterStateToEligibleNodes();
}
didWork |= processAnyPendingStatusPageRequest();
if (rpcServer != null) {
didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
}
didWork |= processNextQueuedRemoteTask();
processingCycle = false;
++cycleCount;
long tickStopTime = timer.getCurrentTimeInMillis();
if (tickStopTime >= tickStartTime)
metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
// Idle wait (unless a test is waiting for a full cycle) to avoid busy-looping.
if ( ! didWork && ! waitingForCycle)
monitor.wait(options.cycleWaitTime);
tickStartTime = timer.getCurrentTimeInMillis();
processingCycle = true;
if (nextOptions != null) {
switchToNewConfig();
}
}
// Listener callbacks are deliberately made outside the monitor lock.
propagateNewStatesToListeners();
}
/**
 * Polls the master election state through the database handler.
 *
 * @return true if watching the election performed any work
 * @throws InterruptedException if the thread was interrupted while watching
 */
private boolean updateMasterElectionState() throws InterruptedException {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        // Rethrow directly; the previous re-wrapping into a fresh InterruptedException
        // demoted the original stack trace to a cause for no benefit.
        throw e;
    } catch (Exception e) {
        // Non-interrupt failures are logged and treated as "no work done" so the tick loop continues.
        log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString());
    }
    return false;
}
// Steps down from the node state gatherer role: clears gathered states and logs
// an event if this node previously held the role.
private void stepDownAsStateGatherer() {
if (isStateGatherer) {
cluster.clearStates();
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
}
isStateGatherer = false;
}
// Swaps in the pending configuration and propagates it to all components.
// NOTE(review): if propagateOptions() throws, the new options stay active but may
// be only partially applied — confirm this is acceptable.
private void switchToNewConfig() {
options = nextOptions;
nextOptions = null;
try {
propagateOptions();
} catch (Exception e) {
log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e);
}
}
/** Serves at most one pending HTTP status page request. Returns true if one was handled. */
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer == null) {
        return false;
    }
    StatusPageServer.HttpRequest pendingRequest = statusPageServer.getCurrentHttpRequest();
    if (pendingRequest == null) {
        return false;
    }
    statusPageServer.answerCurrentStatusRequest(fetchStatusPage(pendingRequest));
    return true;
}
/**
 * Broadcasts the newest cluster state, rate-limited by minTimeBetweenNewSystemStates.
 * Broadcasting before firstAllowedStateBroadcast is only permitted once every node
 * has reported its state.
 *
 * @return true if a state was actually sent
 */
private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException {
boolean sentAny = false;
long currentTime = timer.getCurrentTimeInMillis();
if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
&& currentTime >= nextStateSendTime)
{
if (currentTime < firstAllowedStateBroadcast) {
log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
firstAllowedStateBroadcast = currentTime;
}
sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this);
if (sentAny) {
// Start a new rate-limit window only when something was actually sent.
nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
}
}
return sentAny;
}
/** Delivers every queued new cluster state to all registered listeners, then clears the queue. */
private void propagateNewStatesToListeners() {
    if (newStates.isEmpty()) {
        return;
    }
    synchronized (systemStateListeners) {
        for (ClusterState newState : newStates) {
            for (SystemStateListener stateListener : systemStateListeners) {
                stateListener.handleNewSystemState(newState);
            }
        }
        newStates.clear();
    }
}
/** Executes the next queued remote cluster controller task, if any. Returns true if one ran. */
private boolean processNextQueuedRemoteTask() {
    if (remoteTasks.isEmpty()) {
        return false;
    }
    RemoteClusterControllerTask task = remoteTasks.poll();
    RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext();
    log.finest("Processing remote task " + task.getClass().getName());
    task.doRemoteFleetControllerTask(taskContext);
    task.notifyCompleted();
    log.finest("Done processing remote task " + task.getClass().getName());
    return true;
}
// Builds the context handed to remote tasks, exposing the cluster, the current
// consolidated state, and this controller's listener interfaces.
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
context.cluster = cluster;
context.currentState = consolidatedClusterState();
context.masterInfo = masterElectionHandler;
context.nodeStateOrHostInfoChangeHandler = this;
context.nodeAddedOrRemovedListener = this;
return context;
}
/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
if (publishedState.getClusterState() == State.UP) {
return publishedState;
}
// Cluster not UP: fall back to a clone of the latest candidate state (which tracks
// node events), but keep the published version number.
final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
current.setVersion(publishedState.getVersion());
return current;
}
/*
System test observations:
- a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
- long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
// Refreshes the locally cached cluster view: periodically reloads wanted states and
// start timestamps from the database (when not master), polls node lookup and node
// states, runs timer-driven state checks, and recomputes the cluster state if needed.
// Also marks this node as a state gatherer, logging the transition on the edge.
private boolean resyncLocallyCachedState() throws InterruptedException {
boolean didWork = false;
// Non-masters refresh from the database every 100 cycles to stay roughly in sync.
if ( ! isMaster && cycleCount % 100 == 0) {
didWork = database.loadWantedStates(databaseContext);
didWork |= database.loadStartTimestamps(cluster);
}
didWork |= nodeLookup.updateCluster(cluster, this);
didWork |= stateGatherer.sendMessages(cluster, communicator, this);
didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this);
didWork |= recomputeClusterStateIfRequired();
if ( ! isStateGatherer) {
if ( ! isMaster) {
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
}
}
isStateGatherer = true;
return didWork;
}
// Recomputes the candidate cluster state when flagged, and promotes it to a new
// versioned (published) state if it differs enough from the current one or a new
// version was received from ZooKeeper. Returns true if a new state was published.
private boolean recomputeClusterStateIfRequired() {
if (mustRecomputeCandidateClusterState()) {
stateChangeHandler.unsetStateChangedFlag();
final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
stateVersionTracker.updateLatestCandidateState(candidate);
if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
|| stateVersionTracker.hasReceivedNewVersionFromZooKeeper())
{
final long timeNowMs = timer.getCurrentTimeInMillis();
final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState();
stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs);
handleNewSystemState(stateVersionTracker.getVersionedClusterState());
return true;
}
}
return false;
}
// Generates a fresh annotated cluster state from the current cluster view and options.
private AnnotatedClusterState computeCurrentAnnotatedState() {
ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
.cluster(cluster)
.lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
return ClusterStateGenerator.generatedStateFrom(params);
}
// Computes and logs the event diff between two annotated states, followed by the
// generic "state applied" events.
private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState,
final AnnotatedClusterState toState,
final long timeNowMs) {
final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
EventDiffCalculator.params()
.cluster(cluster)
.fromState(fromState)
.toState(toState)
.currentTimeMs(timeNowMs));
for (Event event : deltaEvents) {
eventLog.add(event, isMaster);
}
emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState());
}
// Logs a new-cluster-state-version event, plus a distribution bit change event when applicable.
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
fromClusterState.getTextualDifference(toClusterState),
timeNowMs), isMaster);
if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
eventLog.add(new ClusterEvent(
ClusterEvent.Type.SYSTEMSTATE,
"Altering distribution bits in system from "
+ fromClusterState.getDistributionBitCount() + " to " +
toClusterState.getDistributionBitCount(),
timeNowMs), isMaster);
}
}
// True when the candidate cluster state must be recomputed this tick.
private boolean mustRecomputeCandidateClusterState() {
return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper();
}
// Handles the edges of gaining or losing masterhood. On becoming master: reload
// persisted state from ZooKeeper, bump the state version, and delay the first state
// broadcast. On losing masterhood: log and reset. Wanted-state changes are only
// persisted while master.
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
boolean didWork = false;
if (masterElectionHandler.isMaster()) {
if ( ! isMaster) {
metricUpdater.becameMaster();
stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
didWork = database.loadStartTimestamps(cluster);
didWork |= database.loadWantedStates(databaseContext);
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
+ stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
long currentTime = timer.getCurrentTimeInMillis();
firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be "
+ options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
}
isMaster = true;
if (wantedStateChanged) {
database.saveWantedStates(databaseContext);
wantedStateChanged = false;
}
} else {
if (isMaster) {
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
firstAllowedStateBroadcast = Long.MAX_VALUE;
metricUpdater.noLongerMaster();
}
wantedStateChanged = false;
isMaster = false;
}
return didWork;
}
/**
 * Main loop of the controller thread: ticks until stopped. An InterruptedException
 * ends the loop quietly (the normal shutdown path); any other throwable is fatal
 * and terminates the process.
 */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        while (running) {
            tick();
        }
    } catch (InterruptedException e) {
        log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
    } catch (Throwable t) {
        // Pass the throwable to the logger; the previous explicit printStackTrace()
        // merely duplicated this information on stderr.
        log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running = false; }
        System.exit(1);
    }
}
// Database context exposing this controller's cluster and listener interfaces to
// the DatabaseHandler.
public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
@Override
public ContentCluster getCluster() { return cluster; }
@Override
public FleetController getFleetController() { return FleetController.this; }
@Override
public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
@Override
public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
// Blocks until the controller completes at least one full tick cycle (two if a
// cycle is currently in progress). Throws IllegalStateException on timeout or if
// the controller has stopped. Intended for tests.
public void waitForCompleteCycle(long timeoutMS) {
long endTime = System.currentTimeMillis() + timeoutMS;
synchronized (monitor) {
long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
waitingForCycle = true;
try{
while (cycleCount < wantedCycle) {
if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
if (!running) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
// Interrupts are swallowed here; the deadline check above bounds the wait.
try{ monitor.wait(100); } catch (InterruptedException e) {}
}
} finally {
waitingForCycle = false;
}
}
}
/**
 * Waits until at least nodeCount nodes have acknowledged system state version
 * {@code version} or higher, or throws IllegalStateException on timeout.
 *
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeout;
synchronized (monitor) {
while (true) {
// Count nodes that have acknowledged at least the wanted state version.
int ackedNodes = 0;
for (NodeInfo node : cluster.getNodeInfo()) {
if (node.getSystemStateVersionAcknowledged() >= version) {
++ackedNodes;
}
}
if (ackedNodes >= nodeCount) {
log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
return;
}
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
}
monitor.wait(10);
}
}
}
// Test helper: waits until the expected number of distributors and storage nodes are
// registered (with current RPC addresses) in slobrok, or throws on timeout.
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
long maxTime = System.currentTimeMillis() + timeoutMillis;
synchronized (monitor) {
while (true) {
int distCount = 0, storCount = 0;
for (NodeInfo info : cluster.getNodeInfo()) {
if (!info.isRpcAddressOutdated()) {
if (info.isDistributor()) ++distCount;
else ++storCount;
}
}
if (distCount == distNodeCount && storCount == storNodeCount) return;
long remainingTime = maxTime - System.currentTimeMillis();
if (remainingTime <= 0) {
throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
+ " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
+ distCount + " distributors and " + storCount + " storage nodes)");
}
monitor.wait(10);
}
}
}
// True while the ZooKeeper database connection has not been closed.
public boolean hasZookeeperConnection() { return !database.isClosed(); }
// Test accessors. NOTE(review): getSlobrokMirrorUpdates assumes nodeLookup is a SlobrokClient.
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
public ContentCluster getCluster() { return cluster; }
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
public EventLog getEventLog() {
return eventLog;
}
} | class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAddedOrRemovedListener, SystemStateListener,
Runnable, RemoteClusterControllerTaskScheduler {
private static Logger log = Logger.getLogger(FleetController.class.getName());
// Collaborators wired in through the constructor.
private final Timer timer;
private final Object monitor; // guards controller state; the constructor passes the timer as the monitor
private final EventLog eventLog;
private final NodeLookup nodeLookup;
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer; // may be null (createForContainer passes no RPC server)
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
// Controller thread and run state.
private Thread runner = null;
private boolean running = true;
private FleetControllerOptions options;
private FleetControllerOptions nextOptions; // pending config, applied at the end of a tick
private final List<SystemStateListener> systemStateListeners = new LinkedList<>();
private boolean processingCycle = false;
private boolean wantedStateChanged = false; // true when wanted states must be persisted (while master)
private long cycleCount = 0;
private long nextStateSendTime = 0; // earliest time the next state broadcast is allowed
private Long controllerThreadId = null;
private boolean waitingForCycle = false;
private StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); // states pending listener propagation
private long configGeneration = -1;
private long nextConfigGeneration = -1;
private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
private boolean isMaster = false;
private boolean isStateGatherer = false;
private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;
// Exposes current run data (latest state, options, config generation, cluster) to status page handlers.
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); }
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
public long getConfigGeneration() { return configGeneration; }
@Override
public ContentCluster getCluster() { return cluster; }
};
/**
 * Wires together a fleet controller from its collaborators, registers the status
 * page request handlers, and propagates the initial options to all components.
 */
public FleetController(Timer timer,
EventLog eventLog,
ContentCluster cluster,
NodeStateGatherer nodeStateGatherer,
Communicator communicator,
StatusPageServerInterface statusPage,
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
StateChangeHandler stateChangeHandler,
SystemStateBroadcaster systemStateBroadcaster,
MasterElectionHandler masterElectionHandler,
MetricUpdater metricUpdater,
FleetControllerOptions options) throws Exception
{
log.info("Starting up cluster controller " + options.fleetControllerIndex + " for cluster " + cluster.getName());
this.timer = timer;
// The timer object doubles as the monitor guarding controller state.
this.monitor = timer;
this.eventLog = eventLog;
this.options = options;
this.nodeLookup = nodeLookup;
this.cluster = cluster;
this.communicator = communicator;
this.database = database;
this.stateGatherer = nodeStateGatherer;
this.stateChangeHandler = stateChangeHandler;
this.systemStateBroadcaster = systemStateBroadcaster;
this.stateVersionTracker = new StateVersionTracker(metricUpdater);
this.metricUpdater = metricUpdater;
this.statusPageServer = statusPage;
this.rpcServer = server;
this.masterElectionHandler = masterElectionHandler;
// Status page routes: per-node page, health state, cluster state, and the index page.
this.statusRequestRouter.addHandler(
"^/node=([a-z]+)\\.(\\d+)$",
new LegacyNodePageRequestHandler(timer, eventLog, cluster));
this.statusRequestRouter.addHandler(
"^/state.*",
new NodeHealthRequestHandler(dataExtractor));
this.statusRequestRouter.addHandler(
"^/clusterstate",
new ClusterStateRequestHandler(stateVersionTracker));
this.statusRequestRouter.addHandler(
"^/$",
new LegacyIndexPageRequestHandler(
timer, options.showLocalSystemStatesInEventLog, cluster,
masterElectionHandler, stateVersionTracker,
eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
propagateOptions();
}
// Factory for container-hosted operation: caller supplies the status page server
// and metric reporter; no stand-alone RPC server is created (null is passed on).
public static FleetController createForContainer(FleetControllerOptions options,
StatusPageServerInterface statusPageServer,
MetricReporter metricReporter) throws Exception {
Timer timer = new RealTimer();
return create(options, timer, statusPageServer, null, metricReporter);
}
// Factory for stand-alone operation: owns its RPC server and HTTP status page, and
// uses a no-op metric reporter.
public static FleetController createForStandAlone(FleetControllerOptions options) throws Exception {
Timer timer = new RealTimer();
RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy);
StatusPageServer statusPageServer = new StatusPageServer(timer, timer, options.httpPort);
return create(options, timer, statusPageServer, rpcServer, new NoMetricReporter());
}
// Builds all collaborators with production implementations, constructs the
// controller, and starts its event thread.
private static FleetController create(FleetControllerOptions options,
Timer timer,
StatusPageServerInterface statusPageServer,
RpcServer rpcServer,
MetricReporter metricReporter) throws Exception
{
MetricUpdater metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex);
EventLog log = new EventLog(timer, metricUpdater);
ContentCluster cluster = new ContentCluster(
options.clusterName,
options.nodes,
options.storageDistribution,
options.minStorageNodesUp,
options.minRatioOfStorageNodesUp);
NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, log);
Communicator communicator = new RPCCommunicator(
timer,
options.fleetControllerIndex,
options.nodeStateRequestTimeoutMS,
options.nodeStateRequestTimeoutEarliestPercentage,
options.nodeStateRequestTimeoutLatestPercentage,
options.nodeStateRequestRoundTripTimeMaxSeconds);
DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
NodeLookup lookUp = new SlobrokClient(timer);
StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater);
SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
FleetController controller = new FleetController(
timer, log, cluster, stateGatherer, communicator, statusPageServer, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
controller.start();
return controller;
}
/** Starts the controller's event thread. */
public void start() {
    // Give the thread a name so it is identifiable in thread dumps.
    runner = new Thread(this, "FleetController");
    runner.start();
}
// Returns the monitor object used to guard controller state.
public Object getMonitor() { return monitor; }
public boolean isRunning() {
synchronized(monitor) {
return running;
}
}
public boolean isMaster() {
synchronized (monitor) {
return masterElectionHandler.isMaster();
}
}
// Returns the cluster state most recently handed to the broadcaster.
public ClusterState getClusterState() {
synchronized (monitor) {
return systemStateBroadcaster.getClusterState();
}
}
// Queues a remote task for execution on the controller thread.
public void schedule(RemoteClusterControllerTask task) {
synchronized (monitor) {
log.fine("Scheduled remote task " + task.getClass().getName() + " for execution");
remoteTasks.add(task);
}
}
/** Used for unit testing. Registers a listener and immediately feeds it the current state. */
public void addSystemStateListener(SystemStateListener listener) {
synchronized (systemStateListeners) {
systemStateListeners.add(listener);
com.yahoo.vdslib.state.ClusterState state = getSystemState();
if (state == null) throw new NullPointerException("Cluster state should never be null at this point");
listener.handleNewSystemState(state);
}
}
// Returns a defensive copy of the currently active options.
public FleetControllerOptions getOptions() {
synchronized(monitor) {
return options.clone();
}
}
// Returns the state last reported by the given node; throws if the node is unknown.
public NodeState getReportedNodeState(Node n) {
synchronized(monitor) {
NodeInfo node = cluster.getNodeInfo(n);
if (node == null) {
throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
}
return node.getReportedState();
}
}
// Returns the wanted state of the given node.
// NOTE(review): unlike getReportedNodeState, an unknown node yields an NPE here — confirm intentional.
public NodeState getWantedNodeState(Node n) {
synchronized(monitor) {
return cluster.getNodeInfo(n).getWantedState();
}
}
// Returns the latest published (versioned) cluster state.
public com.yahoo.vdslib.state.ClusterState getSystemState() {
synchronized(monitor) {
return stateVersionTracker.getVersionedClusterState();
}
}
// NOTE(review): these assume the respective servers exist; getRpcPort NPEs when rpcServer is null.
public int getHttpPort() { return statusPageServer.getPort(); }
public int getRpcPort() { return rpcServer.getPort(); }
/**
 * Stops the controller's event thread (if running) and shuts down the database,
 * servers, and communication channels. After joining the event thread, the calling
 * thread takes over its id so controller-thread-only methods may be used here.
 */
public void shutdown() throws InterruptedException, java.io.IOException {
boolean isStillRunning = false;
synchronized(monitor) {
if (running) {
isStillRunning = true;
}
}
if (runner != null && isStillRunning) {
log.log(LogLevel.INFO, "Joining event thread.");
// NOTE(review): running is cleared without holding the monitor — confirm visibility is acceptable.
running = false;
runner.interrupt();
runner.join();
}
log.log(LogLevel.INFO, "Fleetcontroller done shutting down event thread.");
controllerThreadId = Thread.currentThread().getId();
database.shutdown(this);
if (statusPageServer != null) {
statusPageServer.shutdown();
}
if (rpcServer != null) {
rpcServer.shutdown();
}
communicator.shutdown();
nodeLookup.shutdown();
}
/**
 * Schedules a new set of fleet controller options to be applied by the controller
 * thread on its next tick (see switchToNewConfig()).
 *
 * @param options the new configuration; must target this fleet controller's index
 * @param configGeneration the generation number of the new configuration
 * @throws IllegalArgumentException if the options target a different fleet controller index
 */
public void updateOptions(FleetControllerOptions options, long configGeneration) {
    synchronized(monitor) {
        // Previously a plain assert, which is a no-op unless the JVM runs with -ea;
        // validate explicitly so a mis-routed config is also rejected in production.
        if (this.options.fleetControllerIndex != options.fleetControllerIndex) {
            throw new IllegalArgumentException("Got options for fleet controller index "
                    + options.fleetControllerIndex + ", expected index " + this.options.fleetControllerIndex);
        }
        log.log(LogLevel.INFO, "Fleetcontroller " + options.fleetControllerIndex + " has new options");
        nextOptions = options.clone();
        nextConfigGeneration = configGeneration;
        monitor.notifyAll();
    }
}
private void verifyInControllerThread() {
if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
}
}
private ClusterState latestCandidateClusterState() {
return stateVersionTracker.getLatestCandidateState().getClusterState();
}
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
wantedStateChanged = true;
stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
verifyInControllerThread();
stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo);
}
@Override
public void handleNewNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewNode(node);
}
@Override
public void handleMissingNode(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
@Override
public void handleNewRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleNewRpcAddress(node);
}
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
verifyInControllerThread();
stateChangeHandler.handleReturnedRpcAddress(node);
}
public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) {
verifyInControllerThread();
newStates.add(state);
metricUpdater.updateClusterStateMetrics(cluster, state);
systemStateBroadcaster.handleNewSystemState(state);
if (masterElectionHandler.isMaster()) {
storeClusterStateVersionToZooKeeper(state);
}
}
/**
* This function gives data of the current state in master election.
* The keys in the given map are indexes of fleet controllers.
* The values are what fleetcontroller that fleetcontroller wants to
* become master.
*
* If more than half the fleetcontrollers want a node to be master and
* that node also wants itself as master, that node is the single master.
* If this condition is not met, there is currently no master.
*/
public void handleFleetData(Map<Integer, Integer> data) {
verifyInControllerThread();
log.log(LogLevel.SPAM, "Sending fleet data event on to master election handler");
metricUpdater.updateMasterElectionMetrics(data);
masterElectionHandler.handleFleetData(data);
}
/**
* Called when we can no longer contact database.
*/
public void lostDatabaseConnection() {
verifyInControllerThread();
masterElectionHandler.lostDatabaseConnection();
}
/** Called when all distributors have acked newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
stateChangeHandler.handleAllDistributorsInSync(
stateVersionTracker.getVersionedClusterState(), nodes, database, context);
}
// True when the given node set differs from the currently configured one,
// either in membership or in any node's retired flag.
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
    Collection<ConfiguredNode> current = cluster.getConfiguredNodes().values();
    if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
    if ( ! current.containsAll(newNodes)) return true;
    return newNodes.stream()
            .anyMatch(node -> node.retired() != cluster.getConfiguredNodes().get(node.index()).retired());
}
/** This is called when the options field has been set to a new set of options */
private void propagateOptions() throws java.io.IOException, ListenFailedException {
    verifyInControllerThread();
    // A changed node set invalidates the slobrok generation, forcing node
    // presence to be re-resolved from scratch.
    if (changesConfiguredNodeSet(options.nodes)) {
        cluster.setSlobrokGenerationCount(0);
    }
    communicator.propagateOptions(options);
    if (nodeLookup instanceof SlobrokClient)
        ((SlobrokClient)nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
    eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
    cluster.setPollingFrequency(options.statePollingFrequency);
    cluster.setDistribution(options.storageDistribution);
    cluster.setNodes(options.nodes);
    cluster.setMinRatioOfStorageNodesUp(options.minRatioOfStorageNodesUp);
    cluster.setMinStorageNodesUp(options.minStorageNodesUp);
    database.setZooKeeperAddress(options.zooKeeperServerAddress);
    database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
    stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
    stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
    stateChangeHandler.reconfigureFromOptions(options);
    stateChangeHandler.setStateChangedFlag(); // forces a state recomputation on the next tick
    masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
    masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
    // Port rebinds may legitimately fail when services on this node changed;
    // log and continue rather than aborting reconfiguration.
    if (rpcServer != null) {
        rpcServer.setMasterElectionHandler(masterElectionHandler);
        try{
            rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
        } catch (ListenFailedException e) {
            log.log(LogLevel.WARNING, "Failed to bind RPC server to port " + options.rpcPort +". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        } catch (Exception e) {
            log.log(LogLevel.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
        }
    }
    if (statusPageServer != null) {
        try{
            statusPageServer.setPort(options.httpPort);
        } catch (Exception e) {
            log.log(LogLevel.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        }
    }
    // Never postpone an already-scheduled state broadcast; only move it earlier.
    long currentTime = timer.getCurrentTimeInMillis();
    nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
    configGeneration = nextConfigGeneration;
    nextConfigGeneration = -1;
}
/**
 * Resolves a handler for the given status page request and serves it; on
 * failure an error page is rendered instead of letting the exception escape.
 */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode responseCode;
    String message;
    String hiddenMessage = "";
    try {
        StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
        if (handler == null) {
            throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
        }
        return handler.handle(httpRequest);
    } catch (FileNotFoundException e) {
        responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
        message = e.getMessage();
    } catch (Exception e) {
        // Do not leak internals to the client: the stack trace only goes to
        // the debug log and the hidden part of the rendered page.
        responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        message = "Internal Server Error";
        hiddenMessage = ExceptionUtils.getStackTrace(e);
        log.log(LogLevel.DEBUG, "Unknown exception thrown for request " + httpRequest.getRequest() +
                ": " + hiddenMessage);
    }
    // Error path: build a minimal HTML page with the failure summary.
    TimeZone tz = TimeZone.getTimeZone("UTC");
    long currentTime = timer.getCurrentTimeInMillis();
    StatusPageResponse response = new StatusPageResponse();
    StringBuilder content = new StringBuilder();
    response.setContentType("text/html");
    response.setResponseCode(responseCode);
    content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
    content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
    response.writeHtmlHeader(content, message);
    response.writeHtmlFooter(content, hiddenMessage);
    response.writeContent(content.toString());
    return response;
}
/**
 * One iteration of the controller's main loop: database and election work,
 * node state gathering, cluster state broadcasting (master only), and queued
 * status/RPC/remote work. Waits on the monitor when no work was done.
 */
public void tick() throws Exception {
    synchronized (monitor) {
        boolean didWork;
        didWork = database.doNextZooKeeperTask(databaseContext);
        didWork |= updateMasterElectionState();
        didWork |= handleLeadershipEdgeTransitions();
        stateChangeHandler.setMaster(isMaster);
        didWork |= stateGatherer.processResponses(this);
        // Only the stateGatherCount best master candidates gather node state.
        if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
            didWork |= resyncLocallyCachedState();
        } else {
            stepDownAsStateGatherer();
        }
        didWork |= systemStateBroadcaster.processResponses();
        if (masterElectionHandler.isMaster()) {
            didWork |= broadcastClusterStateToEligibleNodes();
        }
        didWork |= processAnyPendingStatusPageRequest();
        if (rpcServer != null) {
            didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
        }
        didWork |= processNextQueuedRemoteTask();
        processingCycle = false;
        ++cycleCount;
        long tickStopTime = timer.getCurrentTimeInMillis();
        if (tickStopTime >= tickStartTime)
            metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
        // Idle and nobody waiting for a cycle edge: sleep before the next tick.
        if ( ! didWork && ! waitingForCycle)
            monitor.wait(options.cycleWaitTime);
        tickStartTime = timer.getCurrentTimeInMillis();
        processingCycle = true;
        if (nextOptions != null) {
            switchToNewConfig();
        }
    }
    // Deliberately runs outside the monitor: listeners are notified without
    // holding the tick lock.
    propagateNewStatesToListeners();
}
/**
 * Polls the master election state in ZooKeeper.
 *
 * @return true if the election handler did any work
 * @throws InterruptedException if interrupted while watching the election
 */
private boolean updateMasterElectionState() throws InterruptedException {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        // Propagate directly; the previous rewrap into a fresh
        // InterruptedException with itself as cause added no information.
        throw e;
    } catch (Exception e) {
        // Keep ticking on transient failures, but log the full throwable so
        // the stack trace is not lost (toString() alone hid the root cause).
        log.log(LogLevel.WARNING, "Failed to watch master election: " + e.toString(), e);
    }
    return false;
}
// Stops acting as a node state gatherer. Idempotent: cached states are only
// cleared (and the event logged) on the edge where we actually were one.
private void stepDownAsStateGatherer() {
    if (isStateGatherer) {
        cluster.clearStates();
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
    }
    isStateGatherer = false;
}
// Swaps in the options staged in nextOptions and applies them. Failures are
// logged rather than propagated so the tick loop keeps running.
private void switchToNewConfig() {
    options = nextOptions;
    nextOptions = null;
    try {
        propagateOptions();
    } catch (Exception e) {
        log.log(LogLevel.ERROR, "Failed to handle new fleet controller config", e);
    }
}
// Answers at most one queued HTTP status request; returns whether any work
// was done.
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer == null) return false;
    StatusPageServer.HttpRequest request = statusPageServer.getCurrentHttpRequest();
    if (request == null) return false;
    statusPageServer.answerCurrentStatusRequest(fetchStatusPage(request));
    return true;
}
// Broadcasts the current cluster state, rate-limited by nextStateSendTime.
// The initial grace period (firstAllowedStateBroadcast) is bypassed once all
// nodes have reported their state, since broadcasting is then safe.
private boolean broadcastClusterStateToEligibleNodes() throws InterruptedException {
    boolean sentAny = false;
    long currentTime = timer.getCurrentTimeInMillis();
    if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
        && currentTime >= nextStateSendTime)
    {
        if (currentTime < firstAllowedStateBroadcast) {
            log.log(LogLevel.DEBUG, "Not set to broadcast states just yet, but as we have gotten info from all nodes we can do so safely.");
            firstAllowedStateBroadcast = currentTime;
        }
        sentAny = systemStateBroadcaster.broadcastNewState(database, databaseContext, communicator, this);
        if (sentAny) {
            nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
        }
    }
    return sentAny;
}
// Delivers every queued cluster state, in order, to every registered listener,
// then clears the queue. Synchronizes on the listener set while delivering.
private void propagateNewStatesToListeners() {
    if ( ! newStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterState state : newStates) {
                for(SystemStateListener listener : systemStateListeners) {
                    listener.handleNewSystemState(state);
                }
            }
            newStates.clear();
        }
    }
}
// Executes at most one queued remote cluster controller task; returns whether
// any work was done.
private boolean processNextQueuedRemoteTask() {
    if (remoteTasks.isEmpty()) return false;
    RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
    RemoteClusterControllerTask task = remoteTasks.poll();
    log.finest("Processing remote task " + task.getClass().getName());
    task.doRemoteFleetControllerTask(context);
    task.notifyCompleted();
    log.finest("Done processing remote task " + task.getClass().getName());
    return true;
}
// Bundles the state a remote task needs; 'this' controller serves as both
// the node state change handler and the node added/removed listener.
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentState = consolidatedClusterState();
    context.masterInfo = masterElectionHandler;
    context.nodeStateOrHostInfoChangeHandler = this;
    context.nodeAddedOrRemovedListener = this;
    return context;
}
/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
    if (publishedState.getClusterState() == State.UP) {
        return publishedState;
    }
    // Cluster is down: use the newest candidate state's node information, but
    // keep the published state's version number.
    final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    current.setVersion(publishedState.getVersion());
    return current;
}
/*
 System test observations:
 - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
 - long time before content node state convergence (though this seems to be the case for legacy impl as well)
*/
// Refreshes the locally cached view of the cluster; returns whether any work
// was done.
private boolean resyncLocallyCachedState() throws InterruptedException {
    boolean didWork = false;
    // Non-masters periodically reload wanted states and start timestamps from
    // ZooKeeper so they stay reasonably current should they be elected.
    if ( ! isMaster && cycleCount % 100 == 0) {
        didWork = database.loadWantedStates(databaseContext);
        didWork |= database.loadStartTimestamps(cluster);
    }
    didWork |= nodeLookup.updateCluster(cluster, this);
    didWork |= stateGatherer.sendMessages(cluster, communicator, this);
    didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this);
    didWork |= recomputeClusterStateIfRequired();
    // Edge: this node just became a state gatherer. Non-masters additionally
    // sync the state version from ZooKeeper and log the transition.
    if ( ! isStateGatherer) {
        if ( ! isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
        }
    }
    isStateGatherer = true;
    return didWork;
}
// Recomputes the candidate cluster state when flagged, and publishes it if it
// differs enough from the current versioned state (or ZooKeeper handed us a
// newer version). Returns whether a new state was published.
private boolean recomputeClusterStateIfRequired() {
    if (mustRecomputeCandidateClusterState()) {
        stateChangeHandler.unsetStateChangedFlag();
        final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
        stateVersionTracker.updateLatestCandidateState(candidate);
        if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
            || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())
        {
            final long timeNowMs = timer.getCurrentTimeInMillis();
            final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState();
            stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
            // Log the event diff between the old and new versioned state.
            emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs);
            handleNewSystemState(stateVersionTracker.getVersionedClusterState());
            return true;
        }
    }
    return false;
}
// Derives a fresh candidate state from the current options, wall clock,
// cluster contents and the lowest distribution bit count observed so far.
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
    params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
        .cluster(cluster)
        .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(params);
}
// Computes and logs the per-node/cluster events implied by going from
// fromState to toState, then logs the state transition itself.
private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState,
                                            final AnnotatedClusterState toState,
                                            final long timeNowMs) {
    final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                .cluster(cluster)
                .fromState(fromState)
                .toState(toState)
                .currentTimeMs(timeNowMs));
    for (Event event : deltaEvents) {
        eventLog.add(event, isMaster);
    }
    emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState());
}
// Logs a SYSTEMSTATE event describing the new version and its textual diff
// from the previous state, plus a separate event if distribution bits changed.
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    eventLog.add(new ClusterEvent(
            ClusterEvent.Type.SYSTEMSTATE,
            "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
            fromClusterState.getTextualDifference(toClusterState),
            timeNowMs), isMaster);
    if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "Altering distribution bits in system from "
                + fromClusterState.getDistributionBitCount() + " to " +
                toClusterState.getDistributionBitCount(),
                timeNowMs), isMaster);
    }
}
// A new candidate state is needed when node/config state may have changed, or
// when a newer version was observed in ZooKeeper.
private boolean mustRecomputeCandidateClusterState() {
    return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper();
}
/**
 * Handles becoming / ceasing to be master. On the edge to master: reload
 * timestamps and wanted states from ZooKeeper, sync the state version and
 * delay the first broadcast. On the edge away: log and close the broadcast
 * gate. Pending wanted-state changes are only persisted while master.
 *
 * @return true if any database work was done
 */
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
    boolean didWork = false;
    if (masterElectionHandler.isMaster()) {
        if ( ! isMaster) {
            metricUpdater.becameMaster();
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            didWork = database.loadStartTimestamps(cluster);
            didWork |= database.loadWantedStates(databaseContext);
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                    + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
            long currentTime = timer.getCurrentTimeInMillis();
            firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
            log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be "
                    + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
        }
        isMaster = true;
        if (wantedStateChanged) {
            database.saveWantedStates(databaseContext);
            wantedStateChanged = false;
        }
    } else {
        if (isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
            firstAllowedStateBroadcast = Long.MAX_VALUE;
            metricUpdater.noLongerMaster();
        }
        wantedStateChanged = false;
        isMaster = false;
    }
    return didWork;
}
/**
 * Controller thread main loop: ticks until stopped. An interrupt stops the
 * loop cleanly; any other throwable is fatal and terminates the process.
 */
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    try {
        processingCycle = true;
        while (running) {
            tick();
        }
    } catch (InterruptedException e) {
        log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
        // Restore the interrupt status for any outer observer of this thread.
        Thread.currentThread().interrupt();
    } catch (Throwable t) {
        // Log with the full stack trace; the previous extra printStackTrace()
        // merely duplicated this on stderr.
        log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running = false; }
        System.exit(1);
    }
}
/** Callback context handed to the database layer; exposes this controller's cluster and listeners. */
public DatabaseHandler.Context databaseContext = new DatabaseHandler.Context() {
    @Override
    public ContentCluster getCluster() { return cluster; }
    @Override
    public FleetController getFleetController() { return FleetController.this; }
    @Override
    public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() { return FleetController.this; }
    @Override
    public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() { return FleetController.this; }
};
/** Blocks until at least one full tick cycle completes, or throws after the given timeout. */
public void waitForCompleteCycle(long timeoutMS) {
    long endTime = System.currentTimeMillis() + timeoutMS;
    synchronized (monitor) {
        // If a cycle is in progress we must see it finish plus one more whole
        // cycle; otherwise the next completed cycle suffices.
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true;
        try{
            while (cycleCount < wantedCycle) {
                if (System.currentTimeMillis() > endTime) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeoutMS + " ms.");
                if (!running) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                // NOTE(review): interrupts are deliberately swallowed so the
                // wait continues; this wait cannot be cancelled by interrupting.
                try{ monitor.wait(100); } catch (InterruptedException e) {}
            }
        } finally {
            waitingForCycle = false;
        }
    }
}
/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, int timeout) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeout;
    synchronized (monitor) {
        while (true) {
            // Count nodes that have acked at least the wanted state version.
            int ackedNodes = 0;
            for (NodeInfo node : cluster.getNodeInfo()) {
                if (node.getSystemStateVersionAcknowledged() >= version) {
                    ++ackedNodes;
                }
            }
            if (ackedNodes >= nodeCount) {
                log.log(LogLevel.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout + " milliseconds.");
            }
            monitor.wait(10);
        }
    }
}
/**
 * Blocks until exactly the given number of distributors and storage nodes
 * have current (non-outdated) RPC addresses in slobrok, or throws on timeout.
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, int timeoutMillis) throws InterruptedException {
    long maxTime = System.currentTimeMillis() + timeoutMillis;
    synchronized (monitor) {
        while (true) {
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfo()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            long remainingTime = maxTime - System.currentTimeMillis();
            if (remainingTime <= 0) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                        + " storage nodes registered in slobrok within timeout of " + timeoutMillis + " ms. (Got "
                        + distCount + " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}
// Simple accessors.
public boolean hasZookeeperConnection() { return !database.isClosed(); }
// NOTE(review): assumes nodeLookup is a SlobrokClient — verify for other lookups.
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
public ContentCluster getCluster() { return cluster; }
public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
public EventLog getEventLog() {
    return eventLog;
}
} |
Have you verified that there are no other components depending on versionState being updated before their constructor is run? (Such a component would have to take ConfigServerBootstrap as a ctor arg to ensure it's set up after this.) | public void run() {
// Upgrade handling: when the config server starts on a different version than
// the one last stored, redeploy every application before serving.
if (versionState.isUpgraded()) {
    log.log(LogLevel.INFO, "Configserver upgraded from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications");
    tenants.redeployApplications(deployer);
    log.log(LogLevel.INFO, "All applications redeployed");
}
// Persist the running version so the next restart is not treated as an upgrade.
versionState.saveNewVersion();
log.log(LogLevel.DEBUG, "Starting RPC server");
server.run();
log.log(LogLevel.DEBUG, "RPC server stopped");
} | versionState.saveNewVersion(); | public void run() {
// Upgrade handling: when the config server starts on a different version than
// the one last stored, redeploy every application before serving.
if (versionState.isUpgraded()) {
    log.log(LogLevel.INFO, "Configserver upgraded from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications");
    tenants.redeployApplications(deployer);
    log.log(LogLevel.INFO, "All applications redeployed");
}
// Persist the running version so the next restart is not treated as an upgrade.
versionState.saveNewVersion();
log.log(LogLevel.DEBUG, "Starting RPC server");
server.run();
log.log(LogLevel.DEBUG, "RPC server stopped");
} | class ConfigServerBootstrap extends AbstractComponent implements Runnable {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(ConfigServerBootstrap.class.getName());
private final Tenants tenants;
private final RpcServer server;
private final Thread serverThread;
private final Deployer deployer;
private final VersionState versionState;
@SuppressWarnings("UnusedParameters")
@Inject
public ConfigServerBootstrap(Tenants tenants, RpcServer server, Deployer deployer, VersionState versionState) {
    this.tenants = tenants;
    this.server = server;
    this.deployer = deployer;
    this.versionState = versionState;
    // NOTE(review): starting the thread from the constructor publishes 'this'
    // before construction completes; run() only reads the final fields assigned
    // above — re-verify if more state is added.
    this.serverThread = new Thread(this, "configserver main");
    serverThread.start();
}
@Override
public void deconstruct() {
    // Stops the RPC server and waits for the main thread to finish.
    log.log(LogLevel.INFO, "Stopping config server");
    server.stop();
    try {
        serverThread.join();
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of swallowing it, so callers
        // further up can still observe the interruption.
        Thread.currentThread().interrupt();
        log.log(LogLevel.WARNING, "Error joining server thread on shutdown: " + e.getMessage());
    }
}
@Override
} | class ConfigServerBootstrap extends AbstractComponent implements Runnable {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(ConfigServerBootstrap.class.getName());
private final Tenants tenants;
private final RpcServer server;
private final Thread serverThread;
private final Deployer deployer;
private final VersionState versionState;
@SuppressWarnings("UnusedParameters")
@Inject
public ConfigServerBootstrap(Tenants tenants, RpcServer server, Deployer deployer, VersionState versionState) {
this.tenants = tenants;
this.server = server;
this.deployer = deployer;
this.versionState = versionState;
this.serverThread = new Thread(this, "configserver main");
serverThread.start();
}
@Override
public void deconstruct() {
log.log(LogLevel.INFO, "Stopping config server");
server.stop();
try {
serverThread.join();
} catch (InterruptedException e) {
log.log(LogLevel.WARNING, "Error joining server thread on shutdown: " + e.getMessage());
}
}
@Override
} |
Yes, good question, and I have checked that no other components have ConfigServerBootstrap as constructor argument. | public void run() {
// Upgrade handling: when the config server starts on a different version than
// the one last stored, redeploy every application before serving.
if (versionState.isUpgraded()) {
    log.log(LogLevel.INFO, "Configserver upgraded from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications");
    tenants.redeployApplications(deployer);
    log.log(LogLevel.INFO, "All applications redeployed");
}
// Persist the running version so the next restart is not treated as an upgrade.
versionState.saveNewVersion();
log.log(LogLevel.DEBUG, "Starting RPC server");
server.run();
log.log(LogLevel.DEBUG, "RPC server stopped");
} | versionState.saveNewVersion(); | public void run() {
// Upgrade handling: when the config server starts on a different version than
// the one last stored, redeploy every application before serving.
if (versionState.isUpgraded()) {
    log.log(LogLevel.INFO, "Configserver upgraded from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications");
    tenants.redeployApplications(deployer);
    log.log(LogLevel.INFO, "All applications redeployed");
}
// Persist the running version so the next restart is not treated as an upgrade.
versionState.saveNewVersion();
log.log(LogLevel.DEBUG, "Starting RPC server");
server.run();
log.log(LogLevel.DEBUG, "RPC server stopped");
} | class ConfigServerBootstrap extends AbstractComponent implements Runnable {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(ConfigServerBootstrap.class.getName());
private final Tenants tenants;
private final RpcServer server;
private final Thread serverThread;
private final Deployer deployer;
private final VersionState versionState;
@SuppressWarnings("UnusedParameters")
@Inject
public ConfigServerBootstrap(Tenants tenants, RpcServer server, Deployer deployer, VersionState versionState) {
this.tenants = tenants;
this.server = server;
this.deployer = deployer;
this.versionState = versionState;
this.serverThread = new Thread(this, "configserver main");
serverThread.start();
}
@Override
public void deconstruct() {
log.log(LogLevel.INFO, "Stopping config server");
server.stop();
try {
serverThread.join();
} catch (InterruptedException e) {
log.log(LogLevel.WARNING, "Error joining server thread on shutdown: " + e.getMessage());
}
}
@Override
} | class ConfigServerBootstrap extends AbstractComponent implements Runnable {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(ConfigServerBootstrap.class.getName());
private final Tenants tenants;
private final RpcServer server;
private final Thread serverThread;
private final Deployer deployer;
private final VersionState versionState;
@SuppressWarnings("UnusedParameters")
@Inject
public ConfigServerBootstrap(Tenants tenants, RpcServer server, Deployer deployer, VersionState versionState) {
this.tenants = tenants;
this.server = server;
this.deployer = deployer;
this.versionState = versionState;
this.serverThread = new Thread(this, "configserver main");
serverThread.start();
}
@Override
public void deconstruct() {
log.log(LogLevel.INFO, "Stopping config server");
server.stop();
try {
serverThread.join();
} catch (InterruptedException e) {
log.log(LogLevel.WARNING, "Error joining server thread on shutdown: " + e.getMessage());
}
}
@Override
} |
👍 | public void run() {
// Upgrade handling: when the config server starts on a different version than
// the one last stored, redeploy every application before serving.
if (versionState.isUpgraded()) {
    log.log(LogLevel.INFO, "Configserver upgraded from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications");
    tenants.redeployApplications(deployer);
    log.log(LogLevel.INFO, "All applications redeployed");
}
// Persist the running version so the next restart is not treated as an upgrade.
versionState.saveNewVersion();
log.log(LogLevel.DEBUG, "Starting RPC server");
server.run();
log.log(LogLevel.DEBUG, "RPC server stopped");
} | versionState.saveNewVersion(); | public void run() {
// Upgrade handling: when the config server starts on a different version than
// the one last stored, redeploy every application before serving.
if (versionState.isUpgraded()) {
    log.log(LogLevel.INFO, "Configserver upgraded from " + versionState.storedVersion() + " to " + versionState.currentVersion() + ". Redeploying all applications");
    tenants.redeployApplications(deployer);
    log.log(LogLevel.INFO, "All applications redeployed");
}
// Persist the running version so the next restart is not treated as an upgrade.
versionState.saveNewVersion();
log.log(LogLevel.DEBUG, "Starting RPC server");
server.run();
log.log(LogLevel.DEBUG, "RPC server stopped");
} | class ConfigServerBootstrap extends AbstractComponent implements Runnable {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(ConfigServerBootstrap.class.getName());
private final Tenants tenants;
private final RpcServer server;
private final Thread serverThread;
private final Deployer deployer;
private final VersionState versionState;
@SuppressWarnings("UnusedParameters")
@Inject
public ConfigServerBootstrap(Tenants tenants, RpcServer server, Deployer deployer, VersionState versionState) {
this.tenants = tenants;
this.server = server;
this.deployer = deployer;
this.versionState = versionState;
this.serverThread = new Thread(this, "configserver main");
serverThread.start();
}
@Override
public void deconstruct() {
log.log(LogLevel.INFO, "Stopping config server");
server.stop();
try {
serverThread.join();
} catch (InterruptedException e) {
log.log(LogLevel.WARNING, "Error joining server thread on shutdown: " + e.getMessage());
}
}
@Override
} | class ConfigServerBootstrap extends AbstractComponent implements Runnable {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(ConfigServerBootstrap.class.getName());
private final Tenants tenants;
private final RpcServer server;
private final Thread serverThread;
private final Deployer deployer;
private final VersionState versionState;
@SuppressWarnings("UnusedParameters")
@Inject
public ConfigServerBootstrap(Tenants tenants, RpcServer server, Deployer deployer, VersionState versionState) {
this.tenants = tenants;
this.server = server;
this.deployer = deployer;
this.versionState = versionState;
this.serverThread = new Thread(this, "configserver main");
serverThread.start();
}
@Override
public void deconstruct() {
log.log(LogLevel.INFO, "Stopping config server");
server.stop();
try {
serverThread.join();
} catch (InterruptedException e) {
log.log(LogLevel.WARNING, "Error joining server thread on shutdown: " + e.getMessage());
}
}
@Override
} |
In case the key has been garbage collected, but after some more research is seems like GCed keys are filtered out. | private Stream<DeactivatedContainer> deactivatedContainerStream() {
synchronized (lock) {
    return activeContainers.entrySet().stream()
            // WeakHashMap: entries whose key was GC'ed are already skipped by
            // entrySet(), so this null check is believed redundant — TODO confirm.
            .filter(e -> e.getKey() != null)
            // NOTE(review): isDeactivated() returns true while timeDeactived is
            // null (i.e. inverted relative to its name); the '!' here compensates
            // so that only deactivated containers pass. Verify before changing.
            .filter(e -> !e.getValue().isDeactivated())
            .map(e -> new DeactivatedContainer(e.getKey(), e.getValue().timeActivated, e.getValue().timeDeactived));
}
} | .filter(e -> e.getKey() != null) | private Stream<DeactivatedContainer> deactivatedContainerStream() {
synchronized (lock) {
    return activeContainers.entrySet().stream()
            // Keep only deactivated containers; GC'ed keys are already
            // filtered out by WeakHashMap's entrySet().
            // NOTE(review): relies on isDeactivated() meaning "has been
            // deactivated" — confirm the helper matches this polarity.
            .filter(e -> e.getValue().isDeactivated())
            .map(e -> new DeactivatedContainer(e.getKey(), e.getValue().timeActivated, e.getValue().timeDeactivated));
}
} | class ActiveContainerStatistics {
public interface Metrics {
String TOTAL_DEACTIVATED_CONTAINERS = "jdisc.deactivated_containers.total";
String DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES = "jdisc.deactivated_containers.with_retained_refs";
}
private static final Logger log = Logger.getLogger(ActiveContainerStatistics.class.getName());
private final WeakHashMap<ActiveContainer, ActiveContainerStats> activeContainers = new WeakHashMap<>();
private final Object lock = new Object();
/** Registers a newly activated container with the current time as activation time. */
public void onActivated(ActiveContainer activeContainer) {
    synchronized (lock) {
        activeContainers.put(activeContainer, new ActiveContainerStats(Instant.now()));
    }
}
/**
 * Marks a previously activated container as deactivated.
 *
 * @throws IllegalStateException if onActivated() was never called for it
 */
public void onDeactivated(ActiveContainer activeContainer) {
    synchronized (lock) {
        ActiveContainerStats containerStats = activeContainers.get(activeContainer);
        if (containerStats == null) {
            throw new IllegalStateException("onActivated() has not been called for container: " + activeContainer);
        }
        containerStats.setTimeDeactived(Instant.now());
    }
}
/** Publishes total and retained-reference deactivated-container counts to the metric sink. */
public void outputMetrics(Metric metric) {
    synchronized (lock) {
        DeactivatedContainerMetrics metrics = deactivatedContainerStream()
                .collect(
                        DeactivatedContainerMetrics::new,
                        DeactivatedContainerMetrics::aggregate,
                        DeactivatedContainerMetrics::merge);
        metric.set(Metrics.TOTAL_DEACTIVATED_CONTAINERS, metrics.deactivatedContainerCount, null);
        metric.set(Metrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, metrics.deactivatedContainersWithRetainedRefsCount, null);
    }
}
/** Logs a warning summary if deactivated containers are still present (a leak indicator). */
public void printSummaryToLog() {
    synchronized (lock) {
        List<DeactivatedContainer> deactivatedContainers = deactivatedContainerStream().collect(toList());
        if (deactivatedContainers.isEmpty()) {
            return;
        }
        log.warning(
                "Multiple instances of ActiveContainer leaked! " + deactivatedContainers.size() +
                " instances are still present.");
        deactivatedContainers.stream()
                .map(c -> " - " + c.toSummaryString())
                .forEach(log::warning);
    }
}
// Mutable activation/deactivation timestamps tracked per container.
private static class ActiveContainerStats {
    public final Instant timeActivated;
    // Null until the container is deactivated.
    public Instant timeDeactived;
    public ActiveContainerStats(Instant timeActivated) {
        this.timeActivated = timeActivated;
    }
    public void setTimeDeactived(Instant instant) {
        this.timeDeactived = instant;
    }
    // NOTE(review): returns true while timeDeactived is still null, i.e. the
    // opposite of what the name suggests; callers compensate with '!'.
    // Verify intent before changing either side.
    public boolean isDeactivated() {
        return timeDeactived == null;
    }
}
// Immutable snapshot of a deactivated container, used for metrics and the
// leak summary log.
private static class DeactivatedContainer {
    public final ActiveContainer activeContainer;
    public final Instant timeActivated;
    public final Instant timeDeactivated;
    public DeactivatedContainer(ActiveContainer activeContainer, Instant timeActivated, Instant timeDeactivated) {
        this.activeContainer = activeContainer;
        this.timeActivated = timeActivated;
        this.timeDeactivated = timeDeactivated;
    }
    // One human-readable line for the leak summary log.
    public String toSummaryString() {
        return String.format("%s: timeActivated=%s, timeDeactivated=%s, retainCount=%d",
                activeContainer.toString(),
                timeActivated.toString(),
                timeDeactivated.toString(),
                activeContainer.retainCount());
    }
}
// Accumulator for metrics over deactivated containers; used as the mutable
// result container in a stream collect().
private static class DeactivatedContainerMetrics {
    public int deactivatedContainerCount = 0;
    public int deactivatedContainersWithRetainedRefsCount = 0;
    public void aggregate(DeactivatedContainer deactivatedContainer) {
        ++deactivatedContainerCount;
        // A positive retain count after deactivation indicates retained references.
        if (deactivatedContainer.activeContainer.retainCount() > 0) {
            ++deactivatedContainersWithRetainedRefsCount;
        }
    }
    public DeactivatedContainerMetrics merge(DeactivatedContainerMetrics other) {
        deactivatedContainerCount += other.deactivatedContainerCount;
        deactivatedContainersWithRetainedRefsCount += other.deactivatedContainersWithRetainedRefsCount;
        return this;
    }
}
} | class only constructible from this package
/** Publishes total and retained-reference deactivated-container counts to the metric sink. */
public void emitMetrics(Metric metric) {
    synchronized (lock) {
        DeactivatedContainerMetrics metrics = deactivatedContainerStream()
                .collect(
                        DeactivatedContainerMetrics::new,
                        DeactivatedContainerMetrics::aggregate,
                        DeactivatedContainerMetrics::merge);
        metric.set(Metrics.TOTAL_DEACTIVATED_CONTAINERS, metrics.deactivatedContainerCount, null);
        metric.set(Metrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, metrics.deactivatedContainersWithRetainedRefsCount, null);
    }
}
Perhaps you should bring the information from the nfe further too ? | public static Struct fromString(String str) {
// First try the human-readable degrees form; fall back to the legacy "x;y"
// micro-degrees integer form.
try {
    DegreesParser d = new DegreesParser(str);
    return valueOf((int)(d.longitude * 1000000), (int)(d.latitude * 1000000));
} catch (IllegalArgumentException e) {
    try {
        String[] arr = str.split(";");
        if (arr.length == 2) {
            int x = Integer.parseInt(arr[0]);
            int y = Integer.parseInt(arr[1]);
            return valueOf(x, y);
        }
    } catch (NumberFormatException nfe) {
        // Keep the fallback failure visible instead of swallowing it.
        IllegalArgumentException failure = new IllegalArgumentException(
                "Could not parse '"+str+"' as geo coordinates: "+e.getMessage(), e);
        failure.addSuppressed(nfe);
        throw failure;
    }
    // Chain the original cause so the full parse failure is diagnosable.
    throw new IllegalArgumentException("Could not parse '"+str+"' as geo coordinates: "+e.getMessage(), e);
}
} | public static Struct fromString(String str) {
try {
DegreesParser d = new DegreesParser(str);
return valueOf((int)(d.longitude * 1000000), (int)(d.latitude * 1000000));
} catch (IllegalArgumentException e) {
try {
String[] arr = str.split(";");
if (arr.length == 2) {
int x = Integer.parseInt(arr[0]);
int y = Integer.parseInt(arr[1]);
return valueOf(x, y);
}
} catch (NumberFormatException nfe) {
}
throw new IllegalArgumentException("Could not parse '"+str+"' as geo coordinates: "+e.getMessage());
}
} | class PositionDataType {
public static final StructDataType INSTANCE = newInstance();
public static final String STRUCT_NAME = "position";
public static final String FIELD_X = "x";
public static final String FIELD_Y = "y";
private static final Field FFIELD_X = INSTANCE.getField(FIELD_X);
private static final Field FFIELD_Y = INSTANCE.getField(FIELD_Y);
private static final DecimalFormat degreeFmt;
static {
degreeFmt = new DecimalFormat("0.0
degreeFmt.setMinimumIntegerDigits(1);
degreeFmt.setMinimumFractionDigits(1);
degreeFmt.setMaximumFractionDigits(6);
}
static String fmtD(double degrees) {
return degreeFmt.format(degrees);
}
private PositionDataType() {
}
public static String renderAsString(Struct pos) {
StringBuilder buf = new StringBuilder();
double ns = getYValue(pos).getInteger() / 1.0e6;
double ew = getXValue(pos).getInteger() / 1.0e6;
buf.append(ns < 0 ? "S" : "N");
buf.append(fmtD(ns < 0 ? (-ns) : ns));
buf.append(";");
buf.append(ew < 0 ? "W" : "E");
buf.append(fmtD(ew < 0 ? (-ew) : ew));
return buf.toString();
}
public static void renderXml(Struct pos, XmlStream target) {
target.addContent(renderAsString(pos));
}
public static Struct valueOf(Integer x, Integer y) {
Struct ret = new Struct(INSTANCE);
ret.setFieldValue(FIELD_X, x != null ? new IntegerFieldValue(x) : null);
ret.setFieldValue(FIELD_Y, y != null ? new IntegerFieldValue(y) : null);
return ret;
}
public static Struct fromLong(long val) {
return valueOf((int)(val >> 32), (int)val);
}
public static IntegerFieldValue getXValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_X, IntegerFieldValue.class);
}
public static IntegerFieldValue getYValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_Y, IntegerFieldValue.class);
}
public static String getZCurveFieldName(String fieldName) {
return fieldName + "_zcurve";
}
public static String getPositionSummaryFieldName(String fieldName) {
return fieldName + ".position";
}
public static String getDistanceSummaryFieldName(String fieldName) {
return fieldName + ".distance";
}
private static StructDataType newInstance() {
StructDataType ret = new StructDataType(STRUCT_NAME);
ret.addField(new Field(FIELD_X, DataType.INT));
ret.addField(new Field(FIELD_Y, DataType.INT));
return ret;
}
} | class PositionDataType {
public static final StructDataType INSTANCE = newInstance();
public static final String STRUCT_NAME = "position";
public static final String FIELD_X = "x";
public static final String FIELD_Y = "y";
private static final Field FFIELD_X = INSTANCE.getField(FIELD_X);
private static final Field FFIELD_Y = INSTANCE.getField(FIELD_Y);
private static final DecimalFormat degreeFmt;
static {
degreeFmt = new DecimalFormat("0.0
degreeFmt.setMinimumIntegerDigits(1);
degreeFmt.setMinimumFractionDigits(1);
degreeFmt.setMaximumFractionDigits(6);
}
static String fmtD(double degrees) {
return degreeFmt.format(degrees);
}
private PositionDataType() {
}
public static String renderAsString(Struct pos) {
StringBuilder buf = new StringBuilder();
double ns = getYValue(pos).getInteger() / 1.0e6;
double ew = getXValue(pos).getInteger() / 1.0e6;
buf.append(ns < 0 ? "S" : "N");
buf.append(fmtD(ns < 0 ? (-ns) : ns));
buf.append(";");
buf.append(ew < 0 ? "W" : "E");
buf.append(fmtD(ew < 0 ? (-ew) : ew));
return buf.toString();
}
public static void renderXml(Struct pos, XmlStream target) {
target.addContent(renderAsString(pos));
}
public static Struct valueOf(Integer x, Integer y) {
Struct ret = new Struct(INSTANCE);
ret.setFieldValue(FIELD_X, x != null ? new IntegerFieldValue(x) : null);
ret.setFieldValue(FIELD_Y, y != null ? new IntegerFieldValue(y) : null);
return ret;
}
public static Struct fromLong(long val) {
return valueOf((int)(val >> 32), (int)val);
}
public static IntegerFieldValue getXValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_X, IntegerFieldValue.class);
}
public static IntegerFieldValue getYValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_Y, IntegerFieldValue.class);
}
public static String getZCurveFieldName(String fieldName) {
return fieldName + "_zcurve";
}
public static String getPositionSummaryFieldName(String fieldName) {
return fieldName + ".position";
}
public static String getDistanceSummaryFieldName(String fieldName) {
return fieldName + ".distance";
}
private static StructDataType newInstance() {
StructDataType ret = new StructDataType(STRUCT_NAME);
ret.addField(new Field(FIELD_X, DataType.INT));
ret.addField(new Field(FIELD_Y, DataType.INT));
return ret;
}
} | |
i did consider also showing nfe but it seems that would just be confusing to customers (the pair-of-integers format is just internal and probably undocumented) | public static Struct fromString(String str) {
try {
DegreesParser d = new DegreesParser(str);
return valueOf((int)(d.longitude * 1000000), (int)(d.latitude * 1000000));
} catch (IllegalArgumentException e) {
try {
String[] arr = str.split(";");
if (arr.length == 2) {
int x = Integer.parseInt(arr[0]);
int y = Integer.parseInt(arr[1]);
return valueOf(x, y);
}
} catch (NumberFormatException nfe) {
}
throw new IllegalArgumentException("Could not parse '"+str+"' as geo coordinates: "+e.getMessage());
}
} | public static Struct fromString(String str) {
try {
DegreesParser d = new DegreesParser(str);
return valueOf((int)(d.longitude * 1000000), (int)(d.latitude * 1000000));
} catch (IllegalArgumentException e) {
try {
String[] arr = str.split(";");
if (arr.length == 2) {
int x = Integer.parseInt(arr[0]);
int y = Integer.parseInt(arr[1]);
return valueOf(x, y);
}
} catch (NumberFormatException nfe) {
}
throw new IllegalArgumentException("Could not parse '"+str+"' as geo coordinates: "+e.getMessage());
}
} | class PositionDataType {
public static final StructDataType INSTANCE = newInstance();
public static final String STRUCT_NAME = "position";
public static final String FIELD_X = "x";
public static final String FIELD_Y = "y";
private static final Field FFIELD_X = INSTANCE.getField(FIELD_X);
private static final Field FFIELD_Y = INSTANCE.getField(FIELD_Y);
private static final DecimalFormat degreeFmt;
static {
degreeFmt = new DecimalFormat("0.0
degreeFmt.setMinimumIntegerDigits(1);
degreeFmt.setMinimumFractionDigits(1);
degreeFmt.setMaximumFractionDigits(6);
}
static String fmtD(double degrees) {
return degreeFmt.format(degrees);
}
private PositionDataType() {
}
public static String renderAsString(Struct pos) {
StringBuilder buf = new StringBuilder();
double ns = getYValue(pos).getInteger() / 1.0e6;
double ew = getXValue(pos).getInteger() / 1.0e6;
buf.append(ns < 0 ? "S" : "N");
buf.append(fmtD(ns < 0 ? (-ns) : ns));
buf.append(";");
buf.append(ew < 0 ? "W" : "E");
buf.append(fmtD(ew < 0 ? (-ew) : ew));
return buf.toString();
}
public static void renderXml(Struct pos, XmlStream target) {
target.addContent(renderAsString(pos));
}
public static Struct valueOf(Integer x, Integer y) {
Struct ret = new Struct(INSTANCE);
ret.setFieldValue(FIELD_X, x != null ? new IntegerFieldValue(x) : null);
ret.setFieldValue(FIELD_Y, y != null ? new IntegerFieldValue(y) : null);
return ret;
}
public static Struct fromLong(long val) {
return valueOf((int)(val >> 32), (int)val);
}
public static IntegerFieldValue getXValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_X, IntegerFieldValue.class);
}
public static IntegerFieldValue getYValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_Y, IntegerFieldValue.class);
}
public static String getZCurveFieldName(String fieldName) {
return fieldName + "_zcurve";
}
public static String getPositionSummaryFieldName(String fieldName) {
return fieldName + ".position";
}
public static String getDistanceSummaryFieldName(String fieldName) {
return fieldName + ".distance";
}
private static StructDataType newInstance() {
StructDataType ret = new StructDataType(STRUCT_NAME);
ret.addField(new Field(FIELD_X, DataType.INT));
ret.addField(new Field(FIELD_Y, DataType.INT));
return ret;
}
} | class PositionDataType {
public static final StructDataType INSTANCE = newInstance();
public static final String STRUCT_NAME = "position";
public static final String FIELD_X = "x";
public static final String FIELD_Y = "y";
private static final Field FFIELD_X = INSTANCE.getField(FIELD_X);
private static final Field FFIELD_Y = INSTANCE.getField(FIELD_Y);
private static final DecimalFormat degreeFmt;
static {
degreeFmt = new DecimalFormat("0.0
degreeFmt.setMinimumIntegerDigits(1);
degreeFmt.setMinimumFractionDigits(1);
degreeFmt.setMaximumFractionDigits(6);
}
static String fmtD(double degrees) {
return degreeFmt.format(degrees);
}
private PositionDataType() {
}
public static String renderAsString(Struct pos) {
StringBuilder buf = new StringBuilder();
double ns = getYValue(pos).getInteger() / 1.0e6;
double ew = getXValue(pos).getInteger() / 1.0e6;
buf.append(ns < 0 ? "S" : "N");
buf.append(fmtD(ns < 0 ? (-ns) : ns));
buf.append(";");
buf.append(ew < 0 ? "W" : "E");
buf.append(fmtD(ew < 0 ? (-ew) : ew));
return buf.toString();
}
public static void renderXml(Struct pos, XmlStream target) {
target.addContent(renderAsString(pos));
}
public static Struct valueOf(Integer x, Integer y) {
Struct ret = new Struct(INSTANCE);
ret.setFieldValue(FIELD_X, x != null ? new IntegerFieldValue(x) : null);
ret.setFieldValue(FIELD_Y, y != null ? new IntegerFieldValue(y) : null);
return ret;
}
public static Struct fromLong(long val) {
return valueOf((int)(val >> 32), (int)val);
}
public static IntegerFieldValue getXValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_X, IntegerFieldValue.class);
}
public static IntegerFieldValue getYValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_Y, IntegerFieldValue.class);
}
public static String getZCurveFieldName(String fieldName) {
return fieldName + "_zcurve";
}
public static String getPositionSummaryFieldName(String fieldName) {
return fieldName + ".position";
}
public static String getDistanceSummaryFieldName(String fieldName) {
return fieldName + ".distance";
}
private static StructDataType newInstance() {
StructDataType ret = new StructDataType(STRUCT_NAME);
ret.addField(new Field(FIELD_X, DataType.INT));
ret.addField(new Field(FIELD_Y, DataType.INT));
return ret;
}
} | |
Ok | public static Struct fromString(String str) {
try {
DegreesParser d = new DegreesParser(str);
return valueOf((int)(d.longitude * 1000000), (int)(d.latitude * 1000000));
} catch (IllegalArgumentException e) {
try {
String[] arr = str.split(";");
if (arr.length == 2) {
int x = Integer.parseInt(arr[0]);
int y = Integer.parseInt(arr[1]);
return valueOf(x, y);
}
} catch (NumberFormatException nfe) {
}
throw new IllegalArgumentException("Could not parse '"+str+"' as geo coordinates: "+e.getMessage());
}
} | public static Struct fromString(String str) {
try {
DegreesParser d = new DegreesParser(str);
return valueOf((int)(d.longitude * 1000000), (int)(d.latitude * 1000000));
} catch (IllegalArgumentException e) {
try {
String[] arr = str.split(";");
if (arr.length == 2) {
int x = Integer.parseInt(arr[0]);
int y = Integer.parseInt(arr[1]);
return valueOf(x, y);
}
} catch (NumberFormatException nfe) {
}
throw new IllegalArgumentException("Could not parse '"+str+"' as geo coordinates: "+e.getMessage());
}
} | class PositionDataType {
public static final StructDataType INSTANCE = newInstance();
public static final String STRUCT_NAME = "position";
public static final String FIELD_X = "x";
public static final String FIELD_Y = "y";
private static final Field FFIELD_X = INSTANCE.getField(FIELD_X);
private static final Field FFIELD_Y = INSTANCE.getField(FIELD_Y);
private static final DecimalFormat degreeFmt;
static {
degreeFmt = new DecimalFormat("0.0
degreeFmt.setMinimumIntegerDigits(1);
degreeFmt.setMinimumFractionDigits(1);
degreeFmt.setMaximumFractionDigits(6);
}
static String fmtD(double degrees) {
return degreeFmt.format(degrees);
}
private PositionDataType() {
}
public static String renderAsString(Struct pos) {
StringBuilder buf = new StringBuilder();
double ns = getYValue(pos).getInteger() / 1.0e6;
double ew = getXValue(pos).getInteger() / 1.0e6;
buf.append(ns < 0 ? "S" : "N");
buf.append(fmtD(ns < 0 ? (-ns) : ns));
buf.append(";");
buf.append(ew < 0 ? "W" : "E");
buf.append(fmtD(ew < 0 ? (-ew) : ew));
return buf.toString();
}
public static void renderXml(Struct pos, XmlStream target) {
target.addContent(renderAsString(pos));
}
public static Struct valueOf(Integer x, Integer y) {
Struct ret = new Struct(INSTANCE);
ret.setFieldValue(FIELD_X, x != null ? new IntegerFieldValue(x) : null);
ret.setFieldValue(FIELD_Y, y != null ? new IntegerFieldValue(y) : null);
return ret;
}
public static Struct fromLong(long val) {
return valueOf((int)(val >> 32), (int)val);
}
public static IntegerFieldValue getXValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_X, IntegerFieldValue.class);
}
public static IntegerFieldValue getYValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_Y, IntegerFieldValue.class);
}
public static String getZCurveFieldName(String fieldName) {
return fieldName + "_zcurve";
}
public static String getPositionSummaryFieldName(String fieldName) {
return fieldName + ".position";
}
public static String getDistanceSummaryFieldName(String fieldName) {
return fieldName + ".distance";
}
private static StructDataType newInstance() {
StructDataType ret = new StructDataType(STRUCT_NAME);
ret.addField(new Field(FIELD_X, DataType.INT));
ret.addField(new Field(FIELD_Y, DataType.INT));
return ret;
}
} | class PositionDataType {
public static final StructDataType INSTANCE = newInstance();
public static final String STRUCT_NAME = "position";
public static final String FIELD_X = "x";
public static final String FIELD_Y = "y";
private static final Field FFIELD_X = INSTANCE.getField(FIELD_X);
private static final Field FFIELD_Y = INSTANCE.getField(FIELD_Y);
private static final DecimalFormat degreeFmt;
static {
degreeFmt = new DecimalFormat("0.0
degreeFmt.setMinimumIntegerDigits(1);
degreeFmt.setMinimumFractionDigits(1);
degreeFmt.setMaximumFractionDigits(6);
}
static String fmtD(double degrees) {
return degreeFmt.format(degrees);
}
private PositionDataType() {
}
public static String renderAsString(Struct pos) {
StringBuilder buf = new StringBuilder();
double ns = getYValue(pos).getInteger() / 1.0e6;
double ew = getXValue(pos).getInteger() / 1.0e6;
buf.append(ns < 0 ? "S" : "N");
buf.append(fmtD(ns < 0 ? (-ns) : ns));
buf.append(";");
buf.append(ew < 0 ? "W" : "E");
buf.append(fmtD(ew < 0 ? (-ew) : ew));
return buf.toString();
}
public static void renderXml(Struct pos, XmlStream target) {
target.addContent(renderAsString(pos));
}
public static Struct valueOf(Integer x, Integer y) {
Struct ret = new Struct(INSTANCE);
ret.setFieldValue(FIELD_X, x != null ? new IntegerFieldValue(x) : null);
ret.setFieldValue(FIELD_Y, y != null ? new IntegerFieldValue(y) : null);
return ret;
}
public static Struct fromLong(long val) {
return valueOf((int)(val >> 32), (int)val);
}
public static IntegerFieldValue getXValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_X, IntegerFieldValue.class);
}
public static IntegerFieldValue getYValue(FieldValue pos) {
return Struct.getFieldValue(pos, INSTANCE, FFIELD_Y, IntegerFieldValue.class);
}
public static String getZCurveFieldName(String fieldName) {
return fieldName + "_zcurve";
}
public static String getPositionSummaryFieldName(String fieldName) {
return fieldName + ".position";
}
public static String getDistanceSummaryFieldName(String fieldName) {
return fieldName + ".distance";
}
private static StructDataType newInstance() {
StructDataType ret = new StructDataType(STRUCT_NAME);
ret.addField(new Field(FIELD_X, DataType.INT));
ret.addField(new Field(FIELD_Y, DataType.INT));
return ret;
}
} | |
I suggest testing that the duplicated output is at least as long as the 'original' input and that the data is the same (for the common prefix) | public void testPipedInput() throws IOException {
PipedOutputStream input = new PipedOutputStream();
PipedInputStream in = new PipedInputStream(input);
ByteArrayOutputStream gotten = new ByteArrayOutputStream();
TeeInputStream tee = new TeeInputStream(in, gotten);
input.write("first input".getBytes(StandardCharsets.UTF_8));
int b = tee.read();
assertThat(b, is((int)'f'));
assertThat(gotten.toString(), is("first input"));
input.write(" second input".getBytes(StandardCharsets.UTF_8));
b = tee.read();
assertThat(b, is((int)'i'));
assertThat(gotten.toString(), is("first input second input"));
new Thread(new Generator(input)).start();
b = tee.read();
assertThat(b, is((int)'r'));
byte[] ba = new byte[9];
for (int i = 0; i < 12345; i++) {
b = tee.read();
assertThat(b, is(greaterThan(0)));
assertThat(tee.read(ba), is(greaterThan(0)));
}
tee.close();
String got = gotten.toString();
assertThat(got.length(), is(greaterThan(34567)));
} | } | public void testPipedInput() throws IOException {
PipedOutputStream input = new PipedOutputStream();
PipedInputStream in = new PipedInputStream(input);
ByteArrayOutputStream gotten = new ByteArrayOutputStream();
ByteArrayOutputStream output = new ByteArrayOutputStream();
TeeInputStream tee = new TeeInputStream(in, gotten);
input.write("first input".getBytes(StandardCharsets.UTF_8));
int b = tee.read();
assertThat(b, is((int)'f'));
output.write(b);
assertThat(gotten.toString(), is("first input"));
input.write(" second input".getBytes(StandardCharsets.UTF_8));
b = tee.read();
assertThat(b, is((int)'i'));
output.write(b);
assertThat(gotten.toString(), is("first input second input"));
new Thread(new Generator(input)).start();
b = tee.read();
assertThat(b, is((int)'r'));
output.write(b);
byte[] ba = new byte[9];
for (int i = 0; i < 12345; i++) {
b = tee.read();
assertThat(b, is(greaterThan(0)));
output.write(b);
int l = tee.read(ba);
assertThat(l, is(greaterThan(0)));
output.write(ba, 0, l);
l = tee.read(ba, 3, 3);
assertThat(l, is(greaterThan(0)));
output.write(ba, 3, l);
}
tee.close();
String got = gotten.toString();
assertThat(got.length(), is(greaterThan(34567)));
assertTrue(got.startsWith(output.toString()));
} | class Generator implements Runnable {
private OutputStream dst;
public Generator(OutputStream dst) { this.dst = dst; }
public @Override void run() {
for (int i = 0; i < 123456789; i++) {
int b = i & 0x7f;
if (b < 32) continue;
if (b > 126) b = '\n';
try {
dst.write(b);
} catch (IOException e) {
return;
}
}
}
} | class Generator implements Runnable {
private OutputStream dst;
public Generator(OutputStream dst) { this.dst = dst; }
@Override
public void run() {
for (int i = 0; i < 123456789; i++) {
int b = i & 0x7f;
if (b < 32) continue;
if (b > 126) b = '\n';
try {
dst.write(b);
} catch (IOException e) {
return;
}
}
}
} |
consider copying read bytes into an output array and comparing with (expected) input | public void testSimpleInput() throws IOException {
byte[] input = "very simple input".getBytes(StandardCharsets.UTF_8);
ByteArrayInputStream in = new ByteArrayInputStream(input);
ByteArrayOutputStream gotten = new ByteArrayOutputStream();
TeeInputStream tee = new TeeInputStream(in, gotten);
int b = tee.read();
assertThat(b, is((int)'v'));
assertThat(gotten.toString(), is("very simple input"));
for (int i = 0; i < 16; i++) {
b = tee.read();
assertThat(b, is(greaterThan(0)));
}
assertThat(tee.read(), is(-1));
} | assertThat(b, is(greaterThan(0))); | public void testSimpleInput() throws IOException {
byte[] input = "very simple input".getBytes(StandardCharsets.UTF_8);
ByteArrayInputStream in = new ByteArrayInputStream(input);
ByteArrayOutputStream gotten = new ByteArrayOutputStream();
ByteArrayOutputStream output = new ByteArrayOutputStream();
TeeInputStream tee = new TeeInputStream(in, gotten);
int b = tee.read();
assertThat(b, is((int)'v'));
output.write(b);
assertThat(gotten.toString(), is("very simple input"));
for (int i = 0; i < 16; i++) {
b = tee.read();
assertThat(b, is(greaterThan(0)));
output.write(b);
}
assertThat(tee.read(), is(-1));
assertThat(gotten.toString(), is("very simple input"));
assertThat(output.toString(), is("very simple input"));
} | class TeeInputStreamTest {
@Test
private class Generator implements Runnable {
private OutputStream dst;
public Generator(OutputStream dst) { this.dst = dst; }
public @Override void run() {
for (int i = 0; i < 123456789; i++) {
int b = i & 0x7f;
if (b < 32) continue;
if (b > 126) b = '\n';
try {
dst.write(b);
} catch (IOException e) {
return;
}
}
}
}
@Test
public void testPipedInput() throws IOException {
PipedOutputStream input = new PipedOutputStream();
PipedInputStream in = new PipedInputStream(input);
ByteArrayOutputStream gotten = new ByteArrayOutputStream();
TeeInputStream tee = new TeeInputStream(in, gotten);
input.write("first input".getBytes(StandardCharsets.UTF_8));
int b = tee.read();
assertThat(b, is((int)'f'));
assertThat(gotten.toString(), is("first input"));
input.write(" second input".getBytes(StandardCharsets.UTF_8));
b = tee.read();
assertThat(b, is((int)'i'));
assertThat(gotten.toString(), is("first input second input"));
new Thread(new Generator(input)).start();
b = tee.read();
assertThat(b, is((int)'r'));
byte[] ba = new byte[9];
for (int i = 0; i < 12345; i++) {
b = tee.read();
assertThat(b, is(greaterThan(0)));
assertThat(tee.read(ba), is(greaterThan(0)));
}
tee.close();
String got = gotten.toString();
assertThat(got.length(), is(greaterThan(34567)));
}
} | class TeeInputStreamTest {
@Test
private class Generator implements Runnable {
private OutputStream dst;
public Generator(OutputStream dst) { this.dst = dst; }
@Override
public void run() {
for (int i = 0; i < 123456789; i++) {
int b = i & 0x7f;
if (b < 32) continue;
if (b > 126) b = '\n';
try {
dst.write(b);
} catch (IOException e) {
return;
}
}
}
}
@Test
public void testPipedInput() throws IOException {
PipedOutputStream input = new PipedOutputStream();
PipedInputStream in = new PipedInputStream(input);
ByteArrayOutputStream gotten = new ByteArrayOutputStream();
ByteArrayOutputStream output = new ByteArrayOutputStream();
TeeInputStream tee = new TeeInputStream(in, gotten);
input.write("first input".getBytes(StandardCharsets.UTF_8));
int b = tee.read();
assertThat(b, is((int)'f'));
output.write(b);
assertThat(gotten.toString(), is("first input"));
input.write(" second input".getBytes(StandardCharsets.UTF_8));
b = tee.read();
assertThat(b, is((int)'i'));
output.write(b);
assertThat(gotten.toString(), is("first input second input"));
new Thread(new Generator(input)).start();
b = tee.read();
assertThat(b, is((int)'r'));
output.write(b);
byte[] ba = new byte[9];
for (int i = 0; i < 12345; i++) {
b = tee.read();
assertThat(b, is(greaterThan(0)));
output.write(b);
int l = tee.read(ba);
assertThat(l, is(greaterThan(0)));
output.write(ba, 0, l);
l = tee.read(ba, 3, 3);
assertThat(l, is(greaterThan(0)));
output.write(ba, 3, l);
}
tee.close();
String got = gotten.toString();
assertThat(got.length(), is(greaterThan(34567)));
assertTrue(got.startsWith(output.toString()));
}
} |
consider also reading to array with offset and length | public void testPipedInput() throws IOException {
PipedOutputStream input = new PipedOutputStream();
PipedInputStream in = new PipedInputStream(input);
ByteArrayOutputStream gotten = new ByteArrayOutputStream();
TeeInputStream tee = new TeeInputStream(in, gotten);
input.write("first input".getBytes(StandardCharsets.UTF_8));
int b = tee.read();
assertThat(b, is((int)'f'));
assertThat(gotten.toString(), is("first input"));
input.write(" second input".getBytes(StandardCharsets.UTF_8));
b = tee.read();
assertThat(b, is((int)'i'));
assertThat(gotten.toString(), is("first input second input"));
new Thread(new Generator(input)).start();
b = tee.read();
assertThat(b, is((int)'r'));
byte[] ba = new byte[9];
for (int i = 0; i < 12345; i++) {
b = tee.read();
assertThat(b, is(greaterThan(0)));
assertThat(tee.read(ba), is(greaterThan(0)));
}
tee.close();
String got = gotten.toString();
assertThat(got.length(), is(greaterThan(34567)));
} | assertThat(tee.read(ba), is(greaterThan(0))); | public void testPipedInput() throws IOException {
PipedOutputStream input = new PipedOutputStream();
PipedInputStream in = new PipedInputStream(input);
ByteArrayOutputStream gotten = new ByteArrayOutputStream();
ByteArrayOutputStream output = new ByteArrayOutputStream();
TeeInputStream tee = new TeeInputStream(in, gotten);
input.write("first input".getBytes(StandardCharsets.UTF_8));
int b = tee.read();
assertThat(b, is((int)'f'));
output.write(b);
assertThat(gotten.toString(), is("first input"));
input.write(" second input".getBytes(StandardCharsets.UTF_8));
b = tee.read();
assertThat(b, is((int)'i'));
output.write(b);
assertThat(gotten.toString(), is("first input second input"));
new Thread(new Generator(input)).start();
b = tee.read();
assertThat(b, is((int)'r'));
output.write(b);
byte[] ba = new byte[9];
for (int i = 0; i < 12345; i++) {
b = tee.read();
assertThat(b, is(greaterThan(0)));
output.write(b);
int l = tee.read(ba);
assertThat(l, is(greaterThan(0)));
output.write(ba, 0, l);
l = tee.read(ba, 3, 3);
assertThat(l, is(greaterThan(0)));
output.write(ba, 3, l);
}
tee.close();
String got = gotten.toString();
assertThat(got.length(), is(greaterThan(34567)));
assertTrue(got.startsWith(output.toString()));
} | class Generator implements Runnable {
private OutputStream dst;
public Generator(OutputStream dst) { this.dst = dst; }
public @Override void run() {
for (int i = 0; i < 123456789; i++) {
int b = i & 0x7f;
if (b < 32) continue;
if (b > 126) b = '\n';
try {
dst.write(b);
} catch (IOException e) {
return;
}
}
}
} | class Generator implements Runnable {
private OutputStream dst;
public Generator(OutputStream dst) { this.dst = dst; }
@Override
public void run() {
for (int i = 0; i < 123456789; i++) {
int b = i & 0x7f;
if (b < 32) continue;
if (b > 126) b = '\n';
try {
dst.write(b);
} catch (IOException e) {
return;
}
}
}
} |
Good. One might wonder why this was not added originally.. | protected void finalize() throws Throwable {
try {
int retainCount = retainCount();
if (retainCount > 0) {
log.warning(this + ".destroy() invoked from finalize() not through ApplicationLoader. " +
"This is an indication of either a resource leak or invalid use of reference counting. " +
"Retained references as this moment: " + retainCount);
destroy();
}
} finally {
super.finalize();
}
} | log.warning(this + ".destroy() invoked from finalize() not through ApplicationLoader. " + | protected void finalize() throws Throwable {
try {
int retainCount = retainCount();
if (retainCount > 0) {
log.warning(this + ".destroy() invoked from finalize() not through ApplicationLoader. " +
"This is an indication of either a resource leak or invalid use of reference counting. " +
"Retained references as this moment: " + retainCount);
destroy();
}
} finally {
super.finalize();
}
} | class ActiveContainer extends AbstractResource implements CurrentContainer {
private static final Logger log = Logger.getLogger(ActiveContainer.class.getName());
private final ContainerTermination termination;
private final Injector guiceInjector;
private final Iterable<ServerProvider> serverProviders;
private final ResourcePool resourceReferences = new ResourcePool();
private final Map<String, BindingSet<RequestHandler>> serverBindings;
private final Map<String, BindingSet<RequestHandler>> clientBindings;
private final BindingSetSelector bindingSetSelector;
private final TimeoutManagerImpl timeoutMgr;
public ActiveContainer(ContainerBuilder builder) {
serverProviders = builder.serverProviders().activate();
serverProviders.forEach(resourceReferences::retain);
serverBindings = builder.activateServerBindings();
serverBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
clientBindings = builder.activateClientBindings();
clientBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
bindingSetSelector = builder.getInstance(BindingSetSelector.class);
timeoutMgr = builder.getInstance(TimeoutManagerImpl.class);
timeoutMgr.start();
builder.guiceModules().install(new AbstractModule() {
@Override
protected void configure() {
bind(TimeoutManagerImpl.class).toInstance(timeoutMgr);
}
});
guiceInjector = builder.guiceModules().activate();
termination = new ContainerTermination(builder.appContext());
}
@Override
protected void destroy() {
resourceReferences.release();
timeoutMgr.shutdown();
termination.run();
}
@Override
/**
* Make this instance retain a reference to the resource until it is destroyed.
*/
void retainReference(SharedResource resource) {
resourceReferences.retain(resource);
}
public ContainerTermination shutdown() {
return termination;
}
public Injector guiceInjector() {
return guiceInjector;
}
public Iterable<ServerProvider> serverProviders() {
return serverProviders;
}
public Map<String, BindingSet<RequestHandler>> serverBindings() {
return serverBindings;
}
public BindingSet<RequestHandler> serverBindings(String setName) {
return serverBindings.get(setName);
}
public Map<String, BindingSet<RequestHandler>> clientBindings() {
return clientBindings;
}
public BindingSet<RequestHandler> clientBindings(String setName) {
return clientBindings.get(setName);
}
TimeoutManagerImpl timeoutManager() {
return timeoutMgr;
}
@Override
public ContainerSnapshot newReference(URI uri) {
String name = bindingSetSelector.select(uri);
if (name == null) {
throw new NoBindingSetSelectedException(uri);
}
BindingSet<RequestHandler> serverBindings = serverBindings(name);
BindingSet<RequestHandler> clientBindings = clientBindings(name);
if (serverBindings == null || clientBindings == null) {
throw new BindingSetNotFoundException(name);
}
return new ContainerSnapshot(this, serverBindings, clientBindings);
}
} | class ActiveContainer extends AbstractResource implements CurrentContainer {
private static final Logger log = Logger.getLogger(ActiveContainer.class.getName());
private final ContainerTermination termination;
private final Injector guiceInjector;
private final Iterable<ServerProvider> serverProviders;
// Pool of retained resources; released as a unit when this container is destroyed.
private final ResourcePool resourceReferences = new ResourcePool();
private final Map<String, BindingSet<RequestHandler>> serverBindings;
private final Map<String, BindingSet<RequestHandler>> clientBindings;
private final BindingSetSelector bindingSetSelector;
private final TimeoutManagerImpl timeoutMgr;
// Activates the builder's server providers and bindings, retaining a reference to every
// handler so they stay alive for this container's lifetime; then starts the timeout
// manager and activates the Guice modules. Activation order is significant.
public ActiveContainer(ContainerBuilder builder) {
serverProviders = builder.serverProviders().activate();
serverProviders.forEach(resourceReferences::retain);
serverBindings = builder.activateServerBindings();
serverBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
clientBindings = builder.activateClientBindings();
clientBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
bindingSetSelector = builder.getInstance(BindingSetSelector.class);
timeoutMgr = builder.getInstance(TimeoutManagerImpl.class);
timeoutMgr.start();
// Expose the started timeout manager to Guice-injected components.
builder.guiceModules().install(new AbstractModule() {
@Override
protected void configure() {
bind(TimeoutManagerImpl.class).toInstance(timeoutMgr);
}
});
guiceInjector = builder.guiceModules().activate();
termination = new ContainerTermination(builder.appContext());
}
// Releases retained resources, stops the timeout manager, then runs termination.
@Override
protected void destroy() {
resourceReferences.release();
timeoutMgr.shutdown();
termination.run();
}
// NOTE(review): the @Override annotation precedes the javadoc here, and it is not evident
// from this view which supertype method (if any) is being overridden — confirm.
@Override
/**
* Make this instance retain a reference to the resource until it is destroyed.
*/
void retainReference(SharedResource resource) {
resourceReferences.retain(resource);
}
public ContainerTermination shutdown() {
return termination;
}
public Injector guiceInjector() {
return guiceInjector;
}
public Iterable<ServerProvider> serverProviders() {
return serverProviders;
}
public Map<String, BindingSet<RequestHandler>> serverBindings() {
return serverBindings;
}
/** Returns the named server binding set, or null if no such set exists. */
public BindingSet<RequestHandler> serverBindings(String setName) {
return serverBindings.get(setName);
}
public Map<String, BindingSet<RequestHandler>> clientBindings() {
return clientBindings;
}
/** Returns the named client binding set, or null if no such set exists. */
public BindingSet<RequestHandler> clientBindings(String setName) {
return clientBindings.get(setName);
}
TimeoutManagerImpl timeoutManager() {
return timeoutMgr;
}
// Selects a binding set for the URI and wraps this container in a snapshot.
// Throws if no set is selected or the selected set is unknown.
@Override
public ContainerSnapshot newReference(URI uri) {
String name = bindingSetSelector.select(uri);
if (name == null) {
throw new NoBindingSetSelectedException(uri);
}
BindingSet<RequestHandler> serverBindings = serverBindings(name);
BindingSet<RequestHandler> clientBindings = clientBindings(name);
if (serverBindings == null || clientBindings == null) {
throw new BindingSetNotFoundException(name);
}
return new ContainerSnapshot(this, serverBindings, clientBindings);
}
}
// NOTE(review): the lines below are a review-dataset row split mid-line by ' | ' cell
// separators: the first variant of deactivatedContainerStream() (with the double-negated
// filter the reviewer flagged) is followed by the corrected variant. Note the field-name
// typo 'timeDeactived' in the first variant vs 'timeDeactivated' in the second.
You are right, the filter predicate should be negated. | private Stream<DeactivatedContainer> deactivatedContainerStream() {
synchronized (lock) {
return activeContainers.entrySet().stream()
.filter(e -> e.getKey() != null)
.filter(e -> !e.getValue().isDeactivated())
.map(e -> new DeactivatedContainer(e.getKey(), e.getValue().timeActivated, e.getValue().timeDeactived));
}
// Corrected variant: filters directly on isDeactivated() (with its meaning fixed to
// "has been deactivated"), dropping both the negation and the redundant null-key check.
} | .map(e -> new DeactivatedContainer(e.getKey(), e.getValue().timeActivated, e.getValue().timeDeactived)); | private Stream<DeactivatedContainer> deactivatedContainerStream() {
synchronized (lock) {
return activeContainers.entrySet().stream()
.filter(e -> e.getValue().isDeactivated())
.map(e -> new DeactivatedContainer(e.getKey(), e.getValue().timeActivated, e.getValue().timeDeactivated));
}
} | class ActiveContainerStatistics {
// Metric names emitted for deactivated containers.
public interface Metrics {
String TOTAL_DEACTIVATED_CONTAINERS = "jdisc.deactivated_containers.total";
String DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES = "jdisc.deactivated_containers.with_retained_refs";
}
private static final Logger log = Logger.getLogger(ActiveContainerStatistics.class.getName());
// Weak keys: a container that is garbage-collected drops out of the statistics.
private final WeakHashMap<ActiveContainer, ActiveContainerStats> activeContainers = new WeakHashMap<>();
// Guards all access to activeContainers.
private final Object lock = new Object();
/** Records that the given container has been activated, stamping the activation time. */
public void onActivated(ActiveContainer activeContainer) {
synchronized (lock) {
activeContainers.put(activeContainer, new ActiveContainerStats(Instant.now()));
}
}
/** Records the deactivation time; onActivated() must have been called first. */
public void onDeactivated(ActiveContainer activeContainer) {
synchronized (lock) {
ActiveContainerStats containerStats = activeContainers.get(activeContainer);
if (containerStats == null) {
throw new IllegalStateException("onActivated() has not been called for container: " + activeContainer);
}
containerStats.setTimeDeactived(Instant.now());
}
}
/** Aggregates deactivated-container counters and publishes them to the given metric sink. */
public void outputMetrics(Metric metric) {
synchronized (lock) {
DeactivatedContainerMetrics metrics = deactivatedContainerStream()
.collect(
DeactivatedContainerMetrics::new,
DeactivatedContainerMetrics::aggregate,
DeactivatedContainerMetrics::merge);
metric.set(Metrics.TOTAL_DEACTIVATED_CONTAINERS, metrics.deactivatedContainerCount, null);
metric.set(Metrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, metrics.deactivatedContainersWithRetainedRefsCount, null);
}
}
/** Logs a warning summary for every deactivated container still present (a leak indicator). */
public void printSummaryToLog() {
synchronized (lock) {
List<DeactivatedContainer> deactivatedContainers = deactivatedContainerStream().collect(toList());
if (deactivatedContainers.isEmpty()) {
return;
}
log.warning(
"Multiple instances of ActiveContainer leaked! " + deactivatedContainers.size() +
" instances are still present.");
deactivatedContainers.stream()
.map(c -> " - " + c.toSummaryString())
.forEach(log::warning);
}
}
// Mutable per-container record; timeDeactived (sic, typo for "timeDeactivated")
// stays null until onDeactivated() is called.
private static class ActiveContainerStats {
public final Instant timeActivated;
public Instant timeDeactived;
public ActiveContainerStats(Instant timeActivated) {
this.timeActivated = timeActivated;
}
public void setTimeDeactived(Instant instant) {
this.timeDeactived = instant;
}
// NOTE(review): this returns true while timeDeactived is still null, i.e. while the
// container has NOT yet been deactivated — the name is inverted relative to the result.
// The pre-fix deactivatedContainerStream() compensates with '!isDeactivated()'; flipping
// this alone without that caller would change behavior. Fix both together.
public boolean isDeactivated() {
return timeDeactived == null;
}
}
// Immutable snapshot of a deactivated container, used for metrics and log summaries.
private static class DeactivatedContainer {
public final ActiveContainer activeContainer;
public final Instant timeActivated;
public final Instant timeDeactivated;
public DeactivatedContainer(ActiveContainer activeContainer, Instant timeActivated, Instant timeDeactivated) {
this.activeContainer = activeContainer;
this.timeActivated = timeActivated;
this.timeDeactivated = timeDeactivated;
}
public String toSummaryString() {
return String.format("%s: timeActivated=%s, timeDeactivated=%s, retainCount=%d",
activeContainer.toString(),
timeActivated.toString(),
timeDeactivated.toString(),
activeContainer.retainCount());
}
}
// Accumulator for the stream collect() above: counts deactivated containers,
// and separately those that still hold retained references.
private static class DeactivatedContainerMetrics {
public int deactivatedContainerCount = 0;
public int deactivatedContainersWithRetainedRefsCount = 0;
public void aggregate(DeactivatedContainer deactivatedContainer) {
++deactivatedContainerCount;
if (deactivatedContainer.activeContainer.retainCount() > 0) {
++deactivatedContainersWithRetainedRefsCount;
}
}
public DeactivatedContainerMetrics merge(DeactivatedContainerMetrics other) {
deactivatedContainerCount += other.deactivatedContainerCount;
deactivatedContainersWithRetainedRefsCount += other.deactivatedContainersWithRetainedRefsCount;
return this;
}
}
}
public void emitMetrics(Metric metric) {
synchronized (lock) {
DeactivatedContainerMetrics metrics = deactivatedContainerStream()
.collect(
DeactivatedContainerMetrics::new,
DeactivatedContainerMetrics::aggregate,
DeactivatedContainerMetrics::merge);
metric.set(Metrics.TOTAL_DEACTIVATED_CONTAINERS, metrics.deactivatedContainerCount, null);
metric.set(Metrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, metrics.deactivatedContainersWithRetainedRefsCount, null);
}
} |
// NOTE(review): another dataset row split by ' | ' separators. First variant keys the
// deployment lock on region only — two environments sharing a region name would collide
// on the same lock path. The corrected variant below includes environment as well.
This should be both region and environment, I guess? | private Path lockPath(ApplicationId instance, ZoneId zone) {
return lockRoot.append(instance.serializedForm() + ":" + zone.region().value());
} | return lockRoot.append(instance.serializedForm() + ":" + zone.region().value()); | private Path lockPath(ApplicationId instance, ZoneId zone) {
// Corrected: lock key is instance + environment + region, so zones are fully disambiguated.
return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value());
} | class CuratorDb {
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
// Lock timeouts: deployments may legitimately take long; most other operations should not.
private static final Duration deployLockTimeout = Duration.ofMinutes(30);
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1);
// ZooKeeper paths: everything lives under /controller/v1.
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path jobRoot = root.append("jobs");
private static final Path controllerRoot = root.append("controllers");
private static final Path routingPoliciesRoot = root.append("routingPolicies");
private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies");
private static final Path endpointCertificateRoot = root.append("applicationCertificates");
private static final Path archiveBucketsRoot = root.append("archiveBuckets");
private static final Path changeRequestsRoot = root.append("changeRequests");
private static final Path notificationsRoot = root.append("notifications");
private static final Path supportAccessRoot = root.append("supportAccess");
// Serializers translating domain objects to/from the Slime/JSON stored in ZooKeeper.
private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer);
private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final RunSerializer runSerializer = new RunSerializer();
private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer();
private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer);
private final OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer);
private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer();
private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer);
private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer();
private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer();
private final Curator curator;
private final Duration tryLockTimeout;
// ZK-stat-version-keyed caches: re-read only when the node's version changes.
private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>();
private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this(curator, defaultTryLockTimeout);
}
// Package-private constructor: lets tests shorten the try-lock timeout.
CuratorDb(Curator curator, Duration tryLockTimeout) {
this.curator = curator;
this.tryLockTimeout = tryLockTimeout;
}
/** Returns the hostnames of all members configured in this ZooKeeper ensemble. */
public List<String> cluster() {
    String connectionSpec = curator.zooKeeperEnsembleConnectionSpec();
    return Arrays.stream(connectionSpec.split(","))
                 .filter(hostAndPort -> !hostAndPort.isEmpty())
                 .map(hostAndPort -> hostAndPort.split(":")[0])
                 .collect(Collectors.toUnmodifiableList());
}
// ---- Locks ------------------------------------------------------------------
// MultiplePathsLock takes both a new-style and a legacy lock path, so old and new
// controller versions exclude each other during a path-scheme migration.
public Lock lock(TenantName name) {
return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2));
}
public Lock lock(TenantAndApplicationId id) {
return new MultiplePathsLock(lockPath(id), legacyLockPath(id), defaultLockTimeout.multipliedBy(2),curator);
}
// Uses the long deploy timeout: deployments can legitimately hold this lock a while.
public Lock lockForDeployment(ApplicationId id, ZoneId zone) {
return new MultiplePathsLock(lockPath(id, zone), legacyLockPath(id, zone), deployLockTimeout, curator);
}
public Lock lock(ApplicationId id, JobType type) {
return new MultiplePathsLock(lockPath(id, type), legacyLockPath(id, type), defaultLockTimeout, curator);
}
public Lock lock(ApplicationId id, JobType type, Step step) throws TimeoutException {
return tryLock(lockPath(id, type, step), legacyLockPath(id, type, step));
}
public Lock lockRotations() {
return curator.lock(lockRoot.append("rotations"), defaultLockTimeout);
}
public Lock lockConfidenceOverrides() {
return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout);
}
// Wraps the checked TimeoutException: maintenance-job callers treat failure to
// acquire as "skip this run" rather than an error they must declare.
public Lock lockMaintenanceJob(String jobName) {
try {
return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName));
} catch (TimeoutException e) {
throw new UncheckedTimeoutException(e);
}
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
public Lock lockOsVersions() {
return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout);
}
public Lock lockOsVersionStatus() {
return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout);
}
public Lock lockRoutingPolicies() {
return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout);
}
public Lock lockAuditLog() {
return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout);
}
public Lock lockNameServiceQueue() {
return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout);
}
public Lock lockMeteringRefreshTime() throws TimeoutException {
return tryLock(lockRoot.append("meteringRefreshTime"));
}
public Lock lockArchiveBuckets(ZoneId zoneId) {
return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout);
}
public Lock lockChangeRequests() {
return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout);
}
public Lock lockNotifications(TenantName tenantName) {
return curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout);
}
public Lock lockSupportAccess(DeploymentId deploymentId) {
return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout);
}
public Lock lockDeploymentRetriggerQueue() {
return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout);
}
/** Try locking with a low timeout, meaning it is OK to fail lock acquisition.
*
* Useful for maintenance jobs, where there is no point in running the jobs back to back.
*
* @throws TimeoutException if the lock could not be acquired within tryLockTimeout
*/
private Lock tryLock(Path path) throws TimeoutException {
try {
return curator.lock(path, tryLockTimeout);
}
catch (UncheckedTimeoutException e) {
TimeoutException timeout = new TimeoutException(e.getMessage());
timeout.initCause(e); // preserve the original stack trace instead of dropping the cause
throw timeout;
}
}
/** Try locking both the given new-style and legacy paths with a low timeout, meaning
* it is OK to fail lock acquisition.
*
* Useful for maintenance jobs, where there is no point in running the jobs back to back.
*
* @throws TimeoutException if either underlying lock could not be acquired within tryLockTimeout
*/
private Lock tryLock(Path path, Path path2) throws TimeoutException {
try {
return new MultiplePathsLock(path, path2, tryLockTimeout, curator);
}
catch (UncheckedTimeoutException e) {
TimeoutException timeout = new TimeoutException(e.getMessage());
timeout.initCause(e); // preserve the original stack trace instead of dropping the cause
throw timeout;
}
}
// Reads the node at path and maps its bytes, treating missing or empty nodes as absent.
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
return curator.getData(path).filter(data -> data.length > 0).map(mapper);
}
// Reads the node at path as JSON parsed into Slime, if present and non-empty.
private Optional<Slime> readSlime(Path path) {
return read(path, SlimeUtils::jsonToSlime);
}
// Serializes Slime to JSON bytes, rethrowing IOException unchecked.
private static byte[] asJson(Slime slime) {
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
// ---- Upgrader, version status, OS versions, tenants -------------------------
// Defaults to 0.125 upgrades per minute when no value has been stored.
public double readUpgradesPerMinute() {
return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125);
}
public void writeUpgradesPerMinute(double n) {
curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
}
public Optional<Integer> readTargetMajorVersion() {
return read(targetMajorVersionPath(), ByteBuffer::wrap).map(ByteBuffer::getInt);
}
// An empty Optional clears the stored target by deleting the node.
public void writeTargetMajorVersion(Optional<Integer> targetMajorVersion) {
if (targetMajorVersion.isPresent())
curator.set(targetMajorVersionPath(), ByteBuffer.allocate(Integer.BYTES).putInt(targetMajorVersion.get()).array());
else
curator.delete(targetMajorVersionPath());
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, ControllerVersion version) {
curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version)));
}
// Falls back to the currently running version when none is stored for the host.
public ControllerVersion readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(controllerVersionSerializer::fromSlime)
.orElse(ControllerVersion.CURRENT);
}
public void writeOsVersionTargets(Set<OsVersionTarget> versions) {
curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions)));
}
public Set<OsVersionTarget> readOsVersionTargets() {
return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet);
}
public void writeOsVersionStatus(OsVersionStatus status) {
curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status)));
}
public OsVersionStatus readOsVersionStatus() {
return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty);
}
public void writeTenant(Tenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<Tenant> readTenant(TenantName name) {
return readSlime(tenantPath(name)).map(bytes -> tenantSerializer.tenantFrom(bytes));
}
// Reads every tenant, silently skipping names whose node disappeared in between.
public List<Tenant> readTenants() {
return readTenantNames().stream()
.map(this::readTenant)
.flatMap(Optional::stream)
.collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public List<TenantName> readTenantNames() {
return curator.getChildren(tenantRoot).stream()
.map(TenantName::from)
.collect(Collectors.toList());
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
// ---- Applications -----------------------------------------------------------
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
// Deserializes only when the node's ZK stat version differs from the cached entry,
// so repeated reads of an unchanged application avoid the parse cost.
public Optional<Application> readApplication(TenantAndApplicationId application) {
Path path = applicationPath(application);
return curator.getStat(path)
.map(stat -> cachedApplications.compute(path, (__, old) ->
old != null && old.getFirst() == stat.getVersion()
? old
: new Pair<>(stat.getVersion(), read(path, applicationSerializer::fromSlime).get())).getSecond());
}
public List<Application> readApplications(boolean canFail) {
return readApplications(ignored -> true, canFail);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name), false);
}
// When canFail is true, unreadable applications are logged and skipped rather than
// aborting the whole read — used where partial results are better than none.
private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) {
var applicationIds = readApplicationIds();
var applications = new ArrayList<Application>(applicationIds.size());
for (var id : applicationIds) {
if (!applicationFilter.test(id)) continue;
try {
readApplication(id).ifPresent(applications::add);
} catch (Exception e) {
if (canFail) {
log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " +
"manual intervention", e);
} else {
throw e;
}
}
}
return Collections.unmodifiableList(applications);
}
public List<TenantAndApplicationId> readApplicationIds() {
return curator.getChildren(applicationRoot).stream()
.map(TenantAndApplicationId::fromSerialized)
.sorted()
.collect(toUnmodifiableList());
}
public void removeApplication(TenantAndApplicationId id) {
curator.delete(applicationPath(id));
}
// ---- Job runs and logs ------------------------------------------------------
public void writeLastRun(Run run) {
curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run)));
}
public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) {
Path path = runsPath(id, type);
curator.set(path, asJson(runSerializer.toSlime(runs)));
}
public Optional<Run> readLastRun(ApplicationId id, JobType type) {
return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime);
}
// Stat-version-cached like readApplication: re-parses only when the node changed.
public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) {
Path path = runsPath(id, type);
return curator.getStat(path)
.map(stat -> cachedHistoricRuns.compute(path, (__, old) ->
old != null && old.getFirst() == stat.getVersion()
? old
: new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond())
.orElseGet(Collections::emptyNavigableMap);
}
public void deleteRunData(ApplicationId id, JobType type) {
curator.delete(runsPath(id, type));
curator.delete(lastRunPath(id, type));
}
public void deleteRunData(ApplicationId id) {
curator.delete(jobRoot.append(id.serializedForm()));
}
public List<ApplicationId> applicationsWithJobs() {
return curator.getChildren(jobRoot).stream()
.map(ApplicationId::fromSerializedForm)
.collect(Collectors.toList());
}
// Logs are stored in chunks keyed by the id of their first entry.
public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) {
return curator.getData(logPath(id, type, chunkId));
}
public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) {
curator.set(logPath(id, type, chunkId), log);
}
public void deleteLog(ApplicationId id, JobType type) {
curator.delete(runsPath(id, type).append("logs"));
}
// The last-entry id is stored as data on the "logs" node itself, whose children are the chunks.
public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) {
return curator.getData(lastLogPath(id, type))
.map(String::new).map(Long::parseLong);
}
public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) {
curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes());
}
public LongStream getLogChunkIds(ApplicationId id, JobType type) {
return curator.getChildren(runsPath(id, type).append("logs")).stream()
.mapToLong(Long::parseLong)
.sorted();
}
// ---- Audit log, name service queue, provision state -------------------------
public AuditLog readAuditLog() {
return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime)
.orElse(AuditLog.empty);
}
public void writeAuditLog(AuditLog log) {
curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log)));
}
public NameServiceQueue readNameServiceQueue() {
return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime)
.orElse(NameServiceQueue.EMPTY);
}
public void writeNameServiceQueue(NameServiceQueue queue) {
curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue)));
}
// Provision state is stored as opaque bytes; this class does not interpret it.
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
// ---- Routing policies, endpoint certificates, metering, archive buckets -----
// Rejects any policy not owned by the given application before writing the batch.
public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) {
for (var policy : policies) {
if (!policy.id().owner().equals(application)) {
throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " +
application.toShortString());
}
}
curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies)));
}
public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() {
return readRoutingPolicies((instance) -> true);
}
public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) {
return curator.getChildren(routingPoliciesRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(filter)
.collect(Collectors.toUnmodifiableMap(Function.identity(),
this::readRoutingPolicies));
}
public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) {
return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime))
.orElseGet(List::of);
}
public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) {
curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy)));
}
// Falls back to the default routing status for zones without a stored policy.
public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) {
return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data))
.orElseGet(() -> new ZoneRoutingPolicy(zone, RoutingStatus.DEFAULT));
}
public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) {
curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata)));
}
public void deleteEndpointCertificateMetadata(ApplicationId applicationId) {
curator.delete(endpointCertificatePath(applicationId));
}
public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) {
return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString);
}
// Throws (via orElseThrow) if a child node vanishes between listing and reading it.
public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() {
Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>();
for (String appIdString : curator.getChildren(endpointCertificateRoot)) {
ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId);
allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow());
}
return allEndpointCertificateMetadata;
}
public void writeMeteringRefreshTime(long timestamp) {
curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes());
}
public long readMeteringRefreshTime() {
return curator.getData(meteringRefreshPath())
.map(String::new).map(Long::parseLong)
.orElse(0L);
}
public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) {
return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString)
.orElseGet(Set::of);
}
public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) {
curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets)));
}
// ---- Change requests, notifications, support access, retrigger queue --------
public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) {
return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime);
}
public List<VespaChangeRequest> readChangeRequests() {
return curator.getChildren(changeRequestsRoot)
.stream()
.map(this::readChangeRequest)
.flatMap(Optional::stream)
.collect(Collectors.toList());
}
public void writeChangeRequest(VespaChangeRequest changeRequest) {
curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest)));
}
public void deleteChangeRequest(VespaChangeRequest changeRequest) {
curator.delete(changeRequestPath(changeRequest.getId()));
}
public List<Notification> readNotifications(TenantName tenantName) {
return readSlime(notificationsPath(tenantName))
.map(slime -> NotificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of);
}
public List<TenantName> listTenantsWithNotifications() {
return curator.getChildren(notificationsRoot).stream()
.map(TenantName::from)
.collect(Collectors.toUnmodifiableList());
}
public void writeNotifications(TenantName tenantName, List<Notification> notifications) {
curator.set(notificationsPath(tenantName), asJson(NotificationsSerializer.toSlime(notifications)));
}
public void deleteNotifications(TenantName tenantName) {
curator.delete(notificationsPath(tenantName));
}
public SupportAccess readSupportAccess(DeploymentId deploymentId) {
return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY);
}
/** Take the support access lock before reading and before writing */
public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) {
curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess)));
}
public List<RetriggerEntry> readRetriggerEntries() {
return readSlime(deploymentRetriggerPath()).map(RetriggerEntrySerializer::fromSlime).orElseGet(List::of);
}
public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) {
curator.set(deploymentRetriggerPath(), asJson(RetriggerEntrySerializer.toSlime(retriggerEntries)));
}
// ---- Lock path construction -------------------------------------------------
// legacyLockPath builds hierarchical paths (one ZK node per component); the newer
// lockPath variants flatten the key into a single ':'-joined node name.
// NOTE(review): lockForDeployment uses lockPath(ApplicationId, ZoneId), which is not
// in this listing — confirm it exists elsewhere in the class.
private Path lockPath(TenantName tenant) {
return lockRoot
.append(tenant.value());
}
private Path legacyLockPath(TenantAndApplicationId application) {
return lockPath(application.tenant())
.append(application.application().value());
}
private Path legacyLockPath(ApplicationId instance) {
return legacyLockPath(TenantAndApplicationId.from(instance))
.append(instance.instance().value());
}
private Path legacyLockPath(ApplicationId instance, ZoneId zone) {
return legacyLockPath(instance)
.append(zone.environment().value())
.append(zone.region().value());
}
private Path legacyLockPath(ApplicationId instance, JobType type) {
return legacyLockPath(instance)
.append(type.jobName());
}
private Path legacyLockPath(ApplicationId instance, JobType type, Step step) {
return legacyLockPath(instance, type)
.append(step.name());
}
private Path lockPath(TenantAndApplicationId application) {
return lockRoot.append(application.tenant().value() + ":" + application.application().value());
}
private Path lockPath(ApplicationId instance, JobType type) {
return lockRoot.append(instance.serializedForm() + ":" + type.jobName());
}
private Path lockPath(ApplicationId instance, JobType type, Step step) {
return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name());
}
private Path lockPath(String provisionId) {
return lockRoot
.append(provisionStatePath())
.append(provisionId);
}
// ---- Data path construction -------------------------------------------------
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path targetMajorVersionPath() {
return root.append("upgrader").append("targetMajorVersion");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path osVersionTargetsPath() {
return root.append("osUpgrader").append("targetVersion");
}
private static Path osVersionStatusPath() {
return root.append("osVersionStatus");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path routingPolicyPath(ApplicationId application) {
return routingPoliciesRoot.append(application.serializedForm());
}
private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); }
private static Path nameServiceQueuePath() {
return root.append("nameServiceQueue");
}
private static Path auditLogPath() {
return root.append("auditLog");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(TenantAndApplicationId id) {
return applicationRoot.append(id.serialized());
}
private static Path runsPath(ApplicationId id, JobType type) {
return jobRoot.append(id.serializedForm()).append(type.jobName());
}
private static Path lastRunPath(ApplicationId id, JobType type) {
return runsPath(id, type).append("last");
}
// Chunk nodes live under "logs", keyed by the id of the chunk's first entry.
private static Path logPath(ApplicationId id, JobType type, long first) {
return runsPath(id, type).append("logs").append(Long.toString(first));
}
// Same "logs" node as above: the last-entry id is stored as that node's data.
private static Path lastLogPath(ApplicationId id, JobType type) {
return runsPath(id, type).append("logs");
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
private static Path endpointCertificatePath(ApplicationId id) {
return endpointCertificateRoot.append(id.serializedForm());
}
private static Path meteringRefreshPath() {
return root.append("meteringRefreshTime");
}
private static Path archiveBucketsPath(ZoneId zoneId) {
return archiveBucketsRoot.append(zoneId.value());
}
private static Path changeRequestPath(String id) {
return changeRequestsRoot.append(id);
}
private static Path notificationsPath(TenantName tenantName) {
return notificationsRoot.append(tenantName.value());
}
private static Path supportAccessPath(DeploymentId deploymentId) {
return supportAccessRoot.append(deploymentId.dottedString());
}
private static Path deploymentRetriggerPath() {
return root.append("deploymentRetriggerQueue");
}
}
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Duration deployLockTimeout = Duration.ofMinutes(30);
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1);
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path jobRoot = root.append("jobs");
private static final Path controllerRoot = root.append("controllers");
private static final Path routingPoliciesRoot = root.append("routingPolicies");
private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies");
private static final Path endpointCertificateRoot = root.append("applicationCertificates");
private static final Path archiveBucketsRoot = root.append("archiveBuckets");
private static final Path changeRequestsRoot = root.append("changeRequests");
private static final Path notificationsRoot = root.append("notifications");
private static final Path supportAccessRoot = root.append("supportAccess");
private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer);
private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final RunSerializer runSerializer = new RunSerializer();
private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer();
private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer);
private final OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer);
private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer();
private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer);
private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer();
private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer();
private final Curator curator;
private final Duration tryLockTimeout;
private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>();
private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this(curator, defaultTryLockTimeout);
}
CuratorDb(Curator curator, Duration tryLockTimeout) {
this.curator = curator;
this.tryLockTimeout = tryLockTimeout;
}
/** Returns the hostnames of all members of this ZooKeeper cluster, without their ports. */
public List<String> cluster() {
    List<String> hostnames = new ArrayList<>();
    for (String hostAndPort : curator.zooKeeperEnsembleConnectionSpec().split(",")) {
        if (hostAndPort.isEmpty()) continue; // ignore empty entries from stray commas
        hostnames.add(hostAndPort.split(":")[0]); // keep only the host part
    }
    return List.copyOf(hostnames); // unmodifiable, like the original toUnmodifiableList collector
}
public Lock lock(TenantName name) {
return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2));
}
public Lock lock(TenantAndApplicationId id) {
return new MultiplePathsLock(lockPath(id), legacyLockPath(id), defaultLockTimeout.multipliedBy(2),curator);
}
public Lock lockForDeployment(ApplicationId id, ZoneId zone) {
return new MultiplePathsLock(lockPath(id, zone), legacyLockPath(id, zone), deployLockTimeout, curator);
}
public Lock lock(ApplicationId id, JobType type) {
return new MultiplePathsLock(lockPath(id, type), legacyLockPath(id, type), defaultLockTimeout, curator);
}
public Lock lock(ApplicationId id, JobType type, Step step) throws TimeoutException {
return tryLock(lockPath(id, type, step), legacyLockPath(id, type, step));
}
public Lock lockRotations() {
return curator.lock(lockRoot.append("rotations"), defaultLockTimeout);
}
public Lock lockConfidenceOverrides() {
return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout);
}
public Lock lockMaintenanceJob(String jobName) {
try {
return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName));
} catch (TimeoutException e) {
throw new UncheckedTimeoutException(e);
}
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
public Lock lockOsVersions() {
return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout);
}
public Lock lockOsVersionStatus() {
return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout);
}
public Lock lockRoutingPolicies() {
return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout);
}
public Lock lockAuditLog() {
return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout);
}
public Lock lockNameServiceQueue() {
return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout);
}
public Lock lockMeteringRefreshTime() throws TimeoutException {
return tryLock(lockRoot.append("meteringRefreshTime"));
}
public Lock lockArchiveBuckets(ZoneId zoneId) {
return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout);
}
public Lock lockChangeRequests() {
return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout);
}
public Lock lockNotifications(TenantName tenantName) {
return curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout);
}
public Lock lockSupportAccess(DeploymentId deploymentId) {
return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout);
}
public Lock lockDeploymentRetriggerQueue() {
return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout);
}
/** Try locking with a low timeout, meaning it is OK to fail lock acquisition.
 *
 * Useful for maintenance jobs, where there is no point in running the jobs back to back.
 *
 * @throws TimeoutException if the lock could not be acquired within {@code tryLockTimeout}
 */
private Lock tryLock(Path path) throws TimeoutException {
    try {
        return curator.lock(path, tryLockTimeout);
    }
    catch (UncheckedTimeoutException e) {
        // Translate to the checked variant so callers must explicitly handle contention
        throw new TimeoutException(e.getMessage());
    }
}
/** Try locking both paths with a low timeout, meaning it is OK to fail lock acquisition.
 *
 * Two-path variant used where a new-style and a legacy lock path must both be held,
 * e.g. the per-step job lock taken by {@code lock(ApplicationId, JobType, Step)}.
 *
 * @throws TimeoutException if the locks could not be acquired within {@code tryLockTimeout}
 */
private Lock tryLock(Path path, Path path2) throws TimeoutException {
    try {
        return new MultiplePathsLock(path, path2, tryLockTimeout, curator);
    }
    catch (UncheckedTimeoutException e) {
        throw new TimeoutException(e.getMessage());
    }
}
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
return curator.getData(path).filter(data -> data.length > 0).map(mapper);
}
private Optional<Slime> readSlime(Path path) {
return read(path, SlimeUtils::jsonToSlime);
}
/**
 * Serializes the given Slime structure to its JSON byte representation.
 *
 * @throws UncheckedIOException if JSON encoding fails
 */
private static byte[] asJson(Slime slime) {
    try {
        return SlimeUtils.toJsonBytes(slime);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
public double readUpgradesPerMinute() {
return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125);
}
public void writeUpgradesPerMinute(double n) {
curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
}
public Optional<Integer> readTargetMajorVersion() {
return read(targetMajorVersionPath(), ByteBuffer::wrap).map(ByteBuffer::getInt);
}
/**
 * Persists the target major version, or clears it when empty.
 *
 * @param targetMajorVersion the target major version, or empty to delete any stored target
 */
public void writeTargetMajorVersion(Optional<Integer> targetMajorVersion) {
    // ifPresentOrElse replaces the isPresent()/get() pair with the idiomatic Optional form
    targetMajorVersion.ifPresentOrElse(
            version -> curator.set(targetMajorVersionPath(),
                                   ByteBuffer.allocate(Integer.BYTES).putInt(version).array()),
            () -> curator.delete(targetMajorVersionPath()));
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, ControllerVersion version) {
curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version)));
}
public ControllerVersion readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(controllerVersionSerializer::fromSlime)
.orElse(ControllerVersion.CURRENT);
}
public void writeOsVersionTargets(Set<OsVersionTarget> versions) {
curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions)));
}
public Set<OsVersionTarget> readOsVersionTargets() {
return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet);
}
public void writeOsVersionStatus(OsVersionStatus status) {
curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status)));
}
public OsVersionStatus readOsVersionStatus() {
return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty);
}
public void writeTenant(Tenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<Tenant> readTenant(TenantName name) {
return readSlime(tenantPath(name)).map(bytes -> tenantSerializer.tenantFrom(bytes));
}
public List<Tenant> readTenants() {
return readTenantNames().stream()
.map(this::readTenant)
.flatMap(Optional::stream)
.collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public List<TenantName> readTenantNames() {
return curator.getChildren(tenantRoot).stream()
.map(TenantName::from)
.collect(Collectors.toList());
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
/**
 * Reads the application with the given id, through a local cache keyed on the
 * ZooKeeper node version, so unchanged data is not re-deserialized.
 *
 * @return the application, or empty if its ZooKeeper node does not exist
 */
public Optional<Application> readApplication(TenantAndApplicationId application) {
    Path path = applicationPath(application);
    // getStat is empty when the node is absent; otherwise compare the node's version with the
    // cached entry and only re-read (and re-parse) the data when the version has changed.
    // NOTE(review): read(path, ...).get() assumes the node still exists here; a concurrent
    // delete between getStat and read would throw — confirm this is acceptable.
    return curator.getStat(path)
            .map(stat -> cachedApplications.compute(path, (__, old) ->
                    old != null && old.getFirst() == stat.getVersion()
                            ? old
                            : new Pair<>(stat.getVersion(), read(path, applicationSerializer::fromSlime).get())).getSecond());
}
public List<Application> readApplications(boolean canFail) {
return readApplications(ignored -> true, canFail);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name), false);
}
/**
 * Reads all applications whose id matches the given filter.
 *
 * @param applicationFilter predicate on the application id; non-matching ids are skipped
 * @param canFail when true, applications that fail to read are logged at SEVERE and skipped;
 *                when false, the first read failure is rethrown
 * @return an unmodifiable list of the applications read
 */
private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) {
    var applicationIds = readApplicationIds();
    var applications = new ArrayList<Application>(applicationIds.size());
    for (var id : applicationIds) {
        if (!applicationFilter.test(id)) continue;
        try {
            readApplication(id).ifPresent(applications::add);
        } catch (Exception e) {
            if (canFail) {
                // Skip the broken application so the remaining ones can still be read
                log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " +
                        "manual intervention", e);
            } else {
                throw e;
            }
        }
    }
    return Collections.unmodifiableList(applications);
}
public List<TenantAndApplicationId> readApplicationIds() {
return curator.getChildren(applicationRoot).stream()
.map(TenantAndApplicationId::fromSerialized)
.sorted()
.collect(toUnmodifiableList());
}
public void removeApplication(TenantAndApplicationId id) {
curator.delete(applicationPath(id));
}
public void writeLastRun(Run run) {
curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run)));
}
public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) {
Path path = runsPath(id, type);
curator.set(path, asJson(runSerializer.toSlime(runs)));
}
public Optional<Run> readLastRun(ApplicationId id, JobType type) {
return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime);
}
public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) {
Path path = runsPath(id, type);
return curator.getStat(path)
.map(stat -> cachedHistoricRuns.compute(path, (__, old) ->
old != null && old.getFirst() == stat.getVersion()
? old
: new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond())
.orElseGet(Collections::emptyNavigableMap);
}
public void deleteRunData(ApplicationId id, JobType type) {
curator.delete(runsPath(id, type));
curator.delete(lastRunPath(id, type));
}
public void deleteRunData(ApplicationId id) {
curator.delete(jobRoot.append(id.serializedForm()));
}
public List<ApplicationId> applicationsWithJobs() {
return curator.getChildren(jobRoot).stream()
.map(ApplicationId::fromSerializedForm)
.collect(Collectors.toList());
}
public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) {
return curator.getData(logPath(id, type, chunkId));
}
public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) {
curator.set(logPath(id, type, chunkId), log);
}
public void deleteLog(ApplicationId id, JobType type) {
curator.delete(runsPath(id, type).append("logs"));
}
public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) {
return curator.getData(lastLogPath(id, type))
.map(String::new).map(Long::parseLong);
}
public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) {
curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes());
}
public LongStream getLogChunkIds(ApplicationId id, JobType type) {
return curator.getChildren(runsPath(id, type).append("logs")).stream()
.mapToLong(Long::parseLong)
.sorted();
}
public AuditLog readAuditLog() {
return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime)
.orElse(AuditLog.empty);
}
public void writeAuditLog(AuditLog log) {
curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log)));
}
public NameServiceQueue readNameServiceQueue() {
return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime)
.orElse(NameServiceQueue.EMPTY);
}
public void writeNameServiceQueue(NameServiceQueue queue) {
curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue)));
}
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) {
for (var policy : policies) {
if (!policy.id().owner().equals(application)) {
throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " +
application.toShortString());
}
}
curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies)));
}
public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() {
return readRoutingPolicies((instance) -> true);
}
public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) {
return curator.getChildren(routingPoliciesRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(filter)
.collect(Collectors.toUnmodifiableMap(Function.identity(),
this::readRoutingPolicies));
}
public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) {
return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime))
.orElseGet(List::of);
}
public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) {
curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy)));
}
public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) {
return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data))
.orElseGet(() -> new ZoneRoutingPolicy(zone, RoutingStatus.DEFAULT));
}
public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) {
curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata)));
}
public void deleteEndpointCertificateMetadata(ApplicationId applicationId) {
curator.delete(endpointCertificatePath(applicationId));
}
public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) {
return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString);
}
public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() {
Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>();
for (String appIdString : curator.getChildren(endpointCertificateRoot)) {
ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString);
Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId);
allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow());
}
return allEndpointCertificateMetadata;
}
public void writeMeteringRefreshTime(long timestamp) {
curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes());
}
public long readMeteringRefreshTime() {
return curator.getData(meteringRefreshPath())
.map(String::new).map(Long::parseLong)
.orElse(0L);
}
public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) {
return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString)
.orElseGet(Set::of);
}
public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) {
curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets)));
}
public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) {
return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime);
}
public List<VespaChangeRequest> readChangeRequests() {
return curator.getChildren(changeRequestsRoot)
.stream()
.map(this::readChangeRequest)
.flatMap(Optional::stream)
.collect(Collectors.toList());
}
public void writeChangeRequest(VespaChangeRequest changeRequest) {
curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest)));
}
public void deleteChangeRequest(VespaChangeRequest changeRequest) {
curator.delete(changeRequestPath(changeRequest.getId()));
}
public List<Notification> readNotifications(TenantName tenantName) {
return readSlime(notificationsPath(tenantName))
.map(slime -> NotificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of);
}
public List<TenantName> listTenantsWithNotifications() {
return curator.getChildren(notificationsRoot).stream()
.map(TenantName::from)
.collect(Collectors.toUnmodifiableList());
}
public void writeNotifications(TenantName tenantName, List<Notification> notifications) {
curator.set(notificationsPath(tenantName), asJson(NotificationsSerializer.toSlime(notifications)));
}
public void deleteNotifications(TenantName tenantName) {
curator.delete(notificationsPath(tenantName));
}
public SupportAccess readSupportAccess(DeploymentId deploymentId) {
return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY);
}
/** Take lock before reading before writing */
public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) {
curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess)));
}
public List<RetriggerEntry> readRetriggerEntries() {
return readSlime(deploymentRetriggerPath()).map(RetriggerEntrySerializer::fromSlime).orElseGet(List::of);
}
public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) {
curator.set(deploymentRetriggerPath(), asJson(RetriggerEntrySerializer.toSlime(retriggerEntries)));
}
private Path lockPath(TenantName tenant) {
return lockRoot
.append(tenant.value());
}
private Path legacyLockPath(TenantAndApplicationId application) {
return lockPath(application.tenant())
.append(application.application().value());
}
private Path legacyLockPath(ApplicationId instance) {
return legacyLockPath(TenantAndApplicationId.from(instance))
.append(instance.instance().value());
}
private Path legacyLockPath(ApplicationId instance, ZoneId zone) {
return legacyLockPath(instance)
.append(zone.environment().value())
.append(zone.region().value());
}
private Path legacyLockPath(ApplicationId instance, JobType type) {
return legacyLockPath(instance)
.append(type.jobName());
}
private Path legacyLockPath(ApplicationId instance, JobType type, Step step) {
return legacyLockPath(instance, type)
.append(step.name());
}
private Path lockPath(TenantAndApplicationId application) {
return lockRoot.append(application.tenant().value() + ":" + application.application().value());
}
private Path lockPath(ApplicationId instance, JobType type) {
return lockRoot.append(instance.serializedForm() + ":" + type.jobName());
}
private Path lockPath(ApplicationId instance, JobType type, Step step) {
return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name());
}
private Path lockPath(String provisionId) {
return lockRoot
.append(provisionStatePath())
.append(provisionId);
}
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path targetMajorVersionPath() {
return root.append("upgrader").append("targetMajorVersion");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path osVersionTargetsPath() {
return root.append("osUpgrader").append("targetVersion");
}
private static Path osVersionStatusPath() {
return root.append("osVersionStatus");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path routingPolicyPath(ApplicationId application) {
return routingPoliciesRoot.append(application.serializedForm());
}
private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); }
private static Path nameServiceQueuePath() {
return root.append("nameServiceQueue");
}
private static Path auditLogPath() {
return root.append("auditLog");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(TenantAndApplicationId id) {
return applicationRoot.append(id.serialized());
}
private static Path runsPath(ApplicationId id, JobType type) {
return jobRoot.append(id.serializedForm()).append(type.jobName());
}
private static Path lastRunPath(ApplicationId id, JobType type) {
return runsPath(id, type).append("last");
}
private static Path logPath(ApplicationId id, JobType type, long first) {
return runsPath(id, type).append("logs").append(Long.toString(first));
}
private static Path lastLogPath(ApplicationId id, JobType type) {
return runsPath(id, type).append("logs");
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
private static Path endpointCertificatePath(ApplicationId id) {
return endpointCertificateRoot.append(id.serializedForm());
}
private static Path meteringRefreshPath() {
return root.append("meteringRefreshTime");
}
private static Path archiveBucketsPath(ZoneId zoneId) {
return archiveBucketsRoot.append(zoneId.value());
}
private static Path changeRequestPath(String id) {
return changeRequestsRoot.append(id);
}
private static Path notificationsPath(TenantName tenantName) {
return notificationsRoot.append(tenantName.value());
}
private static Path supportAccessPath(DeploymentId deploymentId) {
return supportAccessRoot.append(deploymentId.dottedString());
}
private static Path deploymentRetriggerPath() {
return root.append("deploymentRetriggerQueue");
}
} |
When will the key be null? | private Stream<DeactivatedContainer> deactivatedContainerStream() {
synchronized (lock) {
return activeContainers.entrySet().stream()
.filter(e -> e.getKey() != null)
.filter(e -> !e.getValue().isDeactivated())
.map(e -> new DeactivatedContainer(e.getKey(), e.getValue().timeActivated, e.getValue().timeDeactived));
}
} | .filter(e -> e.getKey() != null) | private Stream<DeactivatedContainer> deactivatedContainerStream() {
synchronized (lock) {
return activeContainers.entrySet().stream()
.filter(e -> e.getValue().isDeactivated())
.map(e -> new DeactivatedContainer(e.getKey(), e.getValue().timeActivated, e.getValue().timeDeactivated));
}
} | class ActiveContainerStatistics {
public interface Metrics {
String TOTAL_DEACTIVATED_CONTAINERS = "jdisc.deactivated_containers.total";
String DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES = "jdisc.deactivated_containers.with_retained_refs";
}
private static final Logger log = Logger.getLogger(ActiveContainerStatistics.class.getName());
private final WeakHashMap<ActiveContainer, ActiveContainerStats> activeContainers = new WeakHashMap<>();
private final Object lock = new Object();
public void onActivated(ActiveContainer activeContainer) {
synchronized (lock) {
activeContainers.put(activeContainer, new ActiveContainerStats(Instant.now()));
}
}
/**
 * Records the deactivation time of the given container.
 *
 * @throws IllegalStateException if onActivated() was never called for this container
 */
public void onDeactivated(ActiveContainer activeContainer) {
    synchronized (lock) {
        ActiveContainerStats containerStats = activeContainers.get(activeContainer);
        if (containerStats == null) {
            throw new IllegalStateException("onActivated() has not been called for container: " + activeContainer);
        }
        containerStats.setTimeDeactived(Instant.now());
    }
}
public void outputMetrics(Metric metric) {
synchronized (lock) {
DeactivatedContainerMetrics metrics = deactivatedContainerStream()
.collect(
DeactivatedContainerMetrics::new,
DeactivatedContainerMetrics::aggregate,
DeactivatedContainerMetrics::merge);
metric.set(Metrics.TOTAL_DEACTIVATED_CONTAINERS, metrics.deactivatedContainerCount, null);
metric.set(Metrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, metrics.deactivatedContainersWithRetainedRefsCount, null);
}
}
public void printSummaryToLog() {
synchronized (lock) {
List<DeactivatedContainer> deactivatedContainers = deactivatedContainerStream().collect(toList());
if (deactivatedContainers.isEmpty()) {
return;
}
log.warning(
"Multiple instances of ActiveContainer leaked! " + deactivatedContainers.size() +
" instances are still present.");
deactivatedContainers.stream()
.map(c -> " - " + c.toSummaryString())
.forEach(log::warning);
}
}
// Bookkeeping for a single container: when it was activated and, once known, deactivated.
private static class ActiveContainerStats {
    public final Instant timeActivated;
    // Null until setTimeDeactived() is called (note the misspelling "Deactived" — cannot be
    // renamed here without breaking external readers of this field).
    public Instant timeDeactived;

    public ActiveContainerStats(Instant timeActivated) {
        this.timeActivated = timeActivated;
    }

    public void setTimeDeactived(Instant instant) {
        this.timeDeactived = instant;
    }

    public boolean isDeactivated() {
        // NOTE(review): this returns true while timeDeactived is null, i.e. while the
        // container has NOT yet been deactivated — the opposite of what the name suggests.
        // Callers appear to compensate by negating the result; confirm before "fixing".
        return timeDeactived == null;
    }
}
private static class DeactivatedContainer {
public final ActiveContainer activeContainer;
public final Instant timeActivated;
public final Instant timeDeactivated;
public DeactivatedContainer(ActiveContainer activeContainer, Instant timeActivated, Instant timeDeactivated) {
this.activeContainer = activeContainer;
this.timeActivated = timeActivated;
this.timeDeactivated = timeDeactivated;
}
public String toSummaryString() {
return String.format("%s: timeActivated=%s, timeDeactivated=%s, retainCount=%d",
activeContainer.toString(),
timeActivated.toString(),
timeDeactivated.toString(),
activeContainer.retainCount());
}
}
// Mutable accumulator used when collecting metrics over deactivated containers.
private static class DeactivatedContainerMetrics {
    public int deactivatedContainerCount = 0;
    public int deactivatedContainersWithRetainedRefsCount = 0;

    // Folds a single deactivated container into the running counts.
    public void aggregate(DeactivatedContainer container) {
        deactivatedContainerCount += 1;
        if (container.activeContainer.retainCount() > 0) {
            deactivatedContainersWithRetainedRefsCount += 1;
        }
    }

    // Adds another accumulator's counts into this one and returns this,
    // as required by the three-argument Stream.collect combiner contract.
    public DeactivatedContainerMetrics merge(DeactivatedContainerMetrics other) {
        this.deactivatedContainerCount += other.deactivatedContainerCount;
        this.deactivatedContainersWithRetainedRefsCount += other.deactivatedContainersWithRetainedRefsCount;
        return this;
    }
}
} | class only constructible from this package
public void emitMetrics(Metric metric) {
synchronized (lock) {
DeactivatedContainerMetrics metrics = deactivatedContainerStream()
.collect(
DeactivatedContainerMetrics::new,
DeactivatedContainerMetrics::aggregate,
DeactivatedContainerMetrics::merge);
metric.set(Metrics.TOTAL_DEACTIVATED_CONTAINERS, metrics.deactivatedContainerCount, null);
metric.set(Metrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, metrics.deactivatedContainersWithRetainedRefsCount, null);
}
} |
orElse pulls an empty value back to null which is then tested against | public ConfigServer[] allConfigServers() {
String newVar = System.getenv("VESPA_CONFIGSERVERS");
if (newVar != null && !newVar.isEmpty()) {
return toConfigServers(newVar);
}
String oldVar = getRawInstallVariable("services.addr_configserver").orElse(null);
if (oldVar != null && !oldVar.isEmpty()) {
return toConfigServers(oldVar);
}
return new ConfigServer[0];
} | String oldVar = getRawInstallVariable("services.addr_configserver").orElse(null); | public ConfigServer[] allConfigServers() {
return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVERS"))
.map(Optional::of)
.orElseGet(() -> getRawInstallVariable("services.addr_configserver"))
.map(CloudConfigInstallVariables::toConfigServers)
.orElseGet(() -> new ConfigServer[0]);
} | class CloudConfigInstallVariables implements CloudConfigOptions {
@Override
public Optional<Integer> rpcPort() {
return getInstallVariable("port_configserver_rpc", "services", Integer::parseInt);
}
@Override
public Optional<Boolean> multiTenant() {
return getInstallVariable("multitenant", Boolean::parseBoolean);
}
@Override
@Override
public Optional<Long> zookeeperBarrierTimeout() {
return getInstallVariable("zookeeper_barrier_timeout", Long::parseLong);
}
@Override
public Optional<Long> sessionLifeTimeSecs() {
return getInstallVariable("session_lifetime", Long::parseLong);
}
@Override
public String[] configModelPluginDirs() {
return getRawInstallVariable("cloudconfig_server.config_model_plugin_dirs")
.map(CloudConfigInstallVariables::toConfigModelsPluginDir)
.orElseGet(() -> new String[0]);
}
@Override
public Optional<Integer> zookeeperClientPort() {
return getInstallVariable("zookeeper_clientPort", Integer::parseInt);
}
@Override
public Optional<Integer> zookeeperQuorumPort() {
return getInstallVariable("zookeeper_quoromPort", Integer::parseInt);
}
@Override
public Optional<Integer> zookeeperElectionPort() {
return getInstallVariable("zookeeper_electionPort", Integer::parseInt);
}
@Override
public Optional<String> payloadCompressionType() {
return getInstallVariable("payload_compression_type", Function.identity());
}
@Override
public Optional<String> environment() {
return getInstallVariable("environment");
}
@Override
public Optional<String> region() {
return getInstallVariable("region");
}
@Override
public Optional<String> system() {
return getInstallVariable("system");
}
@Override
public Optional<String> defaultFlavor() {
return getInstallVariable("default_flavor");
}
@Override
public Optional<String> defaultAdminFlavor() {
return getInstallVariable("default_admin_flavor");
}
@Override
public Optional<String> defaultContainerFlavor() {
return getInstallVariable("default_container_flavor");
}
@Override
public Optional<String> defaultContentFlavor() {
return getInstallVariable("default_content_flavor");
}
@Override
public Optional<Boolean> useVespaVersionInRequest() {
return getInstallVariable("use_vespa_version_in_request", Boolean::parseBoolean);
}
@Override
public Optional<Boolean> hostedVespa() {
return getInstallVariable("hosted_vespa", Boolean::parseBoolean);
}
@Override
public Optional<Integer> numParallelTenantLoaders() {
return getInstallVariable("num_parallel_tenant_loaders", Integer::parseInt);
}
@Override
public Optional<String> loadBalancerAddress() {
return getInstallVariable("load_balancer_address");
}
static ConfigServer[] toConfigServers(String configserversString) {
return Arrays.stream(configserversString.split(",|\\s+"))
.map(CloudConfigInstallVariables::toConfigServer)
.toArray(ConfigServer[]::new);
}
static ConfigServer toConfigServer(String configserverString) {
try {
String[] hostPortTuple = configserverString.split(":");
if (configserverString.contains(":")) {
return new ConfigServer(hostPortTuple[0], Optional.of(Integer.parseInt(hostPortTuple[1])));
} else {
return new ConfigServer(configserverString, Optional.empty());
}
} catch (Exception e) {
throw new IllegalArgumentException("Invalid config server " + configserverString, e);
}
}
static String[] toConfigModelsPluginDir(String configModelsPluginDirString) {
return configModelsPluginDirString.split(",|\\s+");
}
private static Optional<String> getInstallVariable(String name) {
return getInstallVariable(name, Function.identity());
}
private static <T> Optional<T> getInstallVariable(String name, Function<String, T> converter) {
return getInstallVariable(name, "cloudconfig_server", converter);
}
private static <T> Optional<T> getInstallVariable(String name, String installPackage, Function<String, T> converter) {
return getRawInstallVariable(installPackage + "." + name).map(converter);
}
private static Optional<String> getRawInstallVariable(String name) {
return Optional.ofNullable(
Optional.ofNullable(System.getenv(name.replace(".", "__")))
.orElseGet(() -> System.getProperty(name)));
}
} | class CloudConfigInstallVariables implements CloudConfigOptions {
@Override
public Optional<Integer> rpcPort() {
return getInstallVariable("port_configserver_rpc", "services", Integer::parseInt);
}
@Override
public Optional<Boolean> multiTenant() {
return getInstallVariable("multitenant", Boolean::parseBoolean);
}
@Override
@Override
public Optional<Long> zookeeperBarrierTimeout() {
return getInstallVariable("zookeeper_barrier_timeout", Long::parseLong);
}
@Override
public Optional<Long> sessionLifeTimeSecs() {
return getInstallVariable("session_lifetime", Long::parseLong);
}
@Override
public String[] configModelPluginDirs() {
return getRawInstallVariable("cloudconfig_server.config_model_plugin_dirs")
.map(CloudConfigInstallVariables::toConfigModelsPluginDir)
.orElseGet(() -> new String[0]);
}
@Override
public Optional<Integer> zookeeperClientPort() {
return getInstallVariable("zookeeper_clientPort", Integer::parseInt);
}
@Override
public Optional<Integer> zookeeperQuorumPort() {
return getInstallVariable("zookeeper_quoromPort", Integer::parseInt);
}
@Override
public Optional<Integer> zookeeperElectionPort() {
return getInstallVariable("zookeeper_electionPort", Integer::parseInt);
}
@Override
public Optional<String> payloadCompressionType() {
return getInstallVariable("payload_compression_type", Function.identity());
}
@Override
public Optional<String> environment() {
return getInstallVariable("environment");
}
@Override
public Optional<String> region() {
return getInstallVariable("region");
}
@Override
public Optional<String> system() {
return getInstallVariable("system");
}
@Override
public Optional<String> defaultFlavor() {
return getInstallVariable("default_flavor");
}
@Override
public Optional<String> defaultAdminFlavor() {
return getInstallVariable("default_admin_flavor");
}
@Override
public Optional<String> defaultContainerFlavor() {
return getInstallVariable("default_container_flavor");
}
@Override
public Optional<String> defaultContentFlavor() {
return getInstallVariable("default_content_flavor");
}
@Override
public Optional<Boolean> useVespaVersionInRequest() {
return getInstallVariable("use_vespa_version_in_request", Boolean::parseBoolean);
}
@Override
public Optional<Boolean> hostedVespa() {
return getInstallVariable("hosted_vespa", Boolean::parseBoolean);
}
@Override
public Optional<Integer> numParallelTenantLoaders() {
return getInstallVariable("num_parallel_tenant_loaders", Integer::parseInt);
}
@Override
public Optional<String> loadBalancerAddress() {
return getInstallVariable("load_balancer_address");
}
static ConfigServer[] toConfigServers(String configserversString) {
return multiValueParameterStream(configserversString)
.map(CloudConfigInstallVariables::toConfigServer)
.toArray(ConfigServer[]::new);
}
static ConfigServer toConfigServer(String configserverString) {
try {
String[] hostPortTuple = configserverString.split(":");
if (configserverString.contains(":")) {
return new ConfigServer(hostPortTuple[0], Optional.of(Integer.parseInt(hostPortTuple[1])));
} else {
return new ConfigServer(configserverString, Optional.empty());
}
} catch (Exception e) {
throw new IllegalArgumentException("Invalid config server " + configserverString, e);
}
}
static String[] toConfigModelsPluginDir(String configModelsPluginDirString) {
return multiValueParameterStream(configModelsPluginDirString).toArray(String[]::new);
}
private static Optional<String> getInstallVariable(String name) {
return getInstallVariable(name, Function.identity());
}
private static <T> Optional<T> getInstallVariable(String name, Function<String, T> converter) {
return getInstallVariable(name, "cloudconfig_server", converter);
}
private static <T> Optional<T> getInstallVariable(String name, String installPackage, Function<String, T> converter) {
return getRawInstallVariable(installPackage + "." + name).map(converter);
}
private static Optional<String> getRawInstallVariable(String name) {
return Optional.ofNullable(
Optional.ofNullable(System.getenv(name.replace(".", "__")))
.orElseGet(() -> System.getProperty(name)));
}
private static Stream<String> multiValueParameterStream(String param) {
return Arrays.stream(param.split("[, ]")).filter(value -> !value.isEmpty());
}
} |
Consider using per-unit methods (e.g. `ofHours`) for the conversions and `plus` method to add them together. | public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay"))
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
else
steps.add(new ZoneDeployment(environment,
Optional.of(RegionName.from(XML.getValue(stepTag).trim())),
readActive(stepTag)));
}
}
else {
steps.add(new ZoneDeployment(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
} | steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 + | public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay"))
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
else
steps.add(new ZoneDeployment(environment,
Optional.of(RegionName.from(XML.getValue(stepTag).trim())),
readActive(stepTag)));
}
}
else {
steps.add(new ZoneDeployment(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new ZoneDeployment(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new ZoneDeployment(Environment.test));
}
ZoneDeployment testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
ZoneDeployment stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @param environment
* @return the removed step, or null if it is not present
*/
private static ZoneDeployment remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (ZoneDeployment)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns the XML form of this spec, or null if it was not created by fromXml or is the empty spec */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
/** Returns the given attribute as an integer, or 0 if it is not present */
private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Integer.parseInt(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
}
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A delpoyment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class ZoneDeployment extends Step {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public ZoneDeployment(Environment environment) {
this(environment, Optional.empty(), false);
}
public ZoneDeployment(Environment environment, Optional<RegionName> region, boolean active) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && ! region.isPresent())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
steps = new ArrayList<>(new LinkedHashSet<>(steps));
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new ZoneDeployment(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new ZoneDeployment(Environment.test));
}
ZoneDeployment testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
ZoneDeployment stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @param environment
* @return the removed step, or null if it is not present
*/
private static ZoneDeployment remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (ZoneDeployment)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns the XML form of this spec, or null if it was not created by fromXml or is the empty spec */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
/** Returns the given attribute as an integer, or 0 if it is not present */
private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Long.parseLong(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
}
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A delpoyment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class ZoneDeployment extends Step {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public ZoneDeployment(Environment environment) {
this(environment, Optional.empty(), false);
}
public ZoneDeployment(Environment environment, Optional<RegionName> region, boolean active) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && ! region.isPresent())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(environment, region);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof ZoneDeployment)) return false;
ZoneDeployment other = (ZoneDeployment)o;
if (this.environment != other.environment) return false;
if ( ! this.region.equals(other.region())) return false;
return true;
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} |
Should be `Long.parseLong` or return `int` instead. | private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Integer.parseInt(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
} | return Integer.parseInt(value); | private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Long.parseLong(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new ZoneDeployment(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new ZoneDeployment(Environment.test));
}
ZoneDeployment testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
ZoneDeployment stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @param environment
* @return the removed step, or null if it is not present
*/
private static ZoneDeployment remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (ZoneDeployment)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns the XML form of this spec, or null if it was not created by fromXml or is the empty spec */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay"))
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
else
steps.add(new ZoneDeployment(environment,
Optional.of(RegionName.from(XML.getValue(stepTag).trim())),
readActive(stepTag)));
}
}
else {
steps.add(new ZoneDeployment(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
}
/** Returns the given attribute as an integer, or 0 if it is not present */
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A delpoyment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class ZoneDeployment extends Step {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public ZoneDeployment(Environment environment) {
this(environment, Optional.empty(), false);
}
public ZoneDeployment(Environment environment, Optional<RegionName> region, boolean active) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && ! region.isPresent())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
steps = new ArrayList<>(new LinkedHashSet<>(steps));
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new ZoneDeployment(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new ZoneDeployment(Environment.test));
}
ZoneDeployment testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
ZoneDeployment stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @param environment
* @return the removed step, or null if it is not present
*/
private static ZoneDeployment remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (ZoneDeployment)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns the XML form of this spec, or null if it was not created by fromXml or is the empty spec */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay"))
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
else
steps.add(new ZoneDeployment(environment,
Optional.of(RegionName.from(XML.getValue(stepTag).trim())),
readActive(stepTag)));
}
}
else {
steps.add(new ZoneDeployment(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
}
/** Returns the given attribute as an integer, or 0 if it is not present */
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A delpoyment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class ZoneDeployment extends Step {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public ZoneDeployment(Environment environment) {
this(environment, Optional.empty(), false);
}
public ZoneDeployment(Environment environment, Optional<RegionName> region, boolean active) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && ! region.isPresent())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(environment, region);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof ZoneDeployment)) return false;
ZoneDeployment other = (ZoneDeployment)o;
if (this.environment != other.environment) return false;
if ( ! this.region.equals(other.region())) return false;
return true;
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} |
Considered. But it became longer. | public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay"))
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
else
steps.add(new ZoneDeployment(environment,
Optional.of(RegionName.from(XML.getValue(stepTag).trim())),
readActive(stepTag)));
}
}
else {
steps.add(new ZoneDeployment(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
} | steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 + | public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay"))
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
else
steps.add(new ZoneDeployment(environment,
Optional.of(RegionName.from(XML.getValue(stepTag).trim())),
readActive(stepTag)));
}
}
else {
steps.add(new ZoneDeployment(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new ZoneDeployment(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new ZoneDeployment(Environment.test));
}
ZoneDeployment testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
ZoneDeployment stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @param environment
* @return the removed step, or null if it is not present
*/
private static ZoneDeployment remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (ZoneDeployment)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns the XML form of this spec, or null if it was not created by fromXml or is the empty spec */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
/** Returns the given attribute as an integer, or 0 if it is not present */
private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Integer.parseInt(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
}
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A delpoyment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class ZoneDeployment extends Step {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public ZoneDeployment(Environment environment) {
this(environment, Optional.empty(), false);
}
public ZoneDeployment(Environment environment, Optional<RegionName> region, boolean active) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && ! region.isPresent())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
steps = new ArrayList<>(new LinkedHashSet<>(steps));
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new ZoneDeployment(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new ZoneDeployment(Environment.test));
}
ZoneDeployment testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
ZoneDeployment stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @param environment
* @return the removed step, or null if it is not present
*/
private static ZoneDeployment remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (ZoneDeployment)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns the XML form of this spec, or null if it was not created by fromXml or is the empty spec */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
/** Returns the given attribute as an integer, or 0 if it is not present */
private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Long.parseLong(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
}
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A delpoyment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class ZoneDeployment extends Step {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public ZoneDeployment(Environment environment) {
this(environment, Optional.empty(), false);
}
public ZoneDeployment(Environment environment, Optional<RegionName> region, boolean active) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && ! region.isPresent())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(environment, region);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof ZoneDeployment)) return false;
ZoneDeployment other = (ZoneDeployment)o;
if (this.environment != other.environment) return false;
if ( ! this.region.equals(other.region())) return false;
return true;
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} |
Thanks - done. | private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Integer.parseInt(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
} | return Integer.parseInt(value); | private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Long.parseLong(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new ZoneDeployment(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new ZoneDeployment(Environment.test));
}
ZoneDeployment testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
ZoneDeployment stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @param environment
* @return the removed step, or null if it is not present
*/
private static ZoneDeployment remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (ZoneDeployment)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns the XML form of this spec, or null if it was not created by fromXml or is the empty spec */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay"))
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
else
steps.add(new ZoneDeployment(environment,
Optional.of(RegionName.from(XML.getValue(stepTag).trim())),
readActive(stepTag)));
}
}
else {
steps.add(new ZoneDeployment(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
}
/** Returns the given attribute as an integer, or 0 if it is not present */
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A delpoyment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class ZoneDeployment extends Step {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public ZoneDeployment(Environment environment) {
this(environment, Optional.empty(), false);
}
public ZoneDeployment(Environment environment, Optional<RegionName> region, boolean active) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && ! region.isPresent())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
steps = new ArrayList<>(new LinkedHashSet<>(steps));
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new ZoneDeployment(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new ZoneDeployment(Environment.test));
}
ZoneDeployment testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
ZoneDeployment stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @param environment
* @return the removed step, or null if it is not present
*/
private static ZoneDeployment remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (ZoneDeployment)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns the XML form of this spec, or null if it was not created by fromXml or is the empty spec */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay"))
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
else
steps.add(new ZoneDeployment(environment,
Optional.of(RegionName.from(XML.getValue(stepTag).trim())),
readActive(stepTag)));
}
}
else {
steps.add(new ZoneDeployment(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
}
/** Returns the given attribute as an integer, or 0 if it is not present */
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A delpoyment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class ZoneDeployment extends Step {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public ZoneDeployment(Environment environment) {
this(environment, Optional.empty(), false);
}
public ZoneDeployment(Environment environment, Optional<RegionName> region, boolean active) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && ! region.isPresent())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(environment, region);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof ZoneDeployment)) return false;
ZoneDeployment other = (ZoneDeployment)o;
if (this.environment != other.environment) return false;
if ( ! this.region.equals(other.region())) return false;
return true;
}
}
/**
 * Controls when this application will be upgraded to new Vespa versions.
 * Declaration order appears to follow upgrade order (canary first, conservative last) —
 * NOTE(review): confirm before relying on ordinal().
 */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} |
Oops.. we removed this by accident! I'll put it back. | private static Set<Metric> getQrserverMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("peak_qps.max", "peak_qps"));
metrics.add(new Metric("search_connections.average", "search_connections"));
metrics.add(new Metric("active_queries.average", "active_queries"));
metrics.add(new Metric("feed.latency.average"));
metrics.add(new Metric("query_latency.average", "mean_query_latency"));
metrics.add(new Metric("query_latency.max", "max_query_latency"));
metrics.add(new Metric("query_latency.95percentile", "95p_query_latency"));
metrics.add(new Metric("query_latency.99percentile", "99p_query_latency"));
metrics.add(new Metric("failed_queries.rate", "failed_queries"));
metrics.add(new Metric("hits_per_query.average", "hits_per_query"));
metrics.add(new Metric("totalhits_per_query.average", "totalhits_per_query"));
metrics.add(new Metric("empty_results.rate", "empty_results"));
metrics.add(new Metric("requestsOverQuota.rate"));
metrics.add(new Metric("requestsOverQuota.count"));
metrics.add(new Metric("error.timeout.rate","error.timeout"));
metrics.add(new Metric("error.backends_oos.rate","error.backends_oos"));
metrics.add(new Metric("error.plugin_failure.rate","error.plugin_failure"));
metrics.add(new Metric("error.backend_communication_error.rate","error.backend_communication_error"));
metrics.add(new Metric("error.empty_document_summaries.rate","error.empty_document_summaries"));
metrics.add(new Metric("error.invalid_query_parameter.rate","error.invalid_query_parameter"));
metrics.add(new Metric("error.internal_server_error.rate", "error.internal_server_error"));
metrics.add(new Metric("error.misconfigured_server.rate","error.misconfigured_server"));
metrics.add(new Metric("error.invalid_query_transformation.rate","error.invalid_query_transformation"));
metrics.add(new Metric("error.result_with_errors.rate","error.result_with_errors"));
metrics.add(new Metric("error.unspecified.rate","error.unspecified"));
metrics.add(new Metric("error.unhandled_exception.rate","error.unhandled_exception"));
metrics.add(new Metric("http.status.1xx.rate"));
metrics.add(new Metric("http.status.2xx.rate"));
metrics.add(new Metric("http.status.3xx.rate"));
metrics.add(new Metric("http.status.4xx.rate"));
metrics.add(new Metric("http.status.5xx.rate"));
return metrics;
} | metrics.add(new Metric("search_connections.average", "search_connections")); | private static Set<Metric> getQrserverMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("peak_qps.max", "peak_qps"));
metrics.add(new Metric("search_connections.average", "search_connections"));
metrics.add(new Metric("active_queries.average", "active_queries"));
metrics.add(new Metric("feed.latency.average"));
metrics.add(new Metric("query_latency.average", "mean_query_latency"));
metrics.add(new Metric("query_latency.max", "max_query_latency"));
metrics.add(new Metric("query_latency.95percentile", "95p_query_latency"));
metrics.add(new Metric("query_latency.99percentile", "99p_query_latency"));
metrics.add(new Metric("failed_queries.rate", "failed_queries"));
metrics.add(new Metric("hits_per_query.average", "hits_per_query"));
metrics.add(new Metric("totalhits_per_query.average", "totalhits_per_query"));
metrics.add(new Metric("empty_results.rate", "empty_results"));
metrics.add(new Metric("requestsOverQuota.rate"));
metrics.add(new Metric("requestsOverQuota.count"));
metrics.add(new Metric("error.timeout.rate","error.timeout"));
metrics.add(new Metric("error.backends_oos.rate","error.backends_oos"));
metrics.add(new Metric("error.plugin_failure.rate","error.plugin_failure"));
metrics.add(new Metric("error.backend_communication_error.rate","error.backend_communication_error"));
metrics.add(new Metric("error.empty_document_summaries.rate","error.empty_document_summaries"));
metrics.add(new Metric("error.invalid_query_parameter.rate","error.invalid_query_parameter"));
metrics.add(new Metric("error.internal_server_error.rate", "error.internal_server_error"));
metrics.add(new Metric("error.misconfigured_server.rate","error.misconfigured_server"));
metrics.add(new Metric("error.invalid_query_transformation.rate","error.invalid_query_transformation"));
metrics.add(new Metric("error.result_with_errors.rate","error.result_with_errors"));
metrics.add(new Metric("error.unspecified.rate","error.unspecified"));
metrics.add(new Metric("error.unhandled_exception.rate","error.unhandled_exception"));
metrics.add(new Metric("http.status.1xx.rate"));
metrics.add(new Metric("http.status.2xx.rate"));
metrics.add(new Metric("http.status.3xx.rate"));
metrics.add(new Metric("http.status.4xx.rate"));
metrics.add(new Metric("http.status.5xx.rate"));
return metrics;
class VespaMetricSet {
// The complete "vespa" metric set: all service metrics below plus the default Vespa metric set.
public static final MetricSet vespaMetricSet = new MetricSet("vespa",
getVespaMetrics(),
singleton(defaultVespaMetricSet));
/**
 * Aggregates the per-service metric sets into one unmodifiable set.
 * LinkedHashSet preserves the registration order of the sub-sets below.
 */
private static Set<Metric> getVespaMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.addAll(getSearchNodeMetrics());
metrics.addAll(getStorageMetrics());
metrics.addAll(getDocprocMetrics());
metrics.addAll(getClusterControllerMetrics());
metrics.addAll(getQrserverMetrics());
metrics.addAll(getContainerMetrics());
metrics.addAll(getConfigServerMetrics());
metrics.addAll(getOtherMetrics());
return Collections.unmodifiableSet(metrics);
}
/** Metrics not tied to one specific service type (currently only slobrok heartbeats). */
private static Set<Metric> getOtherMetrics() {
    Set<Metric> result = new LinkedHashSet<>();
    result.add(new Metric("slobrok.heartbeats.failed.count", "slobrok.heartbeats.failed"));
    return result;
}
/** Returns the config server metrics to report, in registration order. */
private static Set<Metric> getConfigServerMetrics() {
Set<Metric> metrics =new LinkedHashSet<>();
metrics.add(new Metric("configserver.requests.count", "configserver.requests"));
metrics.add(new Metric("configserver.failedRequests.count", "configserver.failedRequests"));
metrics.add(new Metric("configserver.latency.average", "configserver.latency"));
metrics.add(new Metric("configserver.cacheConfigElems.last", "configserver.cacheConfigElems"));
metrics.add(new Metric("configserver.cacheChecksumElems.last", "configserver.cacheChecksumElems"));
metrics.add(new Metric("configserver.hosts.last", "configserver.hosts"));
metrics.add(new Metric("configserver.delayedResponses.count", "configserver.delayedResponses"));
metrics.add(new Metric("configserver.sessionChangeErrors.count", "configserver.sessionChangeErrors"));
return metrics;
}
/** Returns the generic container (jdisc) metrics to report, in registration order. */
private static Set<Metric> getContainerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("serverRejectedRequests.rate"));
metrics.add(new Metric("serverRejectedRequests.count"));
// Server thread pool size and active threads, reported with all aggregations
metrics.add(new Metric("serverThreadPoolSize.average"));
metrics.add(new Metric("serverThreadPoolSize.min"));
metrics.add(new Metric("serverThreadPoolSize.max"));
metrics.add(new Metric("serverThreadPoolSize.rate"));
metrics.add(new Metric("serverThreadPoolSize.count"));
metrics.add(new Metric("serverThreadPoolSize.last"));
metrics.add(new Metric("serverActiveThreads.average"));
metrics.add(new Metric("serverActiveThreads.min"));
metrics.add(new Metric("serverActiveThreads.max"));
metrics.add(new Metric("serverActiveThreads.rate"));
metrics.add(new Metric("serverActiveThreads.count"));
metrics.add(new Metric("serverActiveThreads.last"));
// HTTP document API operations
metrics.add(new Metric("httpapi_latency.average"));
metrics.add(new Metric("httpapi_pending.average"));
metrics.add(new Metric("httpapi_num_operations.rate"));
metrics.add(new Metric("httpapi_num_updates.rate"));
metrics.add(new Metric("httpapi_num_removes.rate"));
metrics.add(new Metric("httpapi_num_puts.rate"));
metrics.add(new Metric("httpapi_succeeded.rate"));
metrics.add(new Metric("httpapi_failed.rate"));
// JVM heap and process resources
metrics.add(new Metric("mem.heap.total.average"));
metrics.add(new Metric("mem.heap.free.average"));
metrics.add(new Metric("mem.heap.used.average"));
metrics.add(new Metric("jdisc.memory_mappings.max"));
metrics.add(new Metric("jdisc.open_file_descriptors.max"));
return metrics;
}
/** Returns the cluster controller metrics to report (node state counts and controller status). */
private static Set<Metric> getClusterControllerMetrics() {
Set<Metric> metrics =new LinkedHashSet<>();
metrics.add(new Metric("cluster-controller.down.count.last"));
metrics.add(new Metric("cluster-controller.initializing.count.last"));
metrics.add(new Metric("cluster-controller.maintenance.count.last"));
metrics.add(new Metric("cluster-controller.retired.count.last"));
metrics.add(new Metric("cluster-controller.stopping.count.last"));
metrics.add(new Metric("cluster-controller.up.count.last"));
metrics.add(new Metric("cluster-controller.cluster-state-change.count", "content.cluster-controller.cluster-state-change.count"));
metrics.add(new Metric("cluster-controller.is-master.last"));
metrics.add(new Metric("cluster-controller.node-event.count"));
return metrics;
}
/** Document-processing (docproc) metrics. */
private static Set<Metric> getDocprocMetrics() {
    Set<Metric> result = new LinkedHashSet<>();
    result.add(new Metric("documents_processed.rate", "documents_processed"));
    return result;
}
/**
 * Returns the search node (proton) metrics to report, in registration order.
 * Where a second argument is given, the metric is presumably reported under that
 * output name — confirm against the Metric class.
 */
private static Set<Metric> getSearchNodeMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
// Document counts and sizes
metrics.add(new Metric("proton.numstoreddocs.last", "documents_total"));
metrics.add(new Metric("proton.numindexeddocs.last", "documents_ready"));
metrics.add(new Metric("proton.numactivedocs.last", "documents_active"));
metrics.add(new Metric("proton.numremoveddocs.last", "documents_removed"));
metrics.add(new Metric("proton.docsinmemory.last", "documents_inmemory"));
metrics.add(new Metric("proton.diskusage.last", "diskusage"));
metrics.add(new Metric("proton.memoryusage.max", "content.proton.memoryusage.max"));
// Query/docsum transport
metrics.add(new Metric("proton.transport.query.count.rate", "query_requests"))
metrics.add(new Metric("proton.transport.docsum.docs.rate", "document_requests"));
metrics.add(new Metric("proton.transport.docsum.latency.average", "content.proton.transport.docsum.latency.average"));
metrics.add(new Metric("proton.transport.query.latency.average", "query_latency"));
// Maintenance job load
metrics.add(new Metric("content.proton.documentdb.job.total.average"));
metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average"));
metrics.add(new Metric("content.proton.documentdb.job.document_store_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average"));
metrics.add(new Metric("content.proton.documentdb.job.bucket_move.average"));
metrics.add(new Metric("content.proton.documentdb.job.lid_space_compact.average"));
metrics.add(new Metric("content.proton.documentdb.job.removed_documents_prune.average"));
// Lid space bloat/fragmentation per sub-db
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average"));
// Resource usage (feed blocking inputs)
metrics.add(new Metric("content.proton.resource_usage.disk.average"));
metrics.add(new Metric("content.proton.resource_usage.memory.average"));
metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.enum_store.average"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.multi_value.average"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last"));
// Transaction log
metrics.add(new Metric("content.proton.transactionlog.entries.average"));
metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
// Document store disk/memory usage per sub-db (ready/notready/removed)
metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average"));
// Attribute memory usage per sub-db
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average"));
// Index memory usage
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.onhold_bytes.average"));
return metrics;
}
/**
 * Returns the storage node and distributor (vds) metrics to report, in registration order.
 * Where a second argument is given, the metric is presumably reported under that
 * output name — confirm against the Metric class.
 */
private static Set<Metric> getStorageMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
// Stored data volume
metrics.add(new Metric("vds.datastored.alldisks.docs.average","docs"));
metrics.add(new Metric("vds.datastored.alldisks.bytes.average","bytes"));
// Visitor lifetime and queueing
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average","visitorlifetime"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average","visitorqueuewait"));
// Filestor operation rates and queueing
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate","put"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate","remove"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate","get"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate","update"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.average","diskqueuesize"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.average","diskqueuewait"));
// Memfile persistence cache
metrics.add(new Metric("vds.memfilepersistence.cache.files.average"));
metrics.add(new Metric("vds.memfilepersistence.cache.body.average"));
metrics.add(new Metric("vds.memfilepersistence.cache.header.average"));
metrics.add(new Metric("vds.memfilepersistence.cache.meta.average"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average"));
metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate","visit"));
// Filestor operation latencies and bucket maintenance
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate"));
// SPI (persistence provider) success counts/rates
metrics.add(new Metric("vds.filestor.spi.put.success.average"));
metrics.add(new Metric("vds.filestor.spi.remove.success.average"));
metrics.add(new Metric("vds.filestor.spi.update.success.average"));
metrics.add(new Metric("vds.filestor.spi.get.success.average"));
metrics.add(new Metric("vds.filestor.spi.iterate.success.average"));
metrics.add(new Metric("vds.filestor.spi.put.success.rate"));
metrics.add(new Metric("vds.filestor.spi.remove.success.rate"));
metrics.add(new Metric("vds.filestor.spi.update.success.rate"));
metrics.add(new Metric("vds.filestor.spi.get.success.rate"));
metrics.add(new Metric("vds.filestor.spi.iterate.success.rate"));
// Ideal-state convergence and maintenance operations
metrics.add(new Metric("vds.idealstate.buckets_rechecking.average"));
metrics.add(new Metric("vds.idealstate.idealstate_diff.average"));
metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average"));
metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average"));
metrics.add(new Metric("vds.idealstate.buckets.average"));
metrics.add(new Metric("vds.idealstate.buckets_notrusted.average"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate","deleteok"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate","deletefailed"));
metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average","deletepending"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate","mergeok"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_failed.rate","mergefailed"));
metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average","mergepending"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate","splitok"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate","splitfailed"));
metrics.add(new Metric("vds.idealstate.split_bucket.pending.average","splitpending"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate","joinok"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate","joinfailed"));
metrics.add(new Metric("vds.idealstate.join_bucket.pending.average","joinpending"));
// Distributor operation latencies and outcomes
metrics.add(new Metric("vds.distributor.puts.sum.latency.average"));
metrics.add(new Metric("vds.distributor.puts.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.average"));
metrics.add(new Metric("vds.distributor.removes.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.average"));
metrics.add(new Metric("vds.distributor.updates.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.latency.average"));
metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.average"));
metrics.add(new Metric("vds.distributor.gets.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.average"));
metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.docsstored.average"));
metrics.add(new Metric("vds.distributor.bytesstored.average"));
return metrics;
}
} | class VespaMetricSet {
public static final MetricSet vespaMetricSet = new MetricSet("vespa",
getVespaMetrics(),
singleton(defaultVespaMetricSet));
private static Set<Metric> getVespaMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.addAll(getSearchNodeMetrics());
metrics.addAll(getStorageMetrics());
metrics.addAll(getDocprocMetrics());
metrics.addAll(getClusterControllerMetrics());
metrics.addAll(getQrserverMetrics());
metrics.addAll(getContainerMetrics());
metrics.addAll(getConfigServerMetrics());
metrics.addAll(getOtherMetrics());
return Collections.unmodifiableSet(metrics);
}
private static Set<Metric> getOtherMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("slobrok.heartbeats.failed.count", "slobrok.heartbeats.failed"));
return metrics;
}
private static Set<Metric> getConfigServerMetrics() {
Set<Metric> metrics =new LinkedHashSet<>();
metrics.add(new Metric("configserver.requests.count", "configserver.requests"));
metrics.add(new Metric("configserver.failedRequests.count", "configserver.failedRequests"));
metrics.add(new Metric("configserver.latency.average", "configserver.latency"));
metrics.add(new Metric("configserver.cacheConfigElems.last", "configserver.cacheConfigElems"));
metrics.add(new Metric("configserver.cacheChecksumElems.last", "configserver.cacheChecksumElems"));
metrics.add(new Metric("configserver.hosts.last", "configserver.hosts"));
metrics.add(new Metric("configserver.delayedResponses.count", "configserver.delayedResponses"));
metrics.add(new Metric("configserver.sessionChangeErrors.count", "configserver.sessionChangeErrors"));
return metrics;
}
private static Set<Metric> getContainerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("serverRejectedRequests.rate"));
metrics.add(new Metric("serverRejectedRequests.count"));
metrics.add(new Metric("serverThreadPoolSize.average"));
metrics.add(new Metric("serverThreadPoolSize.min"));
metrics.add(new Metric("serverThreadPoolSize.max"));
metrics.add(new Metric("serverThreadPoolSize.rate"));
metrics.add(new Metric("serverThreadPoolSize.count"));
metrics.add(new Metric("serverThreadPoolSize.last"));
metrics.add(new Metric("serverActiveThreads.average"));
metrics.add(new Metric("serverActiveThreads.min"));
metrics.add(new Metric("serverActiveThreads.max"));
metrics.add(new Metric("serverActiveThreads.rate"));
metrics.add(new Metric("serverActiveThreads.count"));
metrics.add(new Metric("serverActiveThreads.last"));
metrics.add(new Metric("httpapi_latency.average"));
metrics.add(new Metric("httpapi_pending.average"));
metrics.add(new Metric("httpapi_num_operations.rate"));
metrics.add(new Metric("httpapi_num_updates.rate"));
metrics.add(new Metric("httpapi_num_removes.rate"));
metrics.add(new Metric("httpapi_num_puts.rate"));
metrics.add(new Metric("httpapi_succeeded.rate"));
metrics.add(new Metric("httpapi_failed.rate"));
metrics.add(new Metric("mem.heap.total.average"));
metrics.add(new Metric("mem.heap.free.average"));
metrics.add(new Metric("mem.heap.used.average"));
metrics.add(new Metric("jdisc.memory_mappings.max"));
metrics.add(new Metric("jdisc.open_file_descriptors.max"));
return metrics;
}
private static Set<Metric> getClusterControllerMetrics() {
Set<Metric> metrics =new LinkedHashSet<>();
metrics.add(new Metric("cluster-controller.down.count.last"));
metrics.add(new Metric("cluster-controller.initializing.count.last"));
metrics.add(new Metric("cluster-controller.maintenance.count.last"));
metrics.add(new Metric("cluster-controller.retired.count.last"));
metrics.add(new Metric("cluster-controller.stopping.count.last"));
metrics.add(new Metric("cluster-controller.up.count.last"));
metrics.add(new Metric("cluster-controller.cluster-state-change.count", "content.cluster-controller.cluster-state-change.count"));
metrics.add(new Metric("cluster-controller.is-master.last"));
metrics.add(new Metric("cluster-controller.node-event.count"));
return metrics;
}
private static Set<Metric> getDocprocMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("documents_processed.rate", "documents_processed"));
return metrics;
}
private static Set<Metric> getSearchNodeMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("proton.numstoreddocs.last", "documents_total"));
metrics.add(new Metric("proton.numindexeddocs.last", "documents_ready"));
metrics.add(new Metric("proton.numactivedocs.last", "documents_active"));
metrics.add(new Metric("proton.numremoveddocs.last", "documents_removed"));
metrics.add(new Metric("proton.docsinmemory.last", "documents_inmemory"));
metrics.add(new Metric("proton.diskusage.last", "diskusage"));
metrics.add(new Metric("proton.memoryusage.max", "content.proton.memoryusage.max"));
metrics.add(new Metric("proton.transport.query.count.rate", "query_requests"));
metrics.add(new Metric("proton.transport.docsum.docs.rate", "document_requests"));
metrics.add(new Metric("proton.transport.docsum.latency.average", "content.proton.transport.docsum.latency.average"));
metrics.add(new Metric("proton.transport.query.latency.average", "query_latency"));
metrics.add(new Metric("content.proton.documentdb.job.total.average"));
metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average"));
metrics.add(new Metric("content.proton.documentdb.job.document_store_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average"));
metrics.add(new Metric("content.proton.documentdb.job.bucket_move.average"));
metrics.add(new Metric("content.proton.documentdb.job.lid_space_compact.average"));
metrics.add(new Metric("content.proton.documentdb.job.removed_documents_prune.average"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.resource_usage.disk.average"));
metrics.add(new Metric("content.proton.resource_usage.memory.average"));
metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.enum_store.average"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.multi_value.average"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last"));
metrics.add(new Metric("content.proton.transactionlog.entries.average"));
metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.onhold_bytes.average"));
return metrics;
}
private static Set<Metric> getStorageMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("vds.datastored.alldisks.docs.average","docs"));
metrics.add(new Metric("vds.datastored.alldisks.bytes.average","bytes"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average","visitorlifetime"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average","visitorqueuewait"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate","put"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate","remove"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate","get"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate","update"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.average","diskqueuesize"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.average","diskqueuewait"));
metrics.add(new Metric("vds.memfilepersistence.cache.files.average"));
metrics.add(new Metric("vds.memfilepersistence.cache.body.average"));
metrics.add(new Metric("vds.memfilepersistence.cache.header.average"));
metrics.add(new Metric("vds.memfilepersistence.cache.meta.average"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average"));
metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate","visit"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate"));
metrics.add(new Metric("vds.filestor.spi.put.success.average"));
metrics.add(new Metric("vds.filestor.spi.remove.success.average"));
metrics.add(new Metric("vds.filestor.spi.update.success.average"));
metrics.add(new Metric("vds.filestor.spi.get.success.average"));
metrics.add(new Metric("vds.filestor.spi.iterate.success.average"));
metrics.add(new Metric("vds.filestor.spi.put.success.rate"));
metrics.add(new Metric("vds.filestor.spi.remove.success.rate"));
metrics.add(new Metric("vds.filestor.spi.update.success.rate"));
metrics.add(new Metric("vds.filestor.spi.get.success.rate"));
metrics.add(new Metric("vds.filestor.spi.iterate.success.rate"));
metrics.add(new Metric("vds.idealstate.buckets_rechecking.average"));
metrics.add(new Metric("vds.idealstate.idealstate_diff.average"));
metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average"));
metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average"));
metrics.add(new Metric("vds.idealstate.buckets.average"));
metrics.add(new Metric("vds.idealstate.buckets_notrusted.average"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate","deleteok"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate","deletefailed"));
metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average","deletepending"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate","mergeok"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_failed.rate","mergefailed"));
metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average","mergepending"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate","splitok"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate","splitfailed"));
metrics.add(new Metric("vds.idealstate.split_bucket.pending.average","splitpending"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate","joinok"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate","joinfailed"));
metrics.add(new Metric("vds.idealstate.join_bucket.pending.average","joinpending"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.average"));
metrics.add(new Metric("vds.distributor.puts.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.average"));
metrics.add(new Metric("vds.distributor.removes.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.average"));
metrics.add(new Metric("vds.distributor.updates.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.latency.average"));
metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.average"));
metrics.add(new Metric("vds.distributor.gets.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.average"));
metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.docsstored.average"));
metrics.add(new Metric("vds.distributor.bytesstored.average"));
return metrics;
}
} |
I think you should exit the loop here ... | void redeployAllApplications(Deployer deployer) {
ExecutorService deploymentExecutor = Executors.newCachedThreadPool();
Set<ApplicationId> applicationIds = new HashSet<>();
tenants.getAllTenants().forEach(tenant -> applicationIds.addAll(tenant.getApplicationRepo().listApplications()));
int applicationsRedeployed = 0;
for (ApplicationId applicationId : applicationIds) {
redeployApplication(applicationId, deployer, deploymentExecutor);
log.log(LogLevel.INFO, String.format("Redeployed %s of %s applications", ++applicationsRedeployed, applicationIds.size()));
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
} | e.printStackTrace(); | void redeployAllApplications(Deployer deployer) {
ExecutorService deploymentExecutor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders());
tenants.getAllTenants().forEach(tenant -> listApplicationIds(tenant)
.forEach(applicationId -> redeployApplication(applicationId, deployer, deploymentExecutor)));
} | class ApplicationRepository implements com.yahoo.config.provision.Deployer {
private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName());
private final Tenants tenants;
private final Optional<Provisioner> hostProvisioner;
private final Curator curator;
private final LogServerLogGrabber logServerLogGrabber;
private final ApplicationConvergenceChecker convergeChecker;
private final HttpProxy httpProxy;
private final Clock clock;
private final DeployLogger logger = new SilentDeployLogger();
private final Environment environment;
@Inject
public ApplicationRepository(Tenants tenants,
HostProvisionerProvider hostProvisionerProvider,
Curator curator,
LogServerLogGrabber logServerLogGrabber,
ApplicationConvergenceChecker applicationConvergenceChecker,
HttpProxy httpProxy,
ConfigserverConfig configserverConfig) {
this.tenants = tenants;
this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
this.curator = curator;
this.logServerLogGrabber = logServerLogGrabber;
this.convergeChecker = applicationConvergenceChecker;
this.httpProxy = httpProxy;
this.clock = Clock.systemUTC();
this.environment = Environment.from(configserverConfig.environment());
}
/**
* Creates a new deployment from the active application, if available.
*
* @param application the active application to be redeployed
* @param timeout the timeout to use for each individual deployment operation
* @return a new deployment from the local active, or empty if a local active application
* was not present for this id (meaning it either is not active or active on another
* node in the config server cluster)
*/
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout) {
Tenant tenant = tenants.getTenant(application.tenant());
LocalSession activeSession = tenant.getLocalSessionRepo().getActiveSession(application);
if (activeSession == null) return Optional.empty();
TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, timeoutBudget);
tenant.getLocalSessionRepo().addSession(newSession);
Version version = environment.isManuallyDeployed() ? Vtag.currentVersion : newSession.getVespaVersion();
return Optional.of(Deployment.unprepared(newSession,
tenant.getLocalSessionRepo(),
tenant.getPath(),
hostProvisioner,
new ActivateLock(curator, tenant.getPath()),
timeout,
clock,
false,
version));
}
public Deployment deployFromPreparedSession(LocalSession session, ActivateLock lock, LocalSessionRepo localSessionRepo, Duration timeout) {
return Deployment.prepared(session,
localSessionRepo,
hostProvisioner,
lock,
timeout,
clock);
}
/**
* Removes a previously deployed application
*
* @return true if the application was found and removed, false if it was not present
* @throws RuntimeException if the remove transaction fails. This method is exception safe.
*/
public boolean remove(ApplicationId applicationId) {
Optional<Tenant> owner = Optional.ofNullable(tenants.getTenant(applicationId.tenant()));
if ( ! owner.isPresent()) return false;
TenantApplications tenantApplications = owner.get().getApplicationRepo();
if ( ! tenantApplications.listApplications().contains(applicationId)) return false;
long sessionId = tenantApplications.getSessionIdForApplication(applicationId);
LocalSessionRepo localSessionRepo = owner.get().getLocalSessionRepo();
LocalSession session = localSessionRepo.getSession(sessionId);
if (session == null) return false;
NestedTransaction transaction = new NestedTransaction();
localSessionRepo.removeSession(session.getSessionId(), transaction);
session.delete(transaction);
transaction.add(new Rotations(owner.get().getCurator(), owner.get().getPath()).delete(applicationId));
transaction.add(tenantApplications.deleteApplication(applicationId));
hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId));
transaction.commit();
return true;
}
public String grabLog(Tenant tenant, ApplicationId applicationId) {
Application application = getApplication(tenant, applicationId);
return logServerLogGrabber.grabLog(application);
}
public HttpResponse serviceConvergenceCheck(Tenant tenant, ApplicationId applicationId, String hostname, URI uri) {
Application application = getApplication(tenant, applicationId);
return convergeChecker.serviceConvergenceCheck(application, hostname, uri);
}
public HttpResponse serviceListToCheckForConfigConvergence(Tenant tenant, ApplicationId applicationId, URI uri) {
Application application = getApplication(tenant, applicationId);
return convergeChecker.serviceListToCheckForConfigConvergence(application, uri);
}
public HttpResponse clusterControllerStatusPage(
Tenant tenant,
ApplicationId applicationId,
String hostName,
String pathSuffix) {
Application application = getApplication(tenant, applicationId);
String relativePath = "clustercontroller-status/" + pathSuffix;
return httpProxy.get(application, hostName, "container-clustercontroller", relativePath);
}
public Long getApplicationGeneration(Tenant tenant, ApplicationId applicationId) {
return getApplication(tenant, applicationId).getApplicationGeneration();
}
private Application getApplication(Tenant tenant, ApplicationId applicationId) {
long sessionId = getSessionIdForApplication(tenant, applicationId);
RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0);
return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty());
}
public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
return tenant.getApplicationRepo().getSessionIdForApplication(applicationId);
}
private LocalSession getLocalSession(Tenant tenant, long sessionId) {
LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId);
if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
return session;
}
private RemoteSession getRemoteSession(Tenant tenant, long sessionId) {
RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId);
if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
return session;
}
public void restart(ApplicationId applicationId, HostFilter hostFilter) {
hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter));
}
public Tenant verifyTenantAndApplication(ApplicationId applicationId) {
TenantName tenantName = applicationId.tenant();
if (!tenants.checkThatTenantExists(tenantName)) {
throw new IllegalArgumentException("Tenant " + tenantName + " was not found.");
}
Tenant tenant = tenants.getTenant(tenantName);
List<ApplicationId> applicationIds = listApplicationIds(tenant);
if (!applicationIds.contains(applicationId)) {
throw new IllegalArgumentException("No such application id: " + applicationId);
}
return tenant;
}
public ApplicationId activate(Tenant tenant,
long sessionId,
TimeoutBudget timeoutBudget,
boolean ignoreLockFailure,
boolean ignoreSessionStaleFailure) {
LocalSession localSession = getLocalSession(tenant, sessionId);
LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
ActivateLock activateLock = tenant.getActivateLock();
Deployment deployment = deployFromPreparedSession(localSession,
activateLock,
localSessionRepo,
timeoutBudget.timeLeft());
deployment.setIgnoreLockFailure(ignoreLockFailure);
deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure);
deployment.activate();
return localSession.getApplicationId();
}
public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) {
return getLocalSession(tenant, sessionId).getMetaData();
}
public void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) {
LocalSession session = getLocalSession(tenant, sessionId);
if (Session.Status.ACTIVATE.equals(session.getStatus())) {
throw new IllegalStateException("Session is active: " + sessionId);
}
}
public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) {
RemoteSession session = getRemoteSession(tenant, sessionId);
if (Session.Status.ACTIVATE.equals(session.getStatus())) {
throw new IllegalStateException("Session is active: " + sessionId);
}
}
public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) {
RemoteSession session = getRemoteSession(tenant, sessionId);
if (!Session.Status.PREPARE.equals(session.getStatus()))
throw new IllegalStateException("Session not prepared: " + sessionId);
}
private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) {
Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
TenantApplications applicationRepo = tenant.getApplicationRepo();
try {
long currentActiveSessionId = applicationRepo.getSessionIdForApplication(appId);
RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId);
if (currentActiveSession != null) {
currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded());
}
} catch (IllegalArgumentException e) {
}
return currentActiveApplicationSet;
}
public ConfigChangeActions prepare(Tenant tenant, long sessionId, DeployLogger logger, PrepareParams params) {
LocalSession session = getLocalSession(tenant, sessionId);
ApplicationId appId = params.getApplicationId();
Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, appId);
return session.prepare(logger, params, currentActiveApplicationSet, tenant.getPath());
}
private List<ApplicationId> listApplicationIds(Tenant tenant) {
TenantApplications applicationRepo = tenant.getApplicationRepo();
return applicationRepo.listApplications();
}
public long createSessionFromExisting(Tenant tenant, DeployLogger logger,
TimeoutBudget timeoutBudget, ApplicationId applicationId) {
LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
SessionFactory sessionFactory = tenant.getSessionFactory();
LocalSession fromSession = getExistingSession(tenant, applicationId);
LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, timeoutBudget);
localSessionRepo.addSession(session);
return session.getSessionId();
}
public long createSession(Tenant tenant, TimeoutBudget timeoutBudget, File applicationDirectory, String applicationName) {
LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
SessionFactory sessionFactory = tenant.getSessionFactory();
LocalSession session = sessionFactory.createSession(applicationDirectory, applicationName, timeoutBudget);
localSessionRepo.addSession(session);
return session.getSessionId();
}
private void redeployApplication(ApplicationId applicationId, Deployer deployer, ExecutorService deploymentExecutor) {
log.log(LogLevel.DEBUG, () -> "Redeploying " + applicationId);
deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30))
.ifPresent(deployment -> deploymentExecutor.execute(() -> {
try {
deployment.activate();
} catch (RuntimeException e) {
log.log(LogLevel.ERROR, "Redeploying " + applicationId + " failed", e);
}
}));
}
public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) {
Tenant tenant = tenants.getTenant(tenantName);
return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode);
}
private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
TenantApplications applicationRepo = tenant.getApplicationRepo();
return getLocalSession(tenant, applicationRepo.getSessionIdForApplication(applicationId));
}
} | class ApplicationRepository implements com.yahoo.config.provision.Deployer {
private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName());
private final Tenants tenants;
private final Optional<Provisioner> hostProvisioner;
private final Curator curator;
private final LogServerLogGrabber logServerLogGrabber;
private final ApplicationConvergenceChecker convergeChecker;
private final HttpProxy httpProxy;
private final Clock clock;
private final DeployLogger logger = new SilentDeployLogger();
private final ConfigserverConfig configserverConfig;
private final Environment environment;
@Inject
public ApplicationRepository(Tenants tenants,
HostProvisionerProvider hostProvisionerProvider,
Curator curator,
LogServerLogGrabber logServerLogGrabber,
ApplicationConvergenceChecker applicationConvergenceChecker,
HttpProxy httpProxy,
ConfigserverConfig configserverConfig) {
this.tenants = tenants;
this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
this.curator = curator;
this.logServerLogGrabber = logServerLogGrabber;
this.convergeChecker = applicationConvergenceChecker;
this.httpProxy = httpProxy;
this.clock = Clock.systemUTC();
this.configserverConfig = configserverConfig;
this.environment = Environment.from(configserverConfig.environment());
}
/**
* Creates a new deployment from the active application, if available.
*
* @param application the active application to be redeployed
* @param timeout the timeout to use for each individual deployment operation
* @return a new deployment from the local active, or empty if a local active application
* was not present for this id (meaning it either is not active or active on another
* node in the config server cluster)
*/
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout) {
Tenant tenant = tenants.getTenant(application.tenant());
LocalSession activeSession = tenant.getLocalSessionRepo().getActiveSession(application);
if (activeSession == null) return Optional.empty();
TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, timeoutBudget);
tenant.getLocalSessionRepo().addSession(newSession);
Version version = environment.isManuallyDeployed() ? Vtag.currentVersion : newSession.getVespaVersion();
return Optional.of(Deployment.unprepared(newSession,
tenant.getLocalSessionRepo(),
tenant.getPath(),
hostProvisioner,
new ActivateLock(curator, tenant.getPath()),
timeout,
clock,
false,
version));
}
public Deployment deployFromPreparedSession(LocalSession session, ActivateLock lock, LocalSessionRepo localSessionRepo, Duration timeout) {
return Deployment.prepared(session,
localSessionRepo,
hostProvisioner,
lock,
timeout,
clock);
}
/**
* Removes a previously deployed application
*
* @return true if the application was found and removed, false if it was not present
* @throws RuntimeException if the remove transaction fails. This method is exception safe.
*/
public boolean remove(ApplicationId applicationId) {
Optional<Tenant> owner = Optional.ofNullable(tenants.getTenant(applicationId.tenant()));
if ( ! owner.isPresent()) return false;
TenantApplications tenantApplications = owner.get().getApplicationRepo();
if ( ! tenantApplications.listApplications().contains(applicationId)) return false;
long sessionId = tenantApplications.getSessionIdForApplication(applicationId);
LocalSessionRepo localSessionRepo = owner.get().getLocalSessionRepo();
LocalSession session = localSessionRepo.getSession(sessionId);
if (session == null) return false;
NestedTransaction transaction = new NestedTransaction();
localSessionRepo.removeSession(session.getSessionId(), transaction);
session.delete(transaction);
transaction.add(new Rotations(owner.get().getCurator(), owner.get().getPath()).delete(applicationId));
transaction.add(tenantApplications.deleteApplication(applicationId));
hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId));
transaction.commit();
return true;
}
public String grabLog(Tenant tenant, ApplicationId applicationId) {
Application application = getApplication(tenant, applicationId);
return logServerLogGrabber.grabLog(application);
}
public HttpResponse serviceConvergenceCheck(Tenant tenant, ApplicationId applicationId, String hostname, URI uri) {
Application application = getApplication(tenant, applicationId);
return convergeChecker.serviceConvergenceCheck(application, hostname, uri);
}
public HttpResponse serviceListToCheckForConfigConvergence(Tenant tenant, ApplicationId applicationId, URI uri) {
Application application = getApplication(tenant, applicationId);
return convergeChecker.serviceListToCheckForConfigConvergence(application, uri);
}
public HttpResponse clusterControllerStatusPage(
Tenant tenant,
ApplicationId applicationId,
String hostName,
String pathSuffix) {
Application application = getApplication(tenant, applicationId);
String relativePath = "clustercontroller-status/" + pathSuffix;
return httpProxy.get(application, hostName, "container-clustercontroller", relativePath);
}
public Long getApplicationGeneration(Tenant tenant, ApplicationId applicationId) {
return getApplication(tenant, applicationId).getApplicationGeneration();
}
private Application getApplication(Tenant tenant, ApplicationId applicationId) {
long sessionId = getSessionIdForApplication(tenant, applicationId);
RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0);
return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty());
}
public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
return tenant.getApplicationRepo().getSessionIdForApplication(applicationId);
}
private LocalSession getLocalSession(Tenant tenant, long sessionId) {
LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId);
if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
return session;
}
private RemoteSession getRemoteSession(Tenant tenant, long sessionId) {
RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId);
if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
return session;
}
public void restart(ApplicationId applicationId, HostFilter hostFilter) {
hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter));
}
public Tenant verifyTenantAndApplication(ApplicationId applicationId) {
TenantName tenantName = applicationId.tenant();
if (!tenants.checkThatTenantExists(tenantName)) {
throw new IllegalArgumentException("Tenant " + tenantName + " was not found.");
}
Tenant tenant = tenants.getTenant(tenantName);
List<ApplicationId> applicationIds = listApplicationIds(tenant);
if (!applicationIds.contains(applicationId)) {
throw new IllegalArgumentException("No such application id: " + applicationId);
}
return tenant;
}
public ApplicationId activate(Tenant tenant,
long sessionId,
TimeoutBudget timeoutBudget,
boolean ignoreLockFailure,
boolean ignoreSessionStaleFailure) {
LocalSession localSession = getLocalSession(tenant, sessionId);
LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
ActivateLock activateLock = tenant.getActivateLock();
Deployment deployment = deployFromPreparedSession(localSession,
activateLock,
localSessionRepo,
timeoutBudget.timeLeft());
deployment.setIgnoreLockFailure(ignoreLockFailure);
deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure);
deployment.activate();
return localSession.getApplicationId();
}
public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) {
return getLocalSession(tenant, sessionId).getMetaData();
}
public void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) {
LocalSession session = getLocalSession(tenant, sessionId);
if (Session.Status.ACTIVATE.equals(session.getStatus())) {
throw new IllegalStateException("Session is active: " + sessionId);
}
}
public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) {
RemoteSession session = getRemoteSession(tenant, sessionId);
if (Session.Status.ACTIVATE.equals(session.getStatus())) {
throw new IllegalStateException("Session is active: " + sessionId);
}
}
public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) {
RemoteSession session = getRemoteSession(tenant, sessionId);
if (!Session.Status.PREPARE.equals(session.getStatus()))
throw new IllegalStateException("Session not prepared: " + sessionId);
}
private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) {
Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
TenantApplications applicationRepo = tenant.getApplicationRepo();
try {
long currentActiveSessionId = applicationRepo.getSessionIdForApplication(appId);
RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId);
if (currentActiveSession != null) {
currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded());
}
} catch (IllegalArgumentException e) {
}
return currentActiveApplicationSet;
}
public ConfigChangeActions prepare(Tenant tenant, long sessionId, DeployLogger logger, PrepareParams params) {
LocalSession session = getLocalSession(tenant, sessionId);
ApplicationId appId = params.getApplicationId();
Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, appId);
return session.prepare(logger, params, currentActiveApplicationSet, tenant.getPath());
}
private List<ApplicationId> listApplicationIds(Tenant tenant) {
TenantApplications applicationRepo = tenant.getApplicationRepo();
return applicationRepo.listApplications();
}
public long createSessionFromExisting(Tenant tenant, DeployLogger logger,
TimeoutBudget timeoutBudget, ApplicationId applicationId) {
LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
SessionFactory sessionFactory = tenant.getSessionFactory();
LocalSession fromSession = getExistingSession(tenant, applicationId);
LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, timeoutBudget);
localSessionRepo.addSession(session);
return session.getSessionId();
}
public long createSession(Tenant tenant, TimeoutBudget timeoutBudget, File applicationDirectory, String applicationName) {
LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
SessionFactory sessionFactory = tenant.getSessionFactory();
LocalSession session = sessionFactory.createSession(applicationDirectory, applicationName, timeoutBudget);
localSessionRepo.addSession(session);
return session.getSessionId();
}
private void redeployApplication(ApplicationId applicationId, Deployer deployer, ExecutorService deploymentExecutor) {
log.log(LogLevel.DEBUG, () -> "Redeploying " + applicationId);
deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30))
.ifPresent(deployment -> deploymentExecutor.execute(() -> {
try {
deployment.activate();
} catch (RuntimeException e) {
log.log(LogLevel.ERROR, "Redeploying " + applicationId + " failed", e);
}
}));
}
public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) {
Tenant tenant = tenants.getTenant(tenantName);
return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode);
}
private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
TenantApplications applicationRepo = tenant.getApplicationRepo();
return getLocalSession(tenant, applicationRepo.getSessionIdForApplication(applicationId));
}
} |
We're also getting allNodes above. Getting lots of nodes from the repo is costly, can it be done more efficiently? | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
for (ApplicationId applicationId : activeApplications) {
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
try (Mutex lock = nodeRepository().lock(applicationId)) {
List<Node> applicationNodes = nodeRepository().getNodes(applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
long numNodesWantedToRetire = 0;
for (Iterator<Node> iterator = retireableNodes.iterator(); iterator.hasNext() && numNodesAllowedToRetire > numNodesWantedToRetire; ) {
Node retireableNode = iterator.next();
if (flavorSpareChecker.canRetireAllocatedNodeWithFlavor(retireableNode.flavor())) {
log.info("Setting wantToRetire for host " + retireableNode.hostname() +
" with flavor " + retireableNode.flavor().name() +
" allocated to " + retireableNode.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = retireableNode.with(retireableNode.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
numNodesWantedToRetire++;
}
}
if (numNodesWantedToRetire > 0) deployment.get().activate();
}
}
} | List<Node> applicationNodes = nodeRepository().getNodes(applicationId); | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodes.stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(numNodesAllowedToRetire)
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getSumOfReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} |
We should allow for some number of simultaneous retires per cluster in a later PR. | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
for (ApplicationId applicationId : activeApplications) {
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
try (Mutex lock = nodeRepository().lock(applicationId)) {
List<Node> applicationNodes = nodeRepository().getNodes(applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
long numNodesWantedToRetire = 0;
for (Iterator<Node> iterator = retireableNodes.iterator(); iterator.hasNext() && numNodesAllowedToRetire > numNodesWantedToRetire; ) {
Node retireableNode = iterator.next();
if (flavorSpareChecker.canRetireAllocatedNodeWithFlavor(retireableNode.flavor())) {
log.info("Setting wantToRetire for host " + retireableNode.hostname() +
" with flavor " + retireableNode.flavor().name() +
" allocated to " + retireableNode.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = retireableNode.with(retireableNode.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
numNodesWantedToRetire++;
}
}
if (numNodesWantedToRetire > 0) deployment.get().activate();
}
}
} | long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION); | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodes.stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(numNodesAllowedToRetire)
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getSumOfReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} |
The previous version here as questioned by @bjorncs was slightly different in behaviour than the Scala version. This change should bring it closer. | private static Optional<String> detectResourceOrProvider(ClassLoader bundleClassLoader, String classEntry) {
try (InputStream inputStream = getResourceAsStream(bundleClassLoader, classEntry)) {
ResourceOrProviderClassVisitor visitor = ResourceOrProviderClassVisitor.visit(new ClassReader(inputStream));
return Optional.ofNullable(visitor.getClassName());
} catch (IOException e) {
throw new RuntimeException(e);
}
} | } | private static Optional<String> detectResourceOrProvider(ClassLoader bundleClassLoader, String classEntry) {
try (InputStream inputStream = getResourceAsStream(bundleClassLoader, classEntry)) {
ResourceOrProviderClassVisitor visitor = ResourceOrProviderClassVisitor.visit(new ClassReader(inputStream));
return Optional.ofNullable(visitor.getClassName());
} catch (IOException e) {
throw new RuntimeException(e);
}
} | class JerseyServletProvider implements Provider<ServletHolder> {
private final ServletHolder jerseyServletHolder;
public JerseyServletProvider(RestApiContext restApiContext) {
this.jerseyServletHolder = new ServletHolder(new ServletContainer(resourceConfig(restApiContext)));
}
private ResourceConfig resourceConfig(RestApiContext restApiContext) {
final ResourceConfig resourceConfig = ResourceConfig
.forApplication(new JerseyApplication(resourcesAndProviders(restApiContext.getBundles())));
registerComponent(resourceConfig, componentInjectorBinder(restApiContext));
registerComponent(resourceConfig, jacksonDatatypeJdk8Provider());
resourceConfig.register(MultiPartFeature.class);
return resourceConfig;
}
private static Collection<Class<?>> resourcesAndProviders(Collection<BundleInfo> bundles) {
final List<Class<?>> ret = new ArrayList<>();
for (BundleInfo bundle : bundles) {
for (String classEntry : bundle.getClassEntries()) {
Optional<String> className = detectResourceOrProvider(bundle.classLoader, classEntry);
className.ifPresent(cname -> ret.add(loadClass(bundle.symbolicName, bundle.classLoader, cname)));
}
}
return ret;
}
private static InputStream getResourceAsStream(ClassLoader bundleClassLoader, String classEntry) {
InputStream is = bundleClassLoader.getResourceAsStream(classEntry);
if (is == null) {
throw new RuntimeException("No entry " + classEntry + " in bundle " + bundleClassLoader);
} else {
return is;
}
}
private static Class<?> loadClass(String bundleSymbolicName, ClassLoader classLoader, String className) {
try {
return classLoader.loadClass(className);
} catch (Exception e) {
throw new RuntimeException("Failed loading class " + className + " from bundle " + bundleSymbolicName, e);
}
}
private static Binder componentInjectorBinder(RestApiContext restApiContext) {
final ComponentGraphProvider componentGraphProvider = new ComponentGraphProvider(restApiContext.getInjectableComponents());
final TypeLiteral<InjectionResolver<Component>> componentAnnotationType = new TypeLiteral<InjectionResolver<Component>>() {
};
return new AbstractBinder() {
@Override
public void configure() {
bind(componentGraphProvider).to(componentAnnotationType);
}
};
}
private static JacksonJaxbJsonProvider jacksonDatatypeJdk8Provider() {
JacksonJaxbJsonProvider provider = new JacksonJaxbJsonProvider();
provider.setMapper(new ObjectMapper().registerModule(new Jdk8Module()).registerModule(new JavaTimeModule()));
return provider;
}
@Override
public ServletHolder get() {
return jerseyServletHolder;
}
@Override
public void deconstruct() {
}
} | class JerseyServletProvider implements Provider<ServletHolder> {
private final ServletHolder jerseyServletHolder;
public JerseyServletProvider(RestApiContext restApiContext) {
this.jerseyServletHolder = new ServletHolder(new ServletContainer(resourceConfig(restApiContext)));
}
private ResourceConfig resourceConfig(RestApiContext restApiContext) {
final ResourceConfig resourceConfig = ResourceConfig
.forApplication(new JerseyApplication(resourcesAndProviders(restApiContext.getBundles())));
registerComponent(resourceConfig, componentInjectorBinder(restApiContext));
registerComponent(resourceConfig, jacksonDatatypeJdk8Provider());
resourceConfig.register(MultiPartFeature.class);
return resourceConfig;
}
private static Collection<Class<?>> resourcesAndProviders(Collection<BundleInfo> bundles) {
final List<Class<?>> ret = new ArrayList<>();
for (BundleInfo bundle : bundles) {
for (String classEntry : bundle.getClassEntries()) {
Optional<String> className = detectResourceOrProvider(bundle.classLoader, classEntry);
className.ifPresent(cname -> ret.add(loadClass(bundle.symbolicName, bundle.classLoader, cname)));
}
}
return ret;
}
private static InputStream getResourceAsStream(ClassLoader bundleClassLoader, String classEntry) {
InputStream is = bundleClassLoader.getResourceAsStream(classEntry);
if (is == null) {
throw new RuntimeException("No entry " + classEntry + " in bundle " + bundleClassLoader);
} else {
return is;
}
}
private static Class<?> loadClass(String bundleSymbolicName, ClassLoader classLoader, String className) {
try {
return classLoader.loadClass(className);
} catch (Exception e) {
throw new RuntimeException("Failed loading class " + className + " from bundle " + bundleSymbolicName, e);
}
}
private static Binder componentInjectorBinder(RestApiContext restApiContext) {
final ComponentGraphProvider componentGraphProvider = new ComponentGraphProvider(restApiContext.getInjectableComponents());
final TypeLiteral<InjectionResolver<Component>> componentAnnotationType = new TypeLiteral<InjectionResolver<Component>>() {
};
return new AbstractBinder() {
@Override
public void configure() {
bind(componentGraphProvider).to(componentAnnotationType);
}
};
}
private static JacksonJaxbJsonProvider jacksonDatatypeJdk8Provider() {
JacksonJaxbJsonProvider provider = new JacksonJaxbJsonProvider();
provider.setMapper(new ObjectMapper().registerModule(new Jdk8Module()).registerModule(new JavaTimeModule()));
return provider;
}
@Override
public ServletHolder get() {
return jerseyServletHolder;
}
@Override
public void deconstruct() {
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.