comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
... and wantToDeprovision | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
for (ApplicationId applicationId : activeApplications) {
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
try (Mutex lock = nodeRepository().lock(applicationId)) {
List<Node> applicationNodes = nodeRepository().getNodes(applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
long numNodesWantedToRetire = 0;
for (Iterator<Node> iterator = retireableNodes.iterator(); iterator.hasNext() && numNodesAllowedToRetire > numNodesWantedToRetire; ) {
Node retireableNode = iterator.next();
if (flavorSpareChecker.canRetireAllocatedNodeWithFlavor(retireableNode.flavor())) {
log.info("Setting wantToRetire for host " + retireableNode.hostname() +
" with flavor " + retireableNode.flavor().name() +
" allocated to " + retireableNode.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = retireableNode.with(retireableNode.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
numNodesWantedToRetire++;
}
}
if (numNodesWantedToRetire > 0) deployment.get().activate();
}
}
} | log.info("Setting wantToRetire for host " + retireableNode.hostname() + | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodes.stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(numNodesAllowedToRetire)
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getSumOfReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} |
RetiredExpirer doesn't lock the application. Is it OK, for example, to call activate() with the application lock held? Why do we need to hold the application lock for the block anyway? | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
for (ApplicationId applicationId : activeApplications) {
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
try (Mutex lock = nodeRepository().lock(applicationId)) {
List<Node> applicationNodes = nodeRepository().getNodes(applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
long numNodesWantedToRetire = 0;
for (Iterator<Node> iterator = retireableNodes.iterator(); iterator.hasNext() && numNodesAllowedToRetire > numNodesWantedToRetire; ) {
Node retireableNode = iterator.next();
if (flavorSpareChecker.canRetireAllocatedNodeWithFlavor(retireableNode.flavor())) {
log.info("Setting wantToRetire for host " + retireableNode.hostname() +
" with flavor " + retireableNode.flavor().name() +
" allocated to " + retireableNode.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = retireableNode.with(retireableNode.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
numNodesWantedToRetire++;
}
}
if (numNodesWantedToRetire > 0) deployment.get().activate();
}
}
} | try (Mutex lock = nodeRepository().lock(applicationId)) { | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodes.stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(numNodesAllowedToRetire)
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getSumOfReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} |
"new" in possibleNewFlavors and newFlavors -> "replacement"? | private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
return possibleNewFlavors;
} | Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>(); | private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
return possibleReplacementFlavors;
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfSpares);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfSpares();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of spare nodes of flavor f and all replacee flavors of f is > 0</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2a for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleNewFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getSumOfReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleNewFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
}
return possibleNewFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfReady);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfReady();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of ready nodes of flavor f and all replacee flavors of f is > reserved (set by {@link SpareNodesPolicy}</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2 for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleReplacementFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getNumReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleNewFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
}
return possibleReplacementFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} |
Nit: SumOf -> Num (it's not possible to add/sum two ready nodes) | private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleNewFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getSumOfReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleNewFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
}
return possibleNewFlavors;
} | if (possibleNewFlavor.getSumOfReadyAmongReplacees() == 0) continue; | private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleReplacementFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getNumReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleNewFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
}
return possibleReplacementFlavors;
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfSpares);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfSpares();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of spare nodes of flavor f and all replacee flavors of f is > 0</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2a for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
return possibleNewFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfReady);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfReady();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of ready nodes of flavor f and all replacee flavors of f is > reserved (set by {@link SpareNodesPolicy}</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2 for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
return possibleReplacementFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} |
New -> Replacement (if accepted above) | private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleNewFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getSumOfReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleNewFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
}
return possibleNewFlavors;
} | Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>(); | private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleReplacementFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getNumReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleNewFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
}
return possibleReplacementFlavors;
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfSpares);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfSpares();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of spare nodes of flavor f and all replacee flavors of f is > 0</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2a for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
return possibleNewFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfReady);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfReady();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of ready nodes of flavor f and all replacee flavors of f is > reserved (set by {@link SpareNodesPolicy}</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2 for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
return possibleReplacementFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} |
We are getting nodes 3 places: * All nodes in `retireUnallocated` * All nodes in `retireAllocated` * Nodes belonging to an application under the lock of that application The first two could be joined, I'm not sure about the second one... Don't we need be sure of the exact state the application is in when we are potetially retiring some of its nodes? Either way, it's makes fewer requests than v1, since v1 would do # 3 on all config servers, whereas v2 only does it one. | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
for (ApplicationId applicationId : activeApplications) {
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
try (Mutex lock = nodeRepository().lock(applicationId)) {
List<Node> applicationNodes = nodeRepository().getNodes(applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
long numNodesWantedToRetire = 0;
for (Iterator<Node> iterator = retireableNodes.iterator(); iterator.hasNext() && numNodesAllowedToRetire > numNodesWantedToRetire; ) {
Node retireableNode = iterator.next();
if (flavorSpareChecker.canRetireAllocatedNodeWithFlavor(retireableNode.flavor())) {
log.info("Setting wantToRetire for host " + retireableNode.hostname() +
" with flavor " + retireableNode.flavor().name() +
" allocated to " + retireableNode.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = retireableNode.with(retireableNode.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
numNodesWantedToRetire++;
}
}
if (numNodesWantedToRetire > 0) deployment.get().activate();
}
}
} | List<Node> applicationNodes = nodeRepository().getNodes(applicationId); | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodes.stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(numNodesAllowedToRetire)
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getSumOfReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} |
Fixed. | private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleNewFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getSumOfReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleNewFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
}
return possibleNewFlavors;
} | if (possibleNewFlavor.getSumOfReadyAmongReplacees() == 0) continue; | private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleReplacementFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getNumReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleNewFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
}
return possibleReplacementFlavors;
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfSpares);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfSpares();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of spare nodes of flavor f and all replacee flavors of f is > 0</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2a for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
return possibleNewFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfReady);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfReady();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of ready nodes of flavor f and all replacee flavors of f is > reserved (set by {@link SpareNodesPolicy}</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2 for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
return possibleReplacementFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} |
Fixed. | private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleNewFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getSumOfReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleNewFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
}
return possibleNewFlavors;
} | Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>(); | private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleReplacementFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getNumReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleNewFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
}
return possibleReplacementFlavors;
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfSpares);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfSpares();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of spare nodes of flavor f and all replacee flavors of f is > 0</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2a for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
return possibleNewFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfReady);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfReady();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of ready nodes of flavor f and all replacee flavors of f is > reserved (set by {@link SpareNodesPolicy}</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2 for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
return possibleReplacementFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} |
Fixed. | private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
return possibleNewFlavors;
} | Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>(); | private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleWantedFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
return possibleReplacementFlavors;
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfSpares);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfSpares();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of spare nodes of flavor f and all replacee flavors of f is > 0</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2a for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleNewFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleNewFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getSumOfReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> newFlavors = verifyReplacementConditions(possibleNewFlavor);
if (newFlavors.isEmpty()) return Collections.emptySet();
else possibleNewFlavors.addAll(newFlavors);
}
}
return possibleNewFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} | class FlavorSpareChecker {
private final SpareNodesPolicy spareNodesPolicy;
private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
this.spareNodesPolicy = spareNodesPolicy;
this.spareCountByFlavor = spareCountByFlavor;
}
public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
flavorSpareCount.updateReadyAndActiveCounts(
numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
numberOfNodesByState.getOrDefault(Node.State.active, 0L));
});
}
public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfReady);
return !possibleNewFlavors.isEmpty();
}
public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
flavorSpareCount.decrementNumberOfReady();
return true;
}
return false;
}
/**
* Returns a set of possible new flavors that can replace this flavor given current node allocation.
* If the set is empty, there are not enough spare nodes to safely retire this flavor.
* <p>
* The algorithm is:
* for all possible wanted flavor, check:
* <ul>
* <li>1: Sum of ready nodes of flavor f and all replacee flavors of f is > reserved (set by {@link SpareNodesPolicy}</li>
* <li>2a: Number of ready nodes of flavor f is > 0</li>
* <li>2b: Verify 1 & 2 for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
* replacee flavors of f_i is > 0</li>
* </ul>
* Only 2a OR 2b need to be satisfied.
*/
private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
if (flavorSpareCount.hasReady()) {
possibleReplacementFlavors.add(flavorSpareCount);
} else {
for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
if (possibleNewFlavor.getNumReadyAmongReplacees() == 0) continue;
Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleNewFlavor);
if (replacementFlavors.isEmpty()) return Collections.emptySet();
else possibleReplacementFlavors.addAll(replacementFlavors);
}
}
return possibleReplacementFlavors;
}
public interface SpareNodesPolicy {
boolean hasSpare(FlavorSpareCount flavorSpareCount);
}
} |
Fixed. | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
for (ApplicationId applicationId : activeApplications) {
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
try (Mutex lock = nodeRepository().lock(applicationId)) {
List<Node> applicationNodes = nodeRepository().getNodes(applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
long numNodesWantedToRetire = 0;
for (Iterator<Node> iterator = retireableNodes.iterator(); iterator.hasNext() && numNodesAllowedToRetire > numNodesWantedToRetire; ) {
Node retireableNode = iterator.next();
if (flavorSpareChecker.canRetireAllocatedNodeWithFlavor(retireableNode.flavor())) {
log.info("Setting wantToRetire for host " + retireableNode.hostname() +
" with flavor " + retireableNode.flavor().name() +
" allocated to " + retireableNode.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = retireableNode.with(retireableNode.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
numNodesWantedToRetire++;
}
}
if (numNodesWantedToRetire > 0) deployment.get().activate();
}
}
} | log.info("Setting wantToRetire for host " + retireableNode.hostname() + | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodes.stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(numNodesAllowedToRetire)
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getSumOfReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
/** Counts nodes in a two-level grouping: flavor -> (node state -> number of nodes). */
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
    return allNodes.stream()
            .collect(Collectors.groupingBy(node -> node.flavor(),
                     Collectors.groupingBy(node -> node.state(), Collectors.counting())));
}
} |
Moved activate() outside the lock block. Don't we still need the lock block, though, since this is potentially making changes to active nodes? | void retireAllocated() {
// For each active application: prepare a deployment, take the application lock, mark
// up to MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION retireable nodes as
// wantToRetire/wantToDeprovision, then activate so the marks take effect.
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
// Seed the spare-flavor bookkeeping from a snapshot of all tenant nodes.
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
for (ApplicationId applicationId : activeApplications) {
// Skip applications that cannot be deployed right now.
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
try (Mutex lock = nodeRepository().lock(applicationId)) {
List<Node> applicationNodes = nodeRepository().getNodes(applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
long numNodesWantedToRetire = 0;
// Mark candidates until the per-application retirement budget is used up.
for (Iterator<Node> iterator = retireableNodes.iterator(); iterator.hasNext() && numNodesAllowedToRetire > numNodesWantedToRetire; ) {
Node retireableNode = iterator.next();
// Only mark a node when the spare checker permits retiring this flavor.
if (flavorSpareChecker.canRetireAllocatedNodeWithFlavor(retireableNode.flavor())) {
log.info("Setting wantToRetire for host " + retireableNode.hostname() +
" with flavor " + retireableNode.flavor().name() +
" allocated to " + retireableNode.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = retireableNode.with(retireableNode.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
numNodesWantedToRetire++;
}
}
// NOTE(review): activate() runs while the application lock is still held.
if (numNodesWantedToRetire > 0) deployment.get().activate();
}
}
} | try (Mutex lock = nodeRepository().lock(applicationId)) { | void retireAllocated() {
// Chooses nodes to retire using a point-in-time snapshot taken outside any lock,
// re-validates each candidate under its application lock before marking it
// wantToRetire/wantToDeprovision, and activates the deployment outside the
// critical section to keep lock hold times short.
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
    List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
    Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
    long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
    // Only attempt (and pay for) a deployment when there is actually something to retire.
    if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
    Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
    if ( ! deployment.isPresent()) continue;
    Set<Node> replaceableNodes = retireableNodes.stream()
            .filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
            .limit(numNodesAllowedToRetire)
            .collect(Collectors.toSet());
    if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
    // All nodes in a batch were collected for one application in the loop above.
    ApplicationId app = nodes.iterator().next().allocation().get().owner();
    Set<Node> nodesToRetire;
    try (Mutex lock = nodeRepository().lock(app)) {
        // Re-read each node under the lock and keep it only if it is STILL active and
        // still allocated to the same application.
        // Fix: the state check previously read the stale snapshot node (always active
        // by construction) instead of the freshly read node; also guard against the
        // fresh node having lost its allocation, which would make .get() throw.
        nodesToRetire = nodes.stream()
                .map(node ->
                        nodeRepository().getNode(node.hostname())
                                .filter(upToDateNode -> upToDateNode.state() == Node.State.active)
                                .filter(upToDateNode -> upToDateNode.allocation().isPresent())
                                .filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
                .flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
                .collect(Collectors.toSet());
        nodesToRetire.forEach(node -> {
            log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
                    " with flavor " + node.flavor().name() +
                    " allocated to " + node.allocation().get().owner() + ". Policy: " +
                    retirementPolicy.getClass().getSimpleName());
            Node updatedNode = node.with(node.status()
                    .withWantToRetire(true)
                    .withWantToDeprovision(true));
            nodeRepository().write(updatedNode);
        });
    }
    // Activate outside the lock to minimize the time the application lock is held.
    if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
// Policy: a flavor is considered to have spares when its replacement flavors have
// more than two ready nodes in total.
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getSumOfReadyAmongReplacees() > 2;
// Upper bound on nodes concurrently retiring per application.
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
// Only run in the explicitly listed zones; otherwise shut this maintainer down.
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
// NOTE(review): execution continues after deconstruct(); the fields below are still assigned.
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
// Only start retiring allocated nodes once no retireable unallocated nodes remain.
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff there are no unallocated nodes left that match the retirement policy.
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
// NOTE(review): the filter below has side effects — it writes and parks nodes while
// counting the flavors for which some nodes could not be retired.
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
// Stop retiring this flavor as soon as spares run out.
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* Returns a list of ApplicationIds sorted by the number of active nodes each application has allocated,
* most nodes first.
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that should all eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
// Counts nodes in a two-level grouping: flavor -> (node state -> number of nodes).
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | class NodeRetirer extends Maintainer {
// Policy: a flavor is considered to have spares when its replacement flavors have
// more than two ready nodes.
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
// Upper bound on nodes concurrently retiring per application.
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
// Only run in the explicitly listed zones; otherwise shut this maintainer down.
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
// NOTE(review): execution continues after deconstruct(); the fields below are still assigned.
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
// Only start retiring allocated nodes once no retireable unallocated nodes remain.
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff there are no unallocated nodes left that match the retirement policy.
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
// NOTE(review): the filter below has side effects — it writes and parks nodes while
// counting the flavors for which some nodes could not be retired.
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
// Stop retiring this flavor as soon as spares run out.
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
// Returns the subset of allNodes whose allocation is owned by the given application.
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by the number of active nodes each application has allocated,
* most nodes first.
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that should all eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
// Counts nodes in a two-level grouping: flavor -> (node state -> number of nodes).
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} |
The first one also gets the nodes under `lockUnallocated`, so I am not sure whether it can be joined with the second one. | void retireAllocated() {
// For each active application: prepare a deployment, take the application lock, mark
// up to MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION retireable nodes as
// wantToRetire/wantToDeprovision, then activate so the marks take effect.
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
// Seed the spare-flavor bookkeeping from a snapshot of all tenant nodes.
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
for (ApplicationId applicationId : activeApplications) {
// Skip applications that cannot be deployed right now.
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
try (Mutex lock = nodeRepository().lock(applicationId)) {
List<Node> applicationNodes = nodeRepository().getNodes(applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
long numNodesWantedToRetire = 0;
// Mark candidates until the per-application retirement budget is used up.
for (Iterator<Node> iterator = retireableNodes.iterator(); iterator.hasNext() && numNodesAllowedToRetire > numNodesWantedToRetire; ) {
Node retireableNode = iterator.next();
// Only mark a node when the spare checker permits retiring this flavor.
if (flavorSpareChecker.canRetireAllocatedNodeWithFlavor(retireableNode.flavor())) {
log.info("Setting wantToRetire for host " + retireableNode.hostname() +
" with flavor " + retireableNode.flavor().name() +
" allocated to " + retireableNode.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = retireableNode.with(retireableNode.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
numNodesWantedToRetire++;
}
}
// NOTE(review): activate() runs while the application lock is still held.
if (numNodesWantedToRetire > 0) deployment.get().activate();
}
}
} | List<Node> applicationNodes = nodeRepository().getNodes(applicationId); | void retireAllocated() {
// Chooses nodes to retire using a point-in-time snapshot taken outside any lock,
// re-validates each candidate under its application lock before marking it
// wantToRetire/wantToDeprovision, and activates the deployment outside the
// critical section to keep lock hold times short.
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
    List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
    Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
    long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
    // Only attempt (and pay for) a deployment when there is actually something to retire.
    if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
    Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
    if ( ! deployment.isPresent()) continue;
    Set<Node> replaceableNodes = retireableNodes.stream()
            .filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
            .limit(numNodesAllowedToRetire)
            .collect(Collectors.toSet());
    if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
    // All nodes in a batch were collected for one application in the loop above.
    ApplicationId app = nodes.iterator().next().allocation().get().owner();
    Set<Node> nodesToRetire;
    try (Mutex lock = nodeRepository().lock(app)) {
        // Re-read each node under the lock and keep it only if it is STILL active and
        // still allocated to the same application.
        // Fix: the state check previously read the stale snapshot node (always active
        // by construction) instead of the freshly read node; also guard against the
        // fresh node having lost its allocation, which would make .get() throw.
        nodesToRetire = nodes.stream()
                .map(node ->
                        nodeRepository().getNode(node.hostname())
                                .filter(upToDateNode -> upToDateNode.state() == Node.State.active)
                                .filter(upToDateNode -> upToDateNode.allocation().isPresent())
                                .filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
                .flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
                .collect(Collectors.toSet());
        nodesToRetire.forEach(node -> {
            log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
                    " with flavor " + node.flavor().name() +
                    " allocated to " + node.allocation().get().owner() + ". Policy: " +
                    retirementPolicy.getClass().getSimpleName());
            Node updatedNode = node.with(node.status()
                    .withWantToRetire(true)
                    .withWantToDeprovision(true));
            nodeRepository().write(updatedNode);
        });
    }
    // Activate outside the lock to minimize the time the application lock is held.
    if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
// Policy: a flavor is considered to have spares when its replacement flavors have
// more than two ready nodes in total.
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getSumOfReadyAmongReplacees() > 2;
// Upper bound on nodes concurrently retiring per application.
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
// Only run in the explicitly listed zones; otherwise shut this maintainer down.
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
// NOTE(review): execution continues after deconstruct(); the fields below are still assigned.
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
// Only start retiring allocated nodes once no retireable unallocated nodes remain.
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff there are no unallocated nodes left that match the retirement policy.
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
// NOTE(review): the filter below has side effects — it writes and parks nodes while
// counting the flavors for which some nodes could not be retired.
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
// Stop retiring this flavor as soon as spares run out.
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/**
* Returns a list of ApplicationIds sorted by the number of active nodes each application has allocated,
* most nodes first.
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that should all eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
// Counts nodes in a two-level grouping: flavor -> (node state -> number of nodes).
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | class NodeRetirer extends Maintainer {
// Policy: a flavor is considered to have spares when its replacement flavors have
// more than two ready nodes.
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
// Upper bound on nodes concurrently retiring per application.
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
// Only run in the explicitly listed zones; otherwise shut this maintainer down.
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
// NOTE(review): execution continues after deconstruct(); the fields below are still assigned.
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
// Only start retiring allocated nodes once no retireable unallocated nodes remain.
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff there are no unallocated nodes left that match the retirement policy.
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
// NOTE(review): the filter below has side effects — it writes and parks nodes while
// counting the flavors for which some nodes could not be retired.
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
// Stop retiring this flavor as soon as spares run out.
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
// Returns the subset of allNodes whose allocation is owned by the given application.
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by the number of active nodes each application has allocated,
* most nodes first.
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that should all eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
// Counts nodes in a two-level grouping: flavor -> (node state -> number of nodes).
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} |
As discussed: 1. Lock each application, then get the nodes for that application, then update (write) the nodes. 2. Move activate() outside the locks to minimize the time the locks are held. 3. The nodes used to decide the applications and the flavor graph can be generated once, outside of any locks — a simplification that will likely not be a problem. | void retireAllocated() {
// For each active application: prepare a deployment, take the application lock, mark
// up to MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION retireable nodes as
// wantToRetire/wantToDeprovision, then activate so the marks take effect.
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
// Seed the spare-flavor bookkeeping from a snapshot of all tenant nodes.
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
for (ApplicationId applicationId : activeApplications) {
// Skip applications that cannot be deployed right now.
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
try (Mutex lock = nodeRepository().lock(applicationId)) {
List<Node> applicationNodes = nodeRepository().getNodes(applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
long numNodesWantedToRetire = 0;
// Mark candidates until the per-application retirement budget is used up.
for (Iterator<Node> iterator = retireableNodes.iterator(); iterator.hasNext() && numNodesAllowedToRetire > numNodesWantedToRetire; ) {
Node retireableNode = iterator.next();
// Only mark a node when the spare checker permits retiring this flavor.
if (flavorSpareChecker.canRetireAllocatedNodeWithFlavor(retireableNode.flavor())) {
log.info("Setting wantToRetire for host " + retireableNode.hostname() +
" with flavor " + retireableNode.flavor().name() +
" allocated to " + retireableNode.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = retireableNode.with(retireableNode.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
numNodesWantedToRetire++;
}
}
// NOTE(review): activate() runs while the application lock is still held.
if (numNodesWantedToRetire > 0) deployment.get().activate();
}
}
} | try (Mutex lock = nodeRepository().lock(applicationId)) { | void retireAllocated() {
// Chooses nodes to retire using a point-in-time snapshot taken outside any lock,
// re-validates each candidate under its application lock before marking it
// wantToRetire/wantToDeprovision, and activates the deployment outside the
// critical section to keep lock hold times short.
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
    List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
    Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
    long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
    // Only attempt (and pay for) a deployment when there is actually something to retire.
    if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
    Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
    if ( ! deployment.isPresent()) continue;
    Set<Node> replaceableNodes = retireableNodes.stream()
            .filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
            .limit(numNodesAllowedToRetire)
            .collect(Collectors.toSet());
    if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
    // All nodes in a batch were collected for one application in the loop above.
    ApplicationId app = nodes.iterator().next().allocation().get().owner();
    Set<Node> nodesToRetire;
    try (Mutex lock = nodeRepository().lock(app)) {
        // Re-read each node under the lock and keep it only if it is STILL active and
        // still allocated to the same application.
        // Fix: the state check previously read the stale snapshot node (always active
        // by construction) instead of the freshly read node; also guard against the
        // fresh node having lost its allocation, which would make .get() throw.
        nodesToRetire = nodes.stream()
                .map(node ->
                        nodeRepository().getNode(node.hostname())
                                .filter(upToDateNode -> upToDateNode.state() == Node.State.active)
                                .filter(upToDateNode -> upToDateNode.allocation().isPresent())
                                .filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
                .flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
                .collect(Collectors.toSet());
        nodesToRetire.forEach(node -> {
            log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
                    " with flavor " + node.flavor().name() +
                    " allocated to " + node.allocation().get().owner() + ". Policy: " +
                    retirementPolicy.getClass().getSimpleName());
            Node updatedNode = node.with(node.status()
                    .withWantToRetire(true)
                    .withWantToDeprovision(true));
            nodeRepository().write(updatedNode);
        });
    }
    // Activate outside the lock to minimize the time the application lock is held.
    if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getSumOfReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
    // Allocated nodes are only considered once every retireable unallocated node has been parked.
    if (! retireUnallocated()) return;
    retireAllocated();
}
/**
 * Retires unallocated (ready) nodes that match the retirement policy by moving them
 * directly to parked. Candidates are grouped by flavor; within each flavor, nodes are
 * parked only while the spare checker still allows retiring that flavor.
 *
 * @return true iff every ready node matching the retirement policy was parked
 */
boolean retireUnallocated() {
    try (Mutex lock = nodeRepository().lockUnallocated()) {
        List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
        flavorSpareChecker.updateReadyAndActiveCountsByFlavor(getNumberOfNodesByFlavorByNodeState(allNodes));

        Map<Flavor, Set<Node>> retireableNodesByFlavor = allNodes.stream()
                .filter(node -> node.state() == Node.State.ready)
                .filter(retirementPolicy::shouldRetire)
                .collect(Collectors.groupingBy(Node::flavor, Collectors.toSet()));

        // Explicit loop instead of a side-effecting stream predicate: each iteration both
        // mutates the node repository and decides whether this flavor fully retired.
        boolean retiredAll = true;
        for (Map.Entry<Flavor, Set<Node>> entry : retireableNodesByFlavor.entrySet()) {
            Set<Node> nodesToRetire = entry.getValue();
            for (Iterator<Node> iter = nodesToRetire.iterator(); iter.hasNext(); ) {
                Node nodeToRetire = iter.next();
                // Stop as soon as retiring another node of this flavor would eat into the spares
                if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
                nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
                nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
                        "Policy: " + retirementPolicy.getClass().getSimpleName());
                iter.remove();
            }
            if (! nodesToRetire.isEmpty()) {
                String commaSeparatedHostnames = nodesToRetire.stream().map(Node::hostname)
                        .collect(Collectors.joining(", "));
                log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
                        entry.getKey(), nodesToRetire.size(), commaSeparatedHostnames));
                retiredAll = false;
            }
        }
        return retiredAll;
    }
}
/**
 * Returns the ids of applications that have active nodes, ordered by decreasing
 * number of active nodes allocated to them.
 */
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
    Map<ApplicationId, Long> numActiveNodesByApplication = nodes.stream()
            .filter(node -> node.state() == Node.State.active)
            .collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
    return numActiveNodesByApplication.entrySet().stream()
            .sorted((a, b) -> Long.compare(b.getValue(), a.getValue()))
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());
}
/**
 * Returns the subset of an application's nodes that should eventually be retired:
 * active nodes, not already marked wantToRetire, that the retirement policy selects.
 *
 * @param applicationNodes All the nodes allocated to an application
 */
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
    return applicationNodes.stream()
            .filter(node -> node.state() == Node.State.active
                            && ! node.status().wantToRetire()
                            && retirementPolicy.shouldRetire(node))
            .collect(Collectors.toSet());
}
/**
 * Returns how many more nodes of the application can safely start retiring, i.e. the
 * retirement budget minus the nodes already retiring (wantToRetire and not yet parked).
 *
 * @param applicationNodes All the nodes allocated to an application
 */
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
    long numRetirementsInFlight = applicationNodes.stream()
            .filter(node -> node.status().wantToRetire() && node.state() != Node.State.parked)
            .count();
    return maxSimultaneousRetires > numRetirementsInFlight ? maxSimultaneousRetires - numRetirementsInFlight : 0;
}
/** Counts the given nodes, first by flavor, then by node state within each flavor. */
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
    Map<Flavor, Map<Node.State, Long>> countsByFlavorByState = new HashMap<>();
    for (Node node : allNodes) {
        countsByFlavorByState.computeIfAbsent(node.flavor(), flavor -> new HashMap<>())
                             .merge(node.state(), 1L, Long::sum);
    }
    return countsByFlavorByState;
}
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
/** Returns the subset of the given nodes that are allocated to the given application. */
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
    return allNodes.stream()
            .filter(node -> node.allocation()
                    .map(allocation -> allocation.owner().equals(applicationId))
                    .orElse(false))
            .collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} |
That's not the most important thing this block is trying to accomplish: it's making sure an up-to-date node is fetched from the node repo, mutated, and saved back, all under the application lock. This avoids overwriting fields others may have modified on the node since it was read above. | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodes.stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(numNodesAllowedToRetire)
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodes.stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(numNodesAllowedToRetire)
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | |
Add a comment explaining why it's outside the above loop. | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodes.stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(numNodesAllowedToRetire)
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | if (! nodesToRetire.isEmpty()) deployment.activate(); | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
List<Node> applicationNodes = getNodesBelongingToApplication(allNodes, applicationId);
Set<Node> retireableNodes = getRetireableNodesForApplication(applicationNodes);
long numNodesAllowedToRetire = getNumberNodesAllowToRetireForApplication(applicationNodes, MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION);
if (retireableNodes.isEmpty() || numNodesAllowedToRetire == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodes.stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(numNodesAllowedToRetire)
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_APPLICATION = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(List<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(List<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return Set of nodes that all should eventually be retired
*/
Set<Node> getRetireableNodesForApplication(List<Node> applicationNodes) {
return applicationNodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param applicationNodes All the nodes allocated to an application
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForApplication(List<Node> applicationNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = applicationNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(List<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} |
Need to also pass the `feedEndpoint` argument from https://github.com/yahoo/vespa/blob/89b7b95346eacff1dfedf03828de888043dfd940/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java#L243 The chef recipe is only for non-docker nodes, while StorageMaintainer is the one that calls Maintainer for docker nodes (node-admin). | private static void parseHandleCoreDumps(Inspector arguments) {
Path coredumpsPath = Paths.get(getFieldOrFail(arguments, "coredumpsPath").asString());
Path doneCoredumpsPath = Paths.get(getFieldOrFail(arguments, "doneCoredumpsPath").asString());
Map<String, Object> attributesMap = parseMap(arguments);
Optional<Path> yinstStatePath = SlimeUtils.optionalString(arguments.field("yinstStatePath")).map(Paths::get);
String feedEndpoint = getFieldOrFail(arguments, "feedEndpoint").asString();
try {
CoredumpHandler coredumpHandler = new CoredumpHandler(httpClient, coreCollector, coredumpsPath,
doneCoredumpsPath, attributesMap, yinstStatePath,
feedEndpoint);
coredumpHandler.processAll();
} catch (IOException e) {
throw new RuntimeException("Failed processing coredumps at " + coredumpsPath.toAbsolutePath() +
", moving fished dumps to " + doneCoredumpsPath.toAbsolutePath(), e);
}
} | String feedEndpoint = getFieldOrFail(arguments, "feedEndpoint").asString(); | private static void parseHandleCoreDumps(Inspector arguments) {
Path coredumpsPath = Paths.get(getFieldOrFail(arguments, "coredumpsPath").asString());
Path doneCoredumpsPath = Paths.get(getFieldOrFail(arguments, "doneCoredumpsPath").asString());
Map<String, Object> attributesMap = parseMap(arguments);
Optional<Path> yinstStatePath = SlimeUtils.optionalString(arguments.field("yinstStatePath")).map(Paths::get);
String feedEndpoint = getFieldOrFail(arguments, "feedEndpoint").asString();
try {
CoredumpHandler coredumpHandler = new CoredumpHandler(httpClient, coreCollector, coredumpsPath,
doneCoredumpsPath, attributesMap, yinstStatePath,
feedEndpoint);
coredumpHandler.processAll();
} catch (IOException e) {
throw new RuntimeException("Failed processing coredumps at " + coredumpsPath.toAbsolutePath() +
", moving fished dumps to " + doneCoredumpsPath.toAbsolutePath(), e);
}
} | class Maintainer {
private static final CoreCollector coreCollector = new CoreCollector(new ProcessExecuter());
private static final HttpClient httpClient = createHttpClient(Duration.ofSeconds(5));
public static void main(String[] args) {
LogSetup.initVespaLogging("node-maintainer");
if (args.length != 1) {
throw new RuntimeException("Expected only 1 argument - a JSON list of maintainer jobs to execute");
}
Inspector object = SlimeUtils.jsonToSlime(args[0].getBytes()).get();
if (object.type() != Type.ARRAY) {
throw new IllegalArgumentException("Expected a list of maintainer jobs to execute");
}
AtomicInteger numberOfJobsFailed = new AtomicInteger(0);
object.traverse((ArrayTraverser) (int i, Inspector item) -> {
try {
String type = getFieldOrFail(item, "type").asString();
Inspector arguments = getFieldOrFail(item, "arguments");
parseMaintenanceJob(type, arguments);
} catch (Exception e) {
System.err.println("Failed executing job: " + item.toString());
e.printStackTrace();
numberOfJobsFailed.incrementAndGet();
}
});
if (numberOfJobsFailed.get() > 0) {
System.err.println(numberOfJobsFailed.get() + " of jobs has failed");
System.exit(1);
}
}
private static void parseMaintenanceJob(String type, Inspector arguments) {
if (arguments.type() != Type.OBJECT) {
throw new IllegalArgumentException("Expected a 'arguments' to be an object");
}
switch (type) {
case "delete-files":
parseDeleteFilesJob(arguments);
break;
case "delete-directories":
parseDeleteDirectoriesJob(arguments);
break;
case "recursive-delete":
parseRecursiveDelete(arguments);
break;
case "move-files":
parseMoveFiles(arguments);
break;
case "handle-core-dumps":
parseHandleCoreDumps(arguments);
break;
default:
throw new IllegalArgumentException("Unknown job: " + type);
}
}
private static void parseDeleteFilesJob(Inspector arguments) {
Path basePath = Paths.get(getFieldOrFail(arguments, "basePath").asString());
Duration maxAge = Duration.ofSeconds(getFieldOrFail(arguments, "maxAgeSeconds").asLong());
Optional<String> fileNameRegex = SlimeUtils.optionalString(arguments.field("fileNameRegex"));
boolean recursive = getFieldOrFail(arguments, "recursive").asBool();
try {
FileHelper.deleteFiles(basePath, maxAge, fileNameRegex, recursive);
} catch (IOException e) {
throw new RuntimeException("Failed deleting files under " + basePath.toAbsolutePath() +
fileNameRegex.map(regex -> ", matching '" + regex + "'").orElse("") +
", " + (recursive ? "" : "not ") + "recursively" +
" and older than " + maxAge, e);
}
}
private static void parseDeleteDirectoriesJob(Inspector arguments) {
Path basePath = Paths.get(getFieldOrFail(arguments, "basePath").asString());
Duration maxAge = Duration.ofSeconds(getFieldOrFail(arguments, "maxAgeSeconds").asLong());
Optional<String> dirNameRegex = SlimeUtils.optionalString(arguments.field("dirNameRegex"));
try {
FileHelper.deleteDirectories(basePath, maxAge, dirNameRegex);
} catch (IOException e) {
throw new RuntimeException("Failed deleting directories under " + basePath.toAbsolutePath() +
dirNameRegex.map(regex -> ", matching '" + regex + "'").orElse("") +
" and older than " + maxAge, e);
}
}
private static void parseRecursiveDelete(Inspector arguments) {
Path basePath = Paths.get(getFieldOrFail(arguments, "path").asString());
try {
FileHelper.recursiveDelete(basePath);
} catch (IOException e) {
throw new RuntimeException("Failed deleting " + basePath.toAbsolutePath(), e);
}
}
private static void parseMoveFiles(Inspector arguments) {
Path from = Paths.get(getFieldOrFail(arguments, "from").asString());
Path to = Paths.get(getFieldOrFail(arguments, "to").asString());
try {
FileHelper.moveIfExists(from, to);
} catch (IOException e) {
throw new RuntimeException("Failed moving from " + from.toAbsolutePath() + ", to " + to.toAbsolutePath(), e);
}
}
private static Map<String, Object> parseMap(Inspector object) {
Map<String, Object> map = new HashMap<>();
getFieldOrFail(object, "attributes").traverse((String key, Inspector value) -> {
switch (value.type()) {
case BOOL:
map.put(key, value.asBool());
break;
case LONG:
map.put(key, value.asLong());
break;
case DOUBLE:
map.put(key, value.asDouble());
break;
case STRING:
map.put(key, value.asString());
break;
default:
throw new IllegalArgumentException("Invalid attribute for key '" + key + "', value " + value);
}
});
return map;
}
private static Inspector getFieldOrFail(Inspector object, String key) {
Inspector out = object.field(key);
if (out.type() == Type.NIX) {
throw new IllegalArgumentException("Key '" + key + "' was not found!");
}
return out;
}
private static HttpClient createHttpClient(Duration timeout) {
int timeoutInMillis = (int) timeout.toMillis();
return HttpClientBuilder.create()
.setDefaultRequestConfig(RequestConfig.custom()
.setConnectTimeout(timeoutInMillis)
.setConnectionRequestTimeout(timeoutInMillis)
.setSocketTimeout(timeoutInMillis)
.build())
.build();
}
} | class Maintainer {
private static final CoreCollector coreCollector = new CoreCollector(new ProcessExecuter());
private static final HttpClient httpClient = createHttpClient(Duration.ofSeconds(5));
public static void main(String[] args) {
LogSetup.initVespaLogging("node-maintainer");
if (args.length != 1) {
throw new RuntimeException("Expected only 1 argument - a JSON list of maintainer jobs to execute");
}
Inspector object = SlimeUtils.jsonToSlime(args[0].getBytes()).get();
if (object.type() != Type.ARRAY) {
throw new IllegalArgumentException("Expected a list of maintainer jobs to execute");
}
AtomicInteger numberOfJobsFailed = new AtomicInteger(0);
object.traverse((ArrayTraverser) (int i, Inspector item) -> {
try {
String type = getFieldOrFail(item, "type").asString();
Inspector arguments = getFieldOrFail(item, "arguments");
parseMaintenanceJob(type, arguments);
} catch (Exception e) {
System.err.println("Failed executing job: " + item.toString());
e.printStackTrace();
numberOfJobsFailed.incrementAndGet();
}
});
if (numberOfJobsFailed.get() > 0) {
System.err.println(numberOfJobsFailed.get() + " of jobs has failed");
System.exit(1);
}
}
private static void parseMaintenanceJob(String type, Inspector arguments) {
if (arguments.type() != Type.OBJECT) {
throw new IllegalArgumentException("Expected a 'arguments' to be an object");
}
switch (type) {
case "delete-files":
parseDeleteFilesJob(arguments);
break;
case "delete-directories":
parseDeleteDirectoriesJob(arguments);
break;
case "recursive-delete":
parseRecursiveDelete(arguments);
break;
case "move-files":
parseMoveFiles(arguments);
break;
case "handle-core-dumps":
parseHandleCoreDumps(arguments);
break;
default:
throw new IllegalArgumentException("Unknown job: " + type);
}
}
private static void parseDeleteFilesJob(Inspector arguments) {
Path basePath = Paths.get(getFieldOrFail(arguments, "basePath").asString());
Duration maxAge = Duration.ofSeconds(getFieldOrFail(arguments, "maxAgeSeconds").asLong());
Optional<String> fileNameRegex = SlimeUtils.optionalString(arguments.field("fileNameRegex"));
boolean recursive = getFieldOrFail(arguments, "recursive").asBool();
try {
FileHelper.deleteFiles(basePath, maxAge, fileNameRegex, recursive);
} catch (IOException e) {
throw new RuntimeException("Failed deleting files under " + basePath.toAbsolutePath() +
fileNameRegex.map(regex -> ", matching '" + regex + "'").orElse("") +
", " + (recursive ? "" : "not ") + "recursively" +
" and older than " + maxAge, e);
}
}
private static void parseDeleteDirectoriesJob(Inspector arguments) {
Path basePath = Paths.get(getFieldOrFail(arguments, "basePath").asString());
Duration maxAge = Duration.ofSeconds(getFieldOrFail(arguments, "maxAgeSeconds").asLong());
Optional<String> dirNameRegex = SlimeUtils.optionalString(arguments.field("dirNameRegex"));
try {
FileHelper.deleteDirectories(basePath, maxAge, dirNameRegex);
} catch (IOException e) {
throw new RuntimeException("Failed deleting directories under " + basePath.toAbsolutePath() +
dirNameRegex.map(regex -> ", matching '" + regex + "'").orElse("") +
" and older than " + maxAge, e);
}
}
private static void parseRecursiveDelete(Inspector arguments) {
Path basePath = Paths.get(getFieldOrFail(arguments, "path").asString());
try {
FileHelper.recursiveDelete(basePath);
} catch (IOException e) {
throw new RuntimeException("Failed deleting " + basePath.toAbsolutePath(), e);
}
}
private static void parseMoveFiles(Inspector arguments) {
Path from = Paths.get(getFieldOrFail(arguments, "from").asString());
Path to = Paths.get(getFieldOrFail(arguments, "to").asString());
try {
FileHelper.moveIfExists(from, to);
} catch (IOException e) {
throw new RuntimeException("Failed moving from " + from.toAbsolutePath() + ", to " + to.toAbsolutePath(), e);
}
}
private static Map<String, Object> parseMap(Inspector object) {
Map<String, Object> map = new HashMap<>();
getFieldOrFail(object, "attributes").traverse((String key, Inspector value) -> {
switch (value.type()) {
case BOOL:
map.put(key, value.asBool());
break;
case LONG:
map.put(key, value.asLong());
break;
case DOUBLE:
map.put(key, value.asDouble());
break;
case STRING:
map.put(key, value.asString());
break;
default:
throw new IllegalArgumentException("Invalid attribute for key '" + key + "', value " + value);
}
});
return map;
}
private static Inspector getFieldOrFail(Inspector object, String key) {
Inspector out = object.field(key);
if (out.type() == Type.NIX) {
throw new IllegalArgumentException("Key '" + key + "' was not found!");
}
return out;
}
private static HttpClient createHttpClient(Duration timeout) {
int timeoutInMillis = (int) timeout.toMillis();
return HttpClientBuilder.create()
.setDefaultRequestConfig(RequestConfig.custom()
.setConnectTimeout(timeoutInMillis)
.setConnectionRequestTimeout(timeoutInMillis)
.setSocketTimeout(timeoutInMillis)
.build())
.build();
}
} |
This only gives the public constructor. I think you need to use getDeclaredConstructors to get to the one you want here. | private static ConfigInstance getInstance(ConfigDefinitionKey cKey, ClassLoader instanceLoader) {
String className = ConfigGenerator.createClassName(cKey.getName());
Class<?> clazz;
String fullClassName = packageName(cKey) + "." + className;
try {
clazz = instanceLoader != null ? instanceLoader.loadClass(fullClassName) : Class.forName(fullClassName);
} catch (ClassNotFoundException e) {
return null;
}
Object i;
try {
Constructor configConstructor = clazz.getConstructor();
configConstructor.setAccessible(true);
i = configConstructor.newInstance();
} catch (InvocationTargetException | InstantiationException | IllegalAccessException | NoSuchMethodException e) {
throw new ConfigurationRuntimeException(e);
}
if (!(i instanceof ConfigInstance)) {
throw new ConfigurationRuntimeException(fullClassName + " is not a ConfigInstance, can not produce config for the name '" + cKey.getName() + "'.");
}
return (ConfigInstance) i;
} | Constructor configConstructor = clazz.getConstructor(); | private static ConfigInstance getInstance(ConfigDefinitionKey cKey, ClassLoader instanceLoader) {
String className = ConfigGenerator.createClassName(cKey.getName());
Class<?> clazz;
String fullClassName = packageName(cKey) + "." + className;
try {
clazz = instanceLoader != null ? instanceLoader.loadClass(fullClassName) : Class.forName(fullClassName);
} catch (ClassNotFoundException e) {
return null;
}
Object i;
try {
Constructor<?> configConstructor = clazz.getDeclaredConstructor();
configConstructor.setAccessible(true);
i = configConstructor.newInstance();
} catch (InvocationTargetException | InstantiationException | IllegalAccessException | NoSuchMethodException e) {
throw new ConfigurationRuntimeException(e);
}
if (!(i instanceof ConfigInstance)) {
throw new ConfigurationRuntimeException(fullClassName + " is not a ConfigInstance, can not produce config for the name '" + cKey.getName() + "'.");
}
return (ConfigInstance) i;
} | class InstanceResolver {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(InstanceResolver.class.getName());
/**
* Resolves this config key into a correctly typed ConfigInstance using the given config builder.
* FIXME: Make private once config overrides are deprecated.?
*
* @param key a ConfigKey
* @param builder a ConfigBuilder to create the instance from.
* @param targetDef the def to use
* @return the config instance or null of no producer for this found in model
*/
static ConfigInstance resolveToInstance(ConfigKey<?> key, ConfigBuilder builder, InnerCNode targetDef) {
ConfigDefinitionKey defKey = new ConfigDefinitionKey(key);
try {
if (targetDef != null) applyDef(builder, targetDef);
ConfigInstance instance = getInstance(defKey, builder.getClass().getClassLoader());
Class<? extends ConfigInstance> clazz = instance.getClass();
return clazz.getConstructor(new Class<?>[]{builder.getClass()}).newInstance(builder);
} catch (Exception e) {
throw new ConfigurationRuntimeException(e);
}
}
/**
* Resolves this config key into a correctly typed ConfigBuilder using the given config model.
* FIXME: Make private once config overrides are deprecated.?
*
* @return the config builder or null if no producer for this found in model
*/
static ConfigBuilder resolveToBuilder(ConfigKey<?> key, VespaModel model, ConfigDefinition targetDef) {
if (model == null) return null;
ConfigDefinitionKey defKey = new ConfigDefinitionKey(key);
ConfigInstance.Builder builder = model.createBuilder(defKey, targetDef);
model.getConfig(builder, key.getConfigId());
return builder;
}
/**
* If some fields on the builder are null now, set them from the def. Do recursively.
* <p>
* If the targetDef has some schema incompatibilities, they are not handled here
* (except logging in some cases), but in ConfigInstance.serialize().
*
* @param builder a {@link com.yahoo.config.ConfigBuilder}
* @param targetDef a config definition
* @throws Exception if applying values form config definitions fails
*/
static void applyDef(ConfigBuilder builder, InnerCNode targetDef) throws Exception {
for (Map.Entry<String, CNode> e: targetDef.children().entrySet()) {
CNode node = e.getValue();
if (node instanceof LeafCNode) {
setLeafValueIfUnset(targetDef, builder, (LeafCNode)node);
} else if (node instanceof InnerCNode) {
if (hasField(builder.getClass(), node.getName())) {
Field innerField = builder.getClass().getDeclaredField(node.getName());
innerField.setAccessible(true);
Object innerFieldVal = innerField.get(builder);
if (innerFieldVal instanceof List) {
List<?> innerList = (List<?>) innerFieldVal;
for (Object b : innerList) {
if (b instanceof ConfigBuilder) {
applyDef((ConfigBuilder) b, (InnerCNode) node);
}
}
} else if (innerFieldVal instanceof ConfigBuilder) {
applyDef((ConfigBuilder) innerFieldVal, (InnerCNode) node);
} else {
}
}
}
}
}
private static boolean hasField(Class<?> aClass, String name) {
for (Field field : aClass.getDeclaredFields()) {
if (name.equals(field.getName())) {
return true;
}
}
return false;
}
private static void setLeafValueIfUnset(InnerCNode targetDef, Object builder, LeafCNode node) throws Exception {
if (hasField(builder.getClass(), node.getName())) {
Field field = builder.getClass().getDeclaredField(node.getName());
field.setAccessible(true);
Object val = field.get(builder);
if (val==null) {
try {
if (node.getDefaultValue()!=null) {
Method setter = builder.getClass().getDeclaredMethod(node.getName(), String.class);
setter.setAccessible(true);
setter.invoke(builder, node.getDefaultValue().getValue());
}
} catch (Exception e) {
log.severe("For config '"+targetDef.getFullName()+"': Unable to apply the default value for field '"+node.getName()+
"' to config Builder (where it wasn't set): "+
Exceptions.toMessageString(e));
}
}
}
}
/**
* Create a ConfigBuilder given a definition key and a payload
* @param key The key to use to create the correct builder.
* @param payload The payload to populate the builder with.
* @return A ConfigBuilder initialized with payload.
*/
static ConfigBuilder createBuilderFromPayload(ConfigDefinitionKey key, VespaModel model, ConfigPayload payload, ConfigDefinition targetDef) {
ConfigBuilder builderInstance = model.createBuilder(key, targetDef);
if (builderInstance == null || builderInstance instanceof GenericConfig.GenericConfigBuilder) {
if (log.isLoggable(LogLevel.SPAM)) {
log.log(LogLevel.SPAM, "Creating generic builder for key=" + key);
}
return new GenericConfig.GenericConfigBuilder(key, new ConfigPayloadBuilder(payload));
}
ConfigTransformer transformer = new ConfigTransformer(builderInstance.getClass().getDeclaringClass());
return transformer.toConfigBuilder(payload);
}
/**
* Returns a {@link ConfigInstance} of right type for given key using reflection
*
* @param cKey a ConfigKey
* @return a {@link ConfigInstance} or null if not available in classpath
*/
static String packageName(ConfigDefinitionKey cKey) {
String prefix = "com.yahoo.";
return prefix + (cKey.getNamespace().isEmpty() ? CNode.DEFAULT_NAMESPACE : cKey.getNamespace());
}
} | class InstanceResolver {
private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(InstanceResolver.class.getName());
/**
* Resolves this config key into a correctly typed ConfigInstance using the given config builder.
* FIXME: Make private once config overrides are deprecated.?
*
* @param key a ConfigKey
* @param builder a ConfigBuilder to create the instance from.
* @param targetDef the def to use
* @return the config instance or null of no producer for this found in model
*/
static ConfigInstance resolveToInstance(ConfigKey<?> key, ConfigBuilder builder, InnerCNode targetDef) {
ConfigDefinitionKey defKey = new ConfigDefinitionKey(key);
try {
if (targetDef != null) applyDef(builder, targetDef);
ConfigInstance instance = getInstance(defKey, builder.getClass().getClassLoader());
Class<? extends ConfigInstance> clazz = instance.getClass();
return clazz.getConstructor(new Class<?>[]{builder.getClass()}).newInstance(builder);
} catch (Exception e) {
throw new ConfigurationRuntimeException(e);
}
}
/**
* Resolves this config key into a correctly typed ConfigBuilder using the given config model.
* FIXME: Make private once config overrides are deprecated.?
*
* @return the config builder or null if no producer for this found in model
*/
static ConfigBuilder resolveToBuilder(ConfigKey<?> key, VespaModel model, ConfigDefinition targetDef) {
if (model == null) return null;
ConfigDefinitionKey defKey = new ConfigDefinitionKey(key);
ConfigInstance.Builder builder = model.createBuilder(defKey, targetDef);
model.getConfig(builder, key.getConfigId());
return builder;
}
/**
* If some fields on the builder are null now, set them from the def. Do recursively.
* <p>
* If the targetDef has some schema incompatibilities, they are not handled here
* (except logging in some cases), but in ConfigInstance.serialize().
*
* @param builder a {@link com.yahoo.config.ConfigBuilder}
* @param targetDef a config definition
* @throws Exception if applying values form config definitions fails
*/
static void applyDef(ConfigBuilder builder, InnerCNode targetDef) throws Exception {
for (Map.Entry<String, CNode> e: targetDef.children().entrySet()) {
CNode node = e.getValue();
if (node instanceof LeafCNode) {
setLeafValueIfUnset(targetDef, builder, (LeafCNode)node);
} else if (node instanceof InnerCNode) {
if (hasField(builder.getClass(), node.getName())) {
Field innerField = builder.getClass().getDeclaredField(node.getName());
innerField.setAccessible(true);
Object innerFieldVal = innerField.get(builder);
if (innerFieldVal instanceof List) {
List<?> innerList = (List<?>) innerFieldVal;
for (Object b : innerList) {
if (b instanceof ConfigBuilder) {
applyDef((ConfigBuilder) b, (InnerCNode) node);
}
}
} else if (innerFieldVal instanceof ConfigBuilder) {
applyDef((ConfigBuilder) innerFieldVal, (InnerCNode) node);
} else {
}
}
}
}
}
private static boolean hasField(Class<?> aClass, String name) {
for (Field field : aClass.getDeclaredFields()) {
if (name.equals(field.getName())) {
return true;
}
}
return false;
}
private static void setLeafValueIfUnset(InnerCNode targetDef, Object builder, LeafCNode node) throws Exception {
if (hasField(builder.getClass(), node.getName())) {
Field field = builder.getClass().getDeclaredField(node.getName());
field.setAccessible(true);
Object val = field.get(builder);
if (val==null) {
try {
if (node.getDefaultValue()!=null) {
Method setter = builder.getClass().getDeclaredMethod(node.getName(), String.class);
setter.setAccessible(true);
setter.invoke(builder, node.getDefaultValue().getValue());
}
} catch (Exception e) {
log.severe("For config '"+targetDef.getFullName()+"': Unable to apply the default value for field '"+node.getName()+
"' to config Builder (where it wasn't set): "+
Exceptions.toMessageString(e));
}
}
}
}
/**
* Create a ConfigBuilder given a definition key and a payload
* @param key The key to use to create the correct builder.
* @param payload The payload to populate the builder with.
* @return A ConfigBuilder initialized with payload.
*/
static ConfigBuilder createBuilderFromPayload(ConfigDefinitionKey key, VespaModel model, ConfigPayload payload, ConfigDefinition targetDef) {
ConfigBuilder builderInstance = model.createBuilder(key, targetDef);
if (builderInstance == null || builderInstance instanceof GenericConfig.GenericConfigBuilder) {
if (log.isLoggable(LogLevel.SPAM)) {
log.log(LogLevel.SPAM, "Creating generic builder for key=" + key);
}
return new GenericConfig.GenericConfigBuilder(key, new ConfigPayloadBuilder(payload));
}
ConfigTransformer transformer = new ConfigTransformer(builderInstance.getClass().getDeclaringClass());
return transformer.toConfigBuilder(payload);
}
/**
* Returns a {@link ConfigInstance} of right type for given key using reflection
*
* @param cKey a ConfigKey
* @return a {@link ConfigInstance} or null if not available in classpath
*/
static String packageName(ConfigDefinitionKey cKey) {
String prefix = "com.yahoo.";
return prefix + (cKey.getNamespace().isEmpty() ? CNode.DEFAULT_NAMESPACE : cKey.getNamespace());
}
} |
You are viewing an outdated version. I figured out quickly and fixed :) | private static ConfigInstance getInstance(ConfigDefinitionKey cKey, ClassLoader instanceLoader) {
String className = ConfigGenerator.createClassName(cKey.getName());
Class<?> clazz;
String fullClassName = packageName(cKey) + "." + className;
try {
clazz = instanceLoader != null ? instanceLoader.loadClass(fullClassName) : Class.forName(fullClassName);
} catch (ClassNotFoundException e) {
return null;
}
Object i;
try {
Constructor configConstructor = clazz.getConstructor();
configConstructor.setAccessible(true);
i = configConstructor.newInstance();
} catch (InvocationTargetException | InstantiationException | IllegalAccessException | NoSuchMethodException e) {
throw new ConfigurationRuntimeException(e);
}
if (!(i instanceof ConfigInstance)) {
throw new ConfigurationRuntimeException(fullClassName + " is not a ConfigInstance, can not produce config for the name '" + cKey.getName() + "'.");
}
return (ConfigInstance) i;
} | Constructor configConstructor = clazz.getConstructor(); | private static ConfigInstance getInstance(ConfigDefinitionKey cKey, ClassLoader instanceLoader) {
String className = ConfigGenerator.createClassName(cKey.getName());
Class<?> clazz;
String fullClassName = packageName(cKey) + "." + className;
try {
clazz = instanceLoader != null ? instanceLoader.loadClass(fullClassName) : Class.forName(fullClassName);
} catch (ClassNotFoundException e) {
return null;
}
Object i;
try {
Constructor<?> configConstructor = clazz.getDeclaredConstructor();
configConstructor.setAccessible(true);
i = configConstructor.newInstance();
} catch (InvocationTargetException | InstantiationException | IllegalAccessException | NoSuchMethodException e) {
throw new ConfigurationRuntimeException(e);
}
if (!(i instanceof ConfigInstance)) {
throw new ConfigurationRuntimeException(fullClassName + " is not a ConfigInstance, can not produce config for the name '" + cKey.getName() + "'.");
}
return (ConfigInstance) i;
} | class InstanceResolver {
    // Logger used to report problems when applying def defaults to builders.
    private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(InstanceResolver.class.getName());
/**
* Resolves this config key into a correctly typed ConfigInstance using the given config builder.
* FIXME: Make private once config overrides are deprecated.?
*
* @param key a ConfigKey
* @param builder a ConfigBuilder to create the instance from.
* @param targetDef the def to use
* @return the config instance or null of no producer for this found in model
*/
static ConfigInstance resolveToInstance(ConfigKey<?> key, ConfigBuilder builder, InnerCNode targetDef) {
ConfigDefinitionKey defKey = new ConfigDefinitionKey(key);
try {
if (targetDef != null) applyDef(builder, targetDef);
ConfigInstance instance = getInstance(defKey, builder.getClass().getClassLoader());
Class<? extends ConfigInstance> clazz = instance.getClass();
return clazz.getConstructor(new Class<?>[]{builder.getClass()}).newInstance(builder);
} catch (Exception e) {
throw new ConfigurationRuntimeException(e);
}
}
/**
* Resolves this config key into a correctly typed ConfigBuilder using the given config model.
* FIXME: Make private once config overrides are deprecated.?
*
* @return the config builder or null if no producer for this found in model
*/
static ConfigBuilder resolveToBuilder(ConfigKey<?> key, VespaModel model, ConfigDefinition targetDef) {
if (model == null) return null;
ConfigDefinitionKey defKey = new ConfigDefinitionKey(key);
ConfigInstance.Builder builder = model.createBuilder(defKey, targetDef);
model.getConfig(builder, key.getConfigId());
return builder;
}
/**
* If some fields on the builder are null now, set them from the def. Do recursively.
* <p>
* If the targetDef has some schema incompatibilities, they are not handled here
* (except logging in some cases), but in ConfigInstance.serialize().
*
* @param builder a {@link com.yahoo.config.ConfigBuilder}
* @param targetDef a config definition
* @throws Exception if applying values form config definitions fails
*/
static void applyDef(ConfigBuilder builder, InnerCNode targetDef) throws Exception {
for (Map.Entry<String, CNode> e: targetDef.children().entrySet()) {
CNode node = e.getValue();
if (node instanceof LeafCNode) {
setLeafValueIfUnset(targetDef, builder, (LeafCNode)node);
} else if (node instanceof InnerCNode) {
if (hasField(builder.getClass(), node.getName())) {
Field innerField = builder.getClass().getDeclaredField(node.getName());
innerField.setAccessible(true);
Object innerFieldVal = innerField.get(builder);
if (innerFieldVal instanceof List) {
List<?> innerList = (List<?>) innerFieldVal;
for (Object b : innerList) {
if (b instanceof ConfigBuilder) {
applyDef((ConfigBuilder) b, (InnerCNode) node);
}
}
} else if (innerFieldVal instanceof ConfigBuilder) {
applyDef((ConfigBuilder) innerFieldVal, (InnerCNode) node);
} else {
}
}
}
}
}
private static boolean hasField(Class<?> aClass, String name) {
for (Field field : aClass.getDeclaredFields()) {
if (name.equals(field.getName())) {
return true;
}
}
return false;
}
    /**
     * Sets the def's default value on the builder field of the same name, but only if the
     * field is currently unset (null). The value is applied via the builder's String setter.
     * Failure to apply a default is logged, not propagated (best effort).
     */
    private static void setLeafValueIfUnset(InnerCNode targetDef, Object builder, LeafCNode node) throws Exception {
        if (hasField(builder.getClass(), node.getName())) {
            Field field = builder.getClass().getDeclaredField(node.getName());
            field.setAccessible(true);
            Object val = field.get(builder);
            if (val==null) {
                try {
                    if (node.getDefaultValue()!=null) {
                        // Builders expose a String setter named after the field.
                        Method setter = builder.getClass().getDeclaredMethod(node.getName(), String.class);
                        setter.setAccessible(true);
                        setter.invoke(builder, node.getDefaultValue().getValue());
                    }
                } catch (Exception e) {
                    log.severe("For config '"+targetDef.getFullName()+"': Unable to apply the default value for field '"+node.getName()+
                            "' to config Builder (where it wasn't set): "+
                            Exceptions.toMessageString(e));
                }
            }
        }
    }
/**
* Create a ConfigBuilder given a definition key and a payload
* @param key The key to use to create the correct builder.
* @param payload The payload to populate the builder with.
* @return A ConfigBuilder initialized with payload.
*/
static ConfigBuilder createBuilderFromPayload(ConfigDefinitionKey key, VespaModel model, ConfigPayload payload, ConfigDefinition targetDef) {
ConfigBuilder builderInstance = model.createBuilder(key, targetDef);
if (builderInstance == null || builderInstance instanceof GenericConfig.GenericConfigBuilder) {
if (log.isLoggable(LogLevel.SPAM)) {
log.log(LogLevel.SPAM, "Creating generic builder for key=" + key);
}
return new GenericConfig.GenericConfigBuilder(key, new ConfigPayloadBuilder(payload));
}
ConfigTransformer transformer = new ConfigTransformer(builderInstance.getClass().getDeclaringClass());
return transformer.toConfigBuilder(payload);
}
/**
* Returns a {@link ConfigInstance} of right type for given key using reflection
*
* @param cKey a ConfigKey
* @return a {@link ConfigInstance} or null if not available in classpath
*/
static String packageName(ConfigDefinitionKey cKey) {
String prefix = "com.yahoo.";
return prefix + (cKey.getNamespace().isEmpty() ? CNode.DEFAULT_NAMESPACE : cKey.getNamespace());
}
} | class InstanceResolver {
    // Logger used to report problems when applying def defaults to builders.
    private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(InstanceResolver.class.getName());
/**
* Resolves this config key into a correctly typed ConfigInstance using the given config builder.
* FIXME: Make private once config overrides are deprecated.?
*
* @param key a ConfigKey
* @param builder a ConfigBuilder to create the instance from.
* @param targetDef the def to use
* @return the config instance or null of no producer for this found in model
*/
static ConfigInstance resolveToInstance(ConfigKey<?> key, ConfigBuilder builder, InnerCNode targetDef) {
ConfigDefinitionKey defKey = new ConfigDefinitionKey(key);
try {
if (targetDef != null) applyDef(builder, targetDef);
ConfigInstance instance = getInstance(defKey, builder.getClass().getClassLoader());
Class<? extends ConfigInstance> clazz = instance.getClass();
return clazz.getConstructor(new Class<?>[]{builder.getClass()}).newInstance(builder);
} catch (Exception e) {
throw new ConfigurationRuntimeException(e);
}
}
/**
* Resolves this config key into a correctly typed ConfigBuilder using the given config model.
* FIXME: Make private once config overrides are deprecated.?
*
* @return the config builder or null if no producer for this found in model
*/
static ConfigBuilder resolveToBuilder(ConfigKey<?> key, VespaModel model, ConfigDefinition targetDef) {
if (model == null) return null;
ConfigDefinitionKey defKey = new ConfigDefinitionKey(key);
ConfigInstance.Builder builder = model.createBuilder(defKey, targetDef);
model.getConfig(builder, key.getConfigId());
return builder;
}
/**
* If some fields on the builder are null now, set them from the def. Do recursively.
* <p>
* If the targetDef has some schema incompatibilities, they are not handled here
* (except logging in some cases), but in ConfigInstance.serialize().
*
* @param builder a {@link com.yahoo.config.ConfigBuilder}
* @param targetDef a config definition
* @throws Exception if applying values form config definitions fails
*/
static void applyDef(ConfigBuilder builder, InnerCNode targetDef) throws Exception {
for (Map.Entry<String, CNode> e: targetDef.children().entrySet()) {
CNode node = e.getValue();
if (node instanceof LeafCNode) {
setLeafValueIfUnset(targetDef, builder, (LeafCNode)node);
} else if (node instanceof InnerCNode) {
if (hasField(builder.getClass(), node.getName())) {
Field innerField = builder.getClass().getDeclaredField(node.getName());
innerField.setAccessible(true);
Object innerFieldVal = innerField.get(builder);
if (innerFieldVal instanceof List) {
List<?> innerList = (List<?>) innerFieldVal;
for (Object b : innerList) {
if (b instanceof ConfigBuilder) {
applyDef((ConfigBuilder) b, (InnerCNode) node);
}
}
} else if (innerFieldVal instanceof ConfigBuilder) {
applyDef((ConfigBuilder) innerFieldVal, (InnerCNode) node);
} else {
}
}
}
}
}
private static boolean hasField(Class<?> aClass, String name) {
for (Field field : aClass.getDeclaredFields()) {
if (name.equals(field.getName())) {
return true;
}
}
return false;
}
    /**
     * Sets the def's default value on the builder field of the same name, but only if the
     * field is currently unset (null). The value is applied via the builder's String setter.
     * Failure to apply a default is logged, not propagated (best effort).
     */
    private static void setLeafValueIfUnset(InnerCNode targetDef, Object builder, LeafCNode node) throws Exception {
        if (hasField(builder.getClass(), node.getName())) {
            Field field = builder.getClass().getDeclaredField(node.getName());
            field.setAccessible(true);
            Object val = field.get(builder);
            if (val==null) {
                try {
                    if (node.getDefaultValue()!=null) {
                        // Builders expose a String setter named after the field.
                        Method setter = builder.getClass().getDeclaredMethod(node.getName(), String.class);
                        setter.setAccessible(true);
                        setter.invoke(builder, node.getDefaultValue().getValue());
                    }
                } catch (Exception e) {
                    log.severe("For config '"+targetDef.getFullName()+"': Unable to apply the default value for field '"+node.getName()+
                            "' to config Builder (where it wasn't set): "+
                            Exceptions.toMessageString(e));
                }
            }
        }
    }
/**
* Create a ConfigBuilder given a definition key and a payload
* @param key The key to use to create the correct builder.
* @param payload The payload to populate the builder with.
* @return A ConfigBuilder initialized with payload.
*/
static ConfigBuilder createBuilderFromPayload(ConfigDefinitionKey key, VespaModel model, ConfigPayload payload, ConfigDefinition targetDef) {
ConfigBuilder builderInstance = model.createBuilder(key, targetDef);
if (builderInstance == null || builderInstance instanceof GenericConfig.GenericConfigBuilder) {
if (log.isLoggable(LogLevel.SPAM)) {
log.log(LogLevel.SPAM, "Creating generic builder for key=" + key);
}
return new GenericConfig.GenericConfigBuilder(key, new ConfigPayloadBuilder(payload));
}
ConfigTransformer transformer = new ConfigTransformer(builderInstance.getClass().getDeclaringClass());
return transformer.toConfigBuilder(payload);
}
/**
* Returns a {@link ConfigInstance} of right type for given key using reflection
*
* @param cKey a ConfigKey
* @return a {@link ConfigInstance} or null if not available in classpath
*/
static String packageName(ConfigDefinitionKey cKey) {
String prefix = "com.yahoo.";
return prefix + (cKey.getNamespace().isEmpty() ? CNode.DEFAULT_NAMESPACE : cKey.getNamespace());
}
} |
// Review note: "Isn't 1 sec timeout a little tight?" -- refers to the rpc_invoke call at the
// bottom; left at 1s here pending confirmation from the service owners.
/**
 * Samples container metrics (cpu, memory, disk, network) and publishes them under both the
 * node and docker metric applications, then pushes the collected metrics into the container
 * via an rpc_invoke call. No-op until the first node spec has been fetched; container-level
 * metrics are skipped while no container exists.
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
    final ContainerNodeSpec nodeSpec = lastNodeSpec;
    if (nodeSpec == null) return;  // no spec fetched yet
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", hostname)
            .add("role", "tenants")
            .add("flavor", nodeSpec.nodeFlavor)
            .add("state", nodeSpec.nodeState.toString())
            .add("zone", environment.getZone())
            .add("parentHostname", environment.getParentHostHostname());
    nodeSpec.vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
    nodeSpec.owner.ifPresent(owner ->
            dimensionsBuilder
                    .add("tenantName", owner.tenant)
                    .add("applicationName", owner.application)
                    .add("instanceName", owner.instance)
                    .add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
                    .add("app", owner.application + "." + owner.instance));
    nodeSpec.membership.ifPresent(membership ->
            dimensionsBuilder
                    .add("clustertype", membership.clusterType)
                    .add("clusterid", membership.clusterId));
    Dimensions dimensions = dimensionsBuilder.build();
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_NODE, dimensions, "alive").sample(1);
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
    if (containerState == ABSENT) return;
    Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
    if (!containerStats.isPresent()) return;
    Docker.ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    final long bytesInGB = 1 << 30;
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
            .updateIfNeededAndGetDiskMetricsFor(containerName));
    double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
    double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
    // "Used" memory excludes the page cache.
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
    Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
    metricReceiver.declareGauge(APP, dimensions, "cpu.util").sample(cpuPercentageOfAllocated);
    metricReceiver.declareGauge(APP, dimensions, "mem.limit").sample(memoryTotalBytes);
    metricReceiver.declareGauge(APP, dimensions, "mem.used").sample(memoryTotalBytesUsed);
    metricReceiver.declareGauge(APP, dimensions, "mem.util").sample(memoryPercentUsed);
    diskTotalBytes.ifPresent(diskLimit -> metricReceiver.declareGauge(APP, dimensions, "disk.limit").sample(diskLimit));
    diskTotalBytesUsed.ifPresent(diskUsed -> metricReceiver.declareGauge(APP, dimensions, "disk.used").sample(diskUsed));
    diskPercentUsed.ifPresent(diskUtil -> metricReceiver.declareGauge(APP, dimensions, "disk.util").sample(diskUtil));
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
        metricReceiver.declareGauge(APP, netDims, "net.in.bytes").sample(infStats.get("rx_bytes").longValue());
        metricReceiver.declareGauge(APP, netDims, "net.in.errors").sample(infStats.get("rx_errors").longValue());
        metricReceiver.declareGauge(APP, netDims, "net.in.dropped").sample(infStats.get("rx_dropped").longValue());
        metricReceiver.declareGauge(APP, netDims, "net.out.bytes").sample(infStats.get("tx_bytes").longValue());
        metricReceiver.declareGauge(APP, netDims, "net.out.errors").sample(infStats.get("tx_errors").longValue());
        metricReceiver.declareGauge(APP, netDims, "net.out.dropped").sample(infStats.get("tx_dropped").longValue());
    });
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
    addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
    addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
    // memoryTotalBytesUsed already holds usage minus cache; no need to re-read the stats map
    // (the previous version recomputed the same value from scratch here).
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryTotalBytesUsed);
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
        addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
        addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
        addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
        addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
        addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
    });
    diskTotalBytes.ifPresent(diskLimit ->
            metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskLimit));
    diskTotalBytesUsed.ifPresent(diskUsed ->
            metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.used").sample(diskUsed));
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
    try {
        dockerOperations.executeCommandInContainerAsRoot(containerName, 1L, "rpc_invoke", "-t 1", "tcp/localhost:19091", "setExtraMetrics", buildRPCArgumentFromMetrics());
    } catch (DockerExecTimeoutException|JsonProcessingException e) {
        logger.warning("Unable to push metrics to container: " + containerName, e);
    }
} | dockerOperations.executeCommandInContainerAsRoot(containerName, 1L, "rpc_invoke", "-t 1", "tcp/localhost:19091", "setExtraMetrics", buildRPCArgumentFromMetrics()); | public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
    // Samples container metrics (cpu, memory, disk, network), publishes them, and pushes
    // the collected metrics into the container via rpc_invoke.
    final ContainerNodeSpec nodeSpec = lastNodeSpec;
    if (nodeSpec == null) return;  // no spec fetched yet
    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", hostname)
            .add("role", "tenants")
            .add("flavor", nodeSpec.nodeFlavor)
            .add("state", nodeSpec.nodeState.toString())
            .add("zone", environment.getZone())
            .add("parentHostname", environment.getParentHostHostname());
    nodeSpec.vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
    nodeSpec.owner.ifPresent(owner ->
            dimensionsBuilder
                    .add("tenantName", owner.tenant)
                    .add("applicationName", owner.application)
                    .add("instanceName", owner.instance)
                    .add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
                    .add("app", owner.application + "." + owner.instance));
    nodeSpec.membership.ifPresent(membership ->
            dimensionsBuilder
                    .add("clustertype", membership.clusterType)
                    .add("clusterid", membership.clusterId));
    Dimensions dimensions = dimensionsBuilder.build();
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_NODE, dimensions, "alive").sample(1);
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
    // Container-level metrics only make sense while a container exists.
    if (containerState == ABSENT) return;
    Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
    if (!containerStats.isPresent()) return;
    Docker.ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    final long bytesInGB = 1 << 30;
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
            .updateIfNeededAndGetDiskMetricsFor(containerName));
    double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
    double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
    // "Used" memory excludes the page cache.
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
    Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
    metricReceiver.declareGauge(APP, dimensions, "cpu.util").sample(cpuPercentageOfAllocated);
    metricReceiver.declareGauge(APP, dimensions, "mem.limit").sample(memoryTotalBytes);
    metricReceiver.declareGauge(APP, dimensions, "mem.used").sample(memoryTotalBytesUsed);
    metricReceiver.declareGauge(APP, dimensions, "mem.util").sample(memoryPercentUsed);
    diskTotalBytes.ifPresent(diskLimit -> metricReceiver.declareGauge(APP, dimensions, "disk.limit").sample(diskLimit));
    diskTotalBytesUsed.ifPresent(diskUsed -> metricReceiver.declareGauge(APP, dimensions, "disk.used").sample(diskUsed));
    diskPercentUsed.ifPresent(diskUtil -> metricReceiver.declareGauge(APP, dimensions, "disk.util").sample(diskUtil));
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
        metricReceiver.declareGauge(APP, netDims, "net.in.bytes").sample(infStats.get("rx_bytes").longValue());
        metricReceiver.declareGauge(APP, netDims, "net.in.errors").sample(infStats.get("rx_errors").longValue());
        metricReceiver.declareGauge(APP, netDims, "net.in.dropped").sample(infStats.get("rx_dropped").longValue());
        metricReceiver.declareGauge(APP, netDims, "net.out.bytes").sample(infStats.get("tx_bytes").longValue());
        metricReceiver.declareGauge(APP, netDims, "net.out.errors").sample(infStats.get("tx_errors").longValue());
        metricReceiver.declareGauge(APP, netDims, "net.out.dropped").sample(infStats.get("tx_dropped").longValue());
    });
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
    addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
    addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
    // NOTE(review): this recomputes usage-minus-cache already held in memoryTotalBytesUsed above.
    long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
    long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    long memoryUsage = memoryUsageTotal - memoryUsageCache;
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
        addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
        addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
        addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
        addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
        addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
    });
    diskTotalBytes.ifPresent(diskLimit ->
            metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskLimit));
    diskTotalBytesUsed.ifPresent(diskUsed ->
            metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.used").sample(diskUsed));
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
    metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
    try {
        // NOTE(review): 1 second exec timeout -- flagged as possibly too tight; confirm with owners.
        dockerOperations.executeCommandInContainerAsRoot(containerName, 1L, "rpc_invoke", "-t 1", "tcp/localhost:19091", "setExtraMetrics", buildRPCArgumentFromMetrics());
    } catch (DockerExecTimeoutException|JsonProcessingException e) {
        logger.warning("Unable to push metrics to container: " + containerName, e);
    }
} | class NodeAgentImpl implements NodeAgent {
    // True once stop() has been called; the tick loop exits when set.
    private final AtomicBoolean terminated = new AtomicBoolean(false);
    // Frozen agents do not converge; guarded by 'monitor'.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    // Guards the frozen/work flags and is used to wake the tick loop early.
    private final Object monitor = new Object();
    private final PrefixLogger logger;
    // Image currently being downloaded, or null if no download is in progress.
    private DockerImage imageBeingDownloaded = null;
    private final String hostname;
    private final ContainerName containerName;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final Optional<StorageMaintainer> storageMaintainer;
    private final MetricReceiverWrapper metricReceiver;
    private final Environment environment;
    private final Clock clock;
    private final Optional<AclMaintainer> aclMaintainer;
    // NOTE(review): SimpleDateFormat is not thread-safe; only used under the debugMessages lock here.
    private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    // Bounded (~1000 entries) in-memory history of recent agent events, exposed via debugInfo().
    private final LinkedList<String> debugMessages = new LinkedList<>();
    private long delaysBetweenEachConvergeMillis = 30_000;
    private int numberOfUnhandledException = 0;
    private Instant lastConverge;
    private Thread loopThread;
    // Lifecycle of the Docker container this agent manages.
    enum ContainerState {
        ABSENT,
        RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
        RUNNING
    }
    private ContainerState containerState = ABSENT;
    // Last attributes pushed to the node repo; null until the first publish.
    private NodeAttributes lastAttributesSet = null;
    // Last node spec fetched from the node repo; null until the first fetch.
    private ContainerNodeSpec lastNodeSpec = null;
    private CpuUsageReporter lastCpuMetric;
    /**
     * Creates an agent managing the node with the given hostname.
     * If a container for the node is already running, CPU accounting restarts from the
     * container's creation time and the state is set so the resume script will be re-run.
     */
    public NodeAgentImpl(
            final String hostName,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final Optional<StorageMaintainer> storageMaintainer,
            final MetricReceiverWrapper metricReceiver,
            final Environment environment,
            final Clock clock,
            final Optional<AclMaintainer> aclMaintainer) {
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.hostname = hostName;
        this.containerName = ContainerName.fromHostname(hostName);
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
        this.metricReceiver = metricReceiver;
        this.environment = environment;
        this.clock = clock;
        this.aclMaintainer = aclMaintainer;
        this.lastConverge = clock.instant();
        lastCpuMetric = new CpuUsageReporter(clock.instant());
        dockerOperations.getContainer(containerName)
                .ifPresent(container -> {
                    if (container.state.isRunning()) {
                        // Container survived an agent restart: measure CPU from its creation time.
                        lastCpuMetric = new CpuUsageReporter(container.created);
                    }
                    containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
                    logger.info("Container is already running, setting containerState to " + containerState);
                });
    }
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
@Override
public Map<String, Object> debugInfo() {
Map<String, Object> debug = new LinkedHashMap<>();
debug.put("Hostname", hostname);
debug.put("isFrozen", isFrozen);
debug.put("wantFrozen", wantFrozen);
debug.put("terminated", terminated);
debug.put("workToDoNow", workToDoNow);
synchronized (debugMessages) {
debug.put("History", new LinkedList<>(debugMessages));
}
debug.put("Node repo state", lastNodeSpec.nodeState.name());
return debug;
}
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (!terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
@Override
public void stop() {
addDebugMessage("Stopping");
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop host thread " + hostname);
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop host thread " + hostname);
}
}
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (containerState != RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
return;
}
addDebugMessage("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
containerState = RUNNING;
logger.info("Resume command successfully executed, new containerState is " + containerState);
}
    // Builds the node attributes reflecting this agent's current state and publishes them
    // to the node repo if changed. Docker image / vespa version are reported empty while
    // no container exists.
    private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
        final NodeAttributes nodeAttributes = new NodeAttributes()
                .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
                .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
        publishStateToNodeRepoIfChanged(nodeAttributes);
    }
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
lastAttributesSet = currentAttributes;
}
}
    // Starts the container for the given node spec: applies ACLs first, then starts the
    // container, resets CPU accounting and writes config files into it. Leaves the agent
    // in a state where the resume script still needs to run.
    private void startContainer(ContainerNodeSpec nodeSpec) {
        aclMaintainer.ifPresent(AclMaintainer::run);
        dockerOperations.startContainer(containerName, nodeSpec);
        lastCpuMetric = new CpuUsageReporter(clock.instant());
        writeConfigs(nodeSpec);
        addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
                RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
        containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
        logger.info("Container successfully started, new containerState is " + containerState);
    }
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
.map(container -> {
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + container + ": " + restartReason);
restartServices(nodeSpec, container);
});
return container;
});
}
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
if (!nodeSpec.currentRestartGeneration.isPresent() ||
nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
}
return Optional.empty();
}
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
    // Suspends the node (best effort) and then stops all Vespa services in the container.
    @Override
    public void stopServices() {
        logger.info("Stopping services for " + containerName);
        dockerOperations.trySuspendNode(containerName);
        dockerOperations.stopServicesOnNode(containerName);
    }
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
return Optional.empty();
}
// Removes the container when shouldRemoveContainer() gives a reason: suspends the node
// (if active), best-effort stops services, removes the container and resets local state.
// Returns the container when kept, empty when removed.
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
if (existingContainer.state.isRunning()) {
// Only active nodes need Orchestrator permission before going down.
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
// Removal must proceed even when a graceful service stop fails.
logger.info("Failed stopping services, ignoring", e);
}
}
dockerOperations.removeContainer(existingContainer);
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
logger.info("Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
// Schedules an async download of the wanted Docker image when it differs from the
// current one; signalWorkToBeDone is the completion callback that re-runs the loop.
// NOTE(review): wantedDockerImage.get() is called without an isPresent() check — this
// assumes the node repo never clears wantedDockerImage while currentDockerImage is set;
// verify against ContainerNodeSpec invariants.
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
// Already downloading this exact image; nothing more to do.
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
// Image is no longer pending download; clear the in-progress marker.
imageBeingDownloaded = null;
}
}
// Wakes the tick loop so the next converge runs immediately instead of after the
// regular interval. Safe to call from any thread.
private void signalWorkToBeDone() {
synchronized (monitor) {
if (!workToDoNow) {
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
}
// One iteration of the agent loop: waits until the converge interval has elapsed or
// work is signalled, applies any pending freeze/unfreeze, then runs converge() unless
// frozen. All mutable loop state is read and written under 'monitor'.
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (!workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
// Expected while the Orchestrator denies suspend/resume; info level only.
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
// Errors leave the agent in an unknown state; take the whole JVM down.
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
// Drives the container towards the state the node repository wants for this node:
// fetches the spec, then dispatches on node state to tear down, (re)start or clean up
// the container, and reports current attributes back to the node repo.
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
Optional<Container> container = getContainer();
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
// Spec changed: drop stale per-container metrics and refresh config files.
metricReceiver.unsetMetricsForContainer(hostname);
if (container.isPresent()) {
writeConfigs(nodeSpec);
}
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
// Wait for the image before touching the container; the download-complete
// callback signals work, which re-runs converge.
if (isDownloadingImage()) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
if (! container.isPresent()) {
startContainer(nodeSpec);
}
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoWithCurrentAttributes(nodeSpec);
nodeRepository.markNodeAvailableForNewAllocation(hostname);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
// Serializes all collected metrics into the single string argument expected by the
// in-container rpc_invoke setExtraMetrics call.
@SuppressWarnings("unchecked")
protected String buildRPCArgumentFromMetrics() throws JsonProcessingException {
StringBuilder params = new StringBuilder();
for (MetricReceiverWrapper.DimensionMetrics dimensionMetrics : metricReceiver.getAllMetrics()) {
params.append(dimensionMetrics.toSecretAgentReport());
}
return "s:'" + params.toString() + "'";
}
// Samples a gauge from a raw docker-stats map, silently skipping when the key is
// missing; any failure (e.g. non-numeric value) is logged and swallowed so one bad
// metric cannot break the whole reporting pass.
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
Map<String, Object> metricsMap = (Map<String, Object>) metrics;
if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
try {
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
.sample(((Number) metricsMap.get(metricName)).doubleValue());
} catch (Throwable e) {
logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
}
}
// Writes metrics and filebeat config files for the container via the storage maintainer.
private void writeConfigs(ContainerNodeSpec nodeSpec) {
storageMaintainer.ifPresent(maintainer -> {
maintainer.writeMetricsConfig(containerName, nodeSpec);
maintainer.writeFilebeatConfig(containerName, nodeSpec);
});
}
// Local shortcut: when we already know the container is absent, skip asking Docker.
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
return dockerOperations.getContainer(containerName);
}
@Override
public String getHostname() {
return hostname;
}
// True while an image download scheduled by scheduleDownLoadIfNeeded() is in flight.
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
// Read-and-clear counter of unhandled exceptions seen by tick() since the last call.
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
// Tracks cumulative container/system CPU totals between samples so each call can report
// the container's CPU usage as a percentage of host CPU over the last interval.
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
// When the measured container was started; used for the uptime metric.
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
// Returns 0 on the first sample (no previous totals yet) or when the system counter
// did not advance, which also avoids division by zero.
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
// Seconds since 'created', per the agent's injected clock.
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
// Asks the Orchestrator for permission to suspend this node. A denied request surfaces
// as an OrchestratorException, which tick() logs at info level.
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
// Current and requested freeze state plus the work flag; all guarded by 'monitor'.
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
// Non-null while an image download is in progress (see scheduleDownLoadIfNeeded).
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
// NOTE(review): SimpleDateFormat is not thread-safe; sdf is only touched inside
// addDebugMessage's synchronized block, which serializes access.
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
// Bounded in-memory log of recent agent events, guarded by itself.
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
// Lifecycle of the managed Docker container as this agent last observed it.
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
// Last attributes pushed to the node repo; used to suppress redundant updates.
private NodeAttributes lastAttributesSet = null;
// Last spec fetched from the node repo; null until the first converge().
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
// Wires up collaborators. If a container for this node is already running (e.g. after
// a node-admin restart) it is adopted: CPU accounting starts from the container's
// creation time and the state is set to "running, resume script not yet run".
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
// Adopt a pre-existing container left over from a previous node-admin process.
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
logger.info("Container is already running, setting containerState to " + containerState);
});
}
// Requests freezing/unfreezing of the agent loop and returns whether the agent has
// already reached the requested state. The actual transition happens in tick().
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
// Appends a timestamped message to the bounded debug history (capped near 1000
// entries) and mirrors it to the debug log.
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
/**
 * Snapshot of the agent's state for debugging: freeze flags, termination state, the
 * recent debug-message history and the node state last seen from the node repo.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // Bug fix: lastNodeSpec is null until the first successful converge(); the original
    // dereferenced it unconditionally and threw NullPointerException when debugInfo()
    // was called before then.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
// Starts the background tick loop; may only be called once per agent instance.
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (!terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
// Terminates the tick loop and waits up to 10s for the thread to exit; may only be
// called once (terminated is flipped with compareAndSet).
@Override
public void stop() {
addDebugMessage("Stopping");
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop host thread " + hostname);
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop host thread " + hostname);
}
}
// Runs the optional in-container resume command exactly once after a (re)start, then
// marks the container fully RUNNING. The nodeSpec parameter is currently unused.
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (containerState != RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
return;
}
addDebugMessage("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
containerState = RUNNING;
logger.info("Resume command successfully executed, new containerState is " + containerState);
}
// Reports this node's generations, image and version back to the node repo. Image and
// version are only reported while a container exists (containerState != ABSENT).
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
.withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
// Pushes attributes to the node repo only when they differ from the last published set.
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
lastAttributesSet = currentAttributes;
}
}
// Starts the container: refreshes ACLs, launches it, resets CPU accounting, writes
// config files, then waits for the resume script (RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN).
private void startContainer(ContainerNodeSpec nodeSpec) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
lastCpuMetric = new CpuUsageReporter(clock.instant());
writeConfigs(nodeSpec);
addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
logger.info("Container successfully started, new containerState is " + containerState);
}
// Removes the container when required; if it survives, restarts its services when the
// restart generation has been bumped. Returns the surviving container, if any.
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
.map(container -> {
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + container + ": " + restartReason);
restartServices(nodeSpec, container);
});
return container;
});
}
/**
 * Returns the reason services must be restarted, or empty when no restart is needed.
 * A restart is needed when a wanted restart generation exists and is ahead of the
 * current generation (or no current generation has been recorded yet).
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (!nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        // Bug fix: the original called currentRestartGeneration.get() unconditionally when
        // building this message, throwing NoSuchElementException whenever the Optional was
        // empty (the first half of the condition above). orElse(null) renders "null" instead.
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.orElse(null) + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
// Restarts the Vespa services inside the container. Only acts when the container is
// running and the node is active; suspends the node via the Orchestrator first.
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
// Ask the Orchestrator for permission before taking services (briefly) down.
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
// Stops Vespa services in this agent's container; attempts a suspend first (best effort).
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
// Decides whether the existing container must be torn down, returning the reason if so.
// Checks run in priority order: node state, then image mismatch, then stopped container.
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
return Optional.empty();
}
// Removes the container when shouldRemoveContainer() gives a reason: suspends the node
// (if active), best-effort stops services, removes the container and resets local state.
// Returns the container when kept, empty when removed.
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
if (existingContainer.state.isRunning()) {
// Only active nodes need Orchestrator permission before going down.
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
// Removal must proceed even when a graceful service stop fails.
logger.info("Failed stopping services, ignoring", e);
}
}
dockerOperations.removeContainer(existingContainer);
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
logger.info("Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
// Schedules an async download of the wanted Docker image when it differs from the
// current one; signalWorkToBeDone is the completion callback that re-runs the loop.
// NOTE(review): wantedDockerImage.get() is called without an isPresent() check — this
// assumes the node repo never clears wantedDockerImage while currentDockerImage is set;
// verify against ContainerNodeSpec invariants.
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
// Already downloading this exact image; nothing more to do.
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
// Image is no longer pending download; clear the in-progress marker.
imageBeingDownloaded = null;
}
}
// Wakes the tick loop so the next converge runs immediately instead of after the
// regular interval. Safe to call from any thread.
private void signalWorkToBeDone() {
synchronized (monitor) {
if (!workToDoNow) {
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
}
// One iteration of the agent loop: waits until the converge interval has elapsed or
// work is signalled, applies any pending freeze/unfreeze, then runs converge() unless
// frozen. All mutable loop state is read and written under 'monitor'.
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (!workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
// Expected while the Orchestrator denies suspend/resume; info level only.
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
// Errors leave the agent in an unknown state; take the whole JVM down.
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
// Drives the container towards the state the node repository wants for this node:
// fetches the spec, then dispatches on node state to tear down, (re)start or clean up
// the container, and reports current attributes back to the node repo.
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
Optional<Container> container = getContainer();
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
// Spec changed: drop stale per-container metrics and refresh config files.
metricReceiver.unsetMetricsForContainer(hostname);
if (container.isPresent()) {
writeConfigs(nodeSpec);
}
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
// Wait for the image before touching the container; the download-complete
// callback signals work, which re-runs converge.
if (isDownloadingImage()) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
if (! container.isPresent()) {
startContainer(nodeSpec);
}
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoWithCurrentAttributes(nodeSpec);
nodeRepository.markNodeAvailableForNewAllocation(hostname);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
// Serializes all collected metrics into the single string argument expected by the
// in-container rpc_invoke setExtraMetrics call.
@SuppressWarnings("unchecked")
protected String buildRPCArgumentFromMetrics() throws JsonProcessingException {
StringBuilder params = new StringBuilder();
for (MetricReceiverWrapper.DimensionMetrics dimensionMetrics : metricReceiver.getAllMetrics()) {
params.append(dimensionMetrics.toSecretAgentReport());
}
return "s:'" + params.toString() + "'";
}
// Samples a gauge from a raw docker-stats map, silently skipping when the key is
// missing; any failure (e.g. non-numeric value) is logged and swallowed so one bad
// metric cannot break the whole reporting pass.
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
Map<String, Object> metricsMap = (Map<String, Object>) metrics;
if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
try {
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
.sample(((Number) metricsMap.get(metricName)).doubleValue());
} catch (Throwable e) {
logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
}
}
// Writes metrics and filebeat config files for the container via the storage maintainer.
private void writeConfigs(ContainerNodeSpec nodeSpec) {
storageMaintainer.ifPresent(maintainer -> {
maintainer.writeMetricsConfig(containerName, nodeSpec);
maintainer.writeFilebeatConfig(containerName, nodeSpec);
});
}
// Local shortcut: when we already know the container is absent, skip asking Docker.
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
return dockerOperations.getContainer(containerName);
}
@Override
public String getHostname() {
return hostname;
}
// True while an image download scheduled by scheduleDownLoadIfNeeded() is in flight.
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
// Read-and-clear counter of unhandled exceptions seen by tick() since the last call.
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
// Tracks cumulative container/system CPU totals between samples so each call can report
// the container's CPU usage as a percentage of host CPU over the last interval.
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
// When the measured container was started; used for the uptime metric.
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
// Returns 0 on the first sample (no previous totals yet) or when the system counter
// did not advance, which also avoids division by zero.
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
// Seconds since 'created', per the agent's injected clock.
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
// Asks the Orchestrator for permission to suspend this node. A denied request surfaces
// as an OrchestratorException, which tick() logs at info level.
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} |
Please add whitespace around '|' | public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
nodeSpec.vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_NODE, dimensions, "alive").sample(1);
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if (!containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final long bytesInGB = 1 << 30;
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName));
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
metricReceiver.declareGauge(APP, dimensions, "cpu.util").sample(cpuPercentageOfAllocated);
metricReceiver.declareGauge(APP, dimensions, "mem.limit").sample(memoryTotalBytes);
metricReceiver.declareGauge(APP, dimensions, "mem.used").sample(memoryTotalBytesUsed);
metricReceiver.declareGauge(APP, dimensions, "mem.util").sample(memoryPercentUsed);
diskTotalBytes.ifPresent(diskLimit -> metricReceiver.declareGauge(APP, dimensions, "disk.limit").sample(diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed -> metricReceiver.declareGauge(APP, dimensions, "disk.used").sample(diskUsed));
diskPercentUsed.ifPresent(diskUtil -> metricReceiver.declareGauge(APP, dimensions, "disk.util").sample(diskUtil));
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
metricReceiver.declareGauge(APP, netDims, "net.in.bytes").sample(infStats.get("rx_bytes").longValue());
metricReceiver.declareGauge(APP, netDims, "net.in.errors").sample(infStats.get("rx_errors").longValue());
metricReceiver.declareGauge(APP, netDims, "net.in.dropped").sample(infStats.get("rx_dropped").longValue());
metricReceiver.declareGauge(APP, netDims, "net.out.bytes").sample(infStats.get("tx_bytes").longValue());
metricReceiver.declareGauge(APP, netDims, "net.out.errors").sample(infStats.get("tx_errors").longValue());
metricReceiver.declareGauge(APP, netDims, "net.out.dropped").sample(infStats.get("tx_dropped").longValue());
});
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
diskTotalBytes.ifPresent(diskLimit ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.used").sample(diskUsed));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
try {
dockerOperations.executeCommandInContainerAsRoot(containerName, 1L, "rpc_invoke", "-t 1", "tcp/localhost:19091", "setExtraMetrics", buildRPCArgumentFromMetrics());
} catch (DockerExecTimeoutException|JsonProcessingException e) {
logger.warning("Unable to push metrics to container: " + containerName, e);
}
} | } catch (DockerExecTimeoutException|JsonProcessingException e) { | public void updateContainerNodeMetrics(int numAllocatedContainersOnHost) {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
nodeSpec.vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_NODE, dimensions, "alive").sample(1);
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if (!containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final long bytesInGB = 1 << 30;
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName));
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
metricReceiver.declareGauge(APP, dimensions, "cpu.util").sample(cpuPercentageOfAllocated);
metricReceiver.declareGauge(APP, dimensions, "mem.limit").sample(memoryTotalBytes);
metricReceiver.declareGauge(APP, dimensions, "mem.used").sample(memoryTotalBytesUsed);
metricReceiver.declareGauge(APP, dimensions, "mem.util").sample(memoryPercentUsed);
diskTotalBytes.ifPresent(diskLimit -> metricReceiver.declareGauge(APP, dimensions, "disk.limit").sample(diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed -> metricReceiver.declareGauge(APP, dimensions, "disk.used").sample(diskUsed));
diskPercentUsed.ifPresent(diskUtil -> metricReceiver.declareGauge(APP, dimensions, "disk.util").sample(diskUtil));
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
metricReceiver.declareGauge(APP, netDims, "net.in.bytes").sample(infStats.get("rx_bytes").longValue());
metricReceiver.declareGauge(APP, netDims, "net.in.errors").sample(infStats.get("rx_errors").longValue());
metricReceiver.declareGauge(APP, netDims, "net.in.dropped").sample(infStats.get("rx_dropped").longValue());
metricReceiver.declareGauge(APP, netDims, "net.out.bytes").sample(infStats.get("tx_bytes").longValue());
metricReceiver.declareGauge(APP, netDims, "net.out.errors").sample(infStats.get("tx_errors").longValue());
metricReceiver.declareGauge(APP, netDims, "net.out.dropped").sample(infStats.get("tx_dropped").longValue());
});
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
diskTotalBytes.ifPresent(diskLimit ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.used").sample(diskUsed));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
try {
dockerOperations.executeCommandInContainerAsRoot(containerName, 1L, "rpc_invoke", "-t 1", "tcp/localhost:19091", "setExtraMetrics", buildRPCArgumentFromMetrics());
} catch (DockerExecTimeoutException|JsonProcessingException e) {
logger.warning("Unable to push metrics to container: " + containerName, e);
}
} | class NodeAgentImpl implements NodeAgent {
// Set by stop(); the tick loop exits once this becomes true.
private final AtomicBoolean terminated = new AtomicBoolean(false);
// isFrozen is the state the agent is actually in; wantFrozen is the requested state.
private boolean isFrozen = true;
private boolean wantFrozen = false;
// Forces an immediate converge on the next tick instead of waiting out the interval.
private boolean workToDoNow = true;
// Guards isFrozen/wantFrozen/workToDoNow and backs the tick loop's wait/notify.
private final Object monitor = new Object();
private final PrefixLogger logger;
// Non-null while an image download has been scheduled but has not completed.
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
// NOTE(review): SimpleDateFormat is not thread-safe, but every use happens under
// the debugMessages lock in addDebugMessage(), which makes this safe as written.
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
// Bounded (~1000 entries) in-memory history, exposed via debugInfo().
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
// Lifecycle of the managed Docker container as seen by this agent.
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
// Last attribute set pushed to the node repo; used to suppress redundant updates.
private NodeAttributes lastAttributesSet = null;
// Null until the first successful converge().
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
/**
 * Creates an agent for the node with the given hostname.
 * If a container for this node is already running (e.g. after a node-admin restart),
 * the agent adopts it: CPU accounting starts from the container's creation time and
 * the state is set to RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN so the resume script runs once.
 */
public NodeAgentImpl(
        final String hostName,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final Optional<StorageMaintainer> storageMaintainer,
        final MetricReceiverWrapper metricReceiver,
        final Environment environment,
        final Clock clock,
        final Optional<AclMaintainer> aclMaintainer) {
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.hostname = hostName;
    this.containerName = ContainerName.fromHostname(hostName);
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
    this.metricReceiver = metricReceiver;
    this.environment = environment;
    this.clock = clock;
    this.aclMaintainer = aclMaintainer;
    this.lastConverge = clock.instant();
    lastCpuMetric = new CpuUsageReporter(clock.instant());
    // Adopt an already-running container left over from a previous agent instance.
    dockerOperations.getContainer(containerName)
            .ifPresent(container -> {
                if (container.state.isRunning()) {
                    lastCpuMetric = new CpuUsageReporter(container.created);
                }
                containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
                logger.info("Container is already running, setting containerState to " + containerState);
            });
}
/**
 * Requests that the agent freeze (stop converging) or unfreeze.
 * The request is applied asynchronously by the tick loop.
 *
 * @return true if the agent has already reached the requested state
 */
@Override
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        boolean requestChanged = (frozen != wantFrozen);
        if (requestChanged) {
            wantFrozen = frozen;
            addDebugMessage(frozen ? "Freezing" : "Unfreezing");
            signalWorkToBeDone();
        }
        return frozen == isFrozen;
    }
}
/** Appends a timestamped message to the bounded in-memory debug history. */
private void addDebugMessage(String message) {
    synchronized (debugMessages) {
        // Drop the oldest entries to keep the history bounded.
        while (debugMessages.size() > 1000) {
            debugMessages.pop();
        }
        logger.debug(message);
        String timestamped = "[" + sdf.format(new Date()) + "] " + message;
        debugMessages.add(timestamped);
    }
}
/**
 * Returns a snapshot of the agent's internal state for debug/REST inspection.
 * Fix: lastNodeSpec is null until the first converge() has completed; previously
 * this method threw a NullPointerException when called before then.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // Guard: no node spec has been loaded yet before the first converge.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
/**
 * Starts the agent's tick loop in a dedicated thread named "tick-<hostname>".
 *
 * @param intervalMillis delay between converge attempts
 * @throws RuntimeException if the agent was already started
 */
@Override
public void start(int intervalMillis) {
    addDebugMessage("Starting with interval " + intervalMillis + "ms");
    delaysBetweenEachConvergeMillis = intervalMillis;
    if (loopThread != null) {
        throw new RuntimeException("Can not restart a node agent.");
    }
    loopThread = new Thread(() -> {
        while (!terminated.get()) tick();
    });
    loopThread.setName("tick-" + hostname);
    loopThread.start();
}
/**
 * Stops the tick loop and waits up to 10 seconds for the loop thread to exit.
 * Fix: re-interrupts the current thread when the join is interrupted instead of
 * silently swallowing the interrupt (standard InterruptedException handling).
 *
 * @throws RuntimeException if the agent was already stopped
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        logger.error("Interrupted; Could not stop host thread " + hostname);
        Thread.currentThread().interrupt(); // preserve interrupt status for callers
    }
}
/** Runs the in-container resume script once after the container has been (re)started. */
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
    // Only needed when the container is up but the resume script has not yet run.
    if (containerState == RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
        addDebugMessage("Starting optional node program resume command");
        dockerOperations.resumeNode(containerName);
        containerState = RUNNING;
        logger.info("Resume command successfully executed, new containerState is " + containerState);
    }
}
/**
 * Builds the node attributes reflecting the agent's current state and publishes
 * them to the node repo if they differ from what was last published.
 */
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
    // Wanted image/version are only reported once a container actually exists.
    final boolean containerExists = (containerState != ABSENT);
    final NodeAttributes nodeAttributes = new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(containerExists
                    ? nodeSpec.wantedDockerImage.orElse(new DockerImage(""))
                    : new DockerImage(""))
            .withVespaVersion(containerExists ? nodeSpec.wantedVespaVersion.orElse("") : "");
    publishStateToNodeRepoIfChanged(nodeAttributes);
}
/** Pushes the attributes to the node repo, but only when they changed since last push. */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (currentAttributes.equals(lastAttributesSet)) return;
    logger.info("Publishing new set of attributes to node repo: "
            + lastAttributesSet + " -> " + currentAttributes);
    addDebugMessage("Publishing new set of attributes to node repo: {" +
            lastAttributesSet + "} -> {" + currentAttributes + "}");
    nodeRepository.updateNodeAttributes(hostname, currentAttributes);
    lastAttributesSet = currentAttributes;
}
// Creates and starts the container for the given spec, resets CPU accounting,
// and writes metrics/filebeat configs into the fresh container.
private void startContainer(ContainerNodeSpec nodeSpec) {
    // Refresh ACLs first so the new container comes up with correct firewall rules.
    aclMaintainer.ifPresent(AclMaintainer::run);
    dockerOperations.startContainer(containerName, nodeSpec);
    lastCpuMetric = new CpuUsageReporter(clock.instant());
    writeConfigs(nodeSpec);
    addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
            RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
    containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
    logger.info("Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the container if required by the spec; otherwise restarts its services
 * when the restart generation has been bumped.
 *
 * @return the container if it is still present after this call, empty otherwise
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
    Optional<Container> remaining = existingContainer
            .flatMap(container -> removeContainerIfNeeded(nodeSpec, container));
    remaining.ifPresent(container ->
            shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                logger.info("Will restart services for container " + container + ": " + restartReason);
                restartServices(nodeSpec, container);
            }));
    return remaining;
}
/**
 * Returns a reason to restart services if the wanted restart generation is ahead
 * of the current one, empty otherwise.
 * Fix: the reason message previously called currentRestartGeneration.get() even on
 * the branch entered precisely because that Optional is absent, throwing
 * NoSuchElementException instead of producing a message.
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (!nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.map(String::valueOf).orElse("absent")
                + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
// Restarts Vespa services inside a running container of an active node.
// Suspends via the orchestrator first; orchestrator denial propagates as an
// exception handled by tick().
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
        ContainerName containerName = existingContainer.name;
        logger.info("Restarting services for " + containerName);
        orchestratorSuspendNode();
        dockerOperations.restartVespaOnNode(containerName);
    }
}
// Suspends the node (best effort) and then stops all Vespa services in the container.
@Override
public void stopServices() {
    logger.info("Stopping services for " + containerName);
    dockerOperations.trySuspendNode(containerName);
    dockerOperations.stopServicesOnNode(containerName);
}
/**
 * Decides whether the existing container must be removed.
 * Checks are ordered: node state first, then wanted-image mismatch, then liveness.
 *
 * @return the removal reason, or empty if the container should be kept
 */
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final Node.State state = nodeSpec.nodeState;
    if (state == Node.State.dirty || state == Node.State.provisioned) {
        return Optional.of("Node in state " + state + ", container should no longer be running");
    }
    boolean wantsDifferentImage = nodeSpec.wantedDockerImage.isPresent()
            && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image);
    if (wantsDifferentImage) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    return Optional.empty();
}
// Removes the container when shouldRemoveContainer() yields a reason.
// Returns the container if it was kept, empty if it was removed.
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
    if (removeReason.isPresent()) {
        logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
        if (existingContainer.state.isRunning()) {
            // Only active nodes need orchestrator permission before going down.
            if (nodeSpec.nodeState == Node.State.active) {
                orchestratorSuspendNode();
            }
            try {
                stopServices();
            } catch (Exception e) {
                // Best effort: removal proceeds even if services fail to stop cleanly.
                logger.info("Failed stopping services, ignoring", e);
            }
        }
        dockerOperations.removeContainer(existingContainer);
        metricReceiver.unsetMetricsForContainer(hostname);
        containerState = ABSENT;
        logger.info("Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
// Schedules an asynchronous download of the wanted Docker image when it differs
// from the current one; the completion callback re-triggers the tick loop.
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
    if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
    // NOTE(review): wantedDockerImage.get() below assumes the wanted image is present
    // whenever the two Optionals differ — confirm this holds when currentDockerImage
    // is present but wantedDockerImage is empty.
    if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
        if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
            // Download of this exact image is already in flight.
            return;
        }
        imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
    } else if (imageBeingDownloaded != null) {
        // Image is now locally available; clear the in-flight marker.
        imageBeingDownloaded = null;
    }
}
/** Wakes the tick loop so it converges immediately instead of waiting out the interval. */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (workToDoNow) return; // already signaled
        workToDoNow = true;
        addDebugMessage("Signaling work to be done");
        monitor.notifyAll();
    }
}
// One iteration of the agent loop: sleep until the converge interval has elapsed
// (or work is signaled), apply any pending freeze request, then converge.
void tick() {
    boolean isFrozenCopy;
    synchronized (monitor) {
        // Wait out the remainder of the interval unless workToDoNow was signaled.
        while (!workToDoNow) {
            long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
            if (remainder > 0) {
                try {
                    monitor.wait(remainder);
                } catch (InterruptedException e) {
                    logger.error("Interrupted, but ignoring this: " + hostname);
                }
            } else break;
        }
        lastConverge = clock.instant();
        workToDoNow = false;
        // Apply a pending freeze/unfreeze request under the lock.
        if (isFrozen != wantFrozen) {
            isFrozen = wantFrozen;
            logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
        }
        isFrozenCopy = isFrozen;
    }
    if (isFrozenCopy) {
        addDebugMessage("tick: isFrozen");
    } else {
        try {
            converge();
        } catch (OrchestratorException e) {
            // Expected when the orchestrator denies an operation; retried next tick.
            logger.info(e.getMessage());
            addDebugMessage(e.getMessage());
        } catch (Exception e) {
            // Count and log, but keep the loop running.
            numberOfUnhandledException++;
            logger.error("Unhandled exception, ignoring.", e);
            addDebugMessage(e.getMessage());
        } catch (Throwable t) {
            // Errors (e.g. OutOfMemoryError) are treated as unrecoverable: kill the process.
            logger.error("Unhandled throwable, taking down system.", t);
            System.exit(234);
        }
    }
}
// Fetches the node spec from the node repo and drives the local container toward it.
// Called from tick(); exceptions are handled by the caller.
void converge() {
    final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
            .orElseThrow(() ->
                    new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
    Optional<Container> container = getContainer();
    if (!nodeSpec.equals(lastNodeSpec)) {
        addDebugMessage("Loading new node spec: " + nodeSpec.toString());
        lastNodeSpec = nodeSpec;
        // Spec changed: previously declared metric dimensions may be stale, drop them.
        metricReceiver.unsetMetricsForContainer(hostname);
        if (container.isPresent()) {
            writeConfigs(nodeSpec);
        }
    }
    switch (nodeSpec.nodeState) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should be running in these states.
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case active:
            storageMaintainer.ifPresent(maintainer -> {
                maintainer.removeOldFilesFromNode(containerName);
                maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
            });
            scheduleDownLoadIfNeeded(nodeSpec);
            if (isDownloadingImage()) {
                // Wait for the async download; its callback re-triggers a tick.
                addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            if (! container.isPresent()) {
                startContainer(nodeSpec);
            }
            runLocalResumeScriptIfNeeded(nodeSpec);
            // Report attributes before resuming so the node repo sees the new state first.
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            logger.info("Call resume against Orchestrator");
            orchestrator.resume(hostname);
            break;
        case inactive:
            storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case provisioned:
            nodeRepository.markAsDirty(hostname);
            break;
        case dirty:
            // Remove the container, archive application data, then hand the node back.
            storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
            storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            nodeRepository.markNodeAvailableForNewAllocation(hostname);
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
    }
}
/**
 * Serializes all collected metrics into the string-argument form expected by the
 * in-container setExtraMetrics RPC: s:'<concatenated secret-agent reports>'.
 */
@SuppressWarnings("unchecked")
protected String buildRPCArgumentFromMetrics() throws JsonProcessingException {
    StringBuilder argument = new StringBuilder("s:'");
    for (MetricReceiverWrapper.DimensionMetrics metrics : metricReceiver.getAllMetrics()) {
        argument.append(metrics.toSecretAgentReport());
    }
    return argument.append("'").toString();
}
// Samples a gauge from a raw Docker stats map, silently skipping when the metric
// is missing. Catching Throwable is deliberate: one malformed value must not
// abort the whole metrics pass.
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
    Map<String, Object> metricsMap = (Map<String, Object>) metrics;
    if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
    try {
        metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
                .sample(((Number) metricsMap.get(metricName)).doubleValue());
    } catch (Throwable e) {
        logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
    }
}
/** Writes metrics and filebeat configs for the node; no-op without a storage maintainer. */
private void writeConfigs(ContainerNodeSpec nodeSpec) {
    if (!storageMaintainer.isPresent()) return;
    StorageMaintainer maintainer = storageMaintainer.get();
    maintainer.writeMetricsConfig(containerName, nodeSpec);
    maintainer.writeFilebeatConfig(containerName, nodeSpec);
}
/** Looks up the container, skipping the Docker round-trip when we know none exists. */
private Optional<Container> getContainer() {
    return containerState == ABSENT
            ? Optional.empty()
            : dockerOperations.getContainer(containerName);
}
// Hostname of the node this agent manages.
@Override
public String getHostname() {
    return hostname;
}
// True while an image download scheduled by scheduleDownLoadIfNeeded() is in flight.
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}
/**
 * Returns the number of unhandled exceptions counted since the last call and resets it.
 * NOTE(review): read-then-reset is not atomic; presumably this and tick() run such
 * that no increment is lost — confirm the calling pattern.
 */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
// Tracks cumulative container/system CPU counters between samples and converts
// the deltas into a utilization percentage. Non-static on purpose: uses the
// agent's injected clock for uptime.
class CpuUsageReporter {
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;
    // When the measured container was created; basis for getUptime().
    private final Instant created;
    CpuUsageReporter(Instant created) {
        this.created = created;
    }
    // CPU used since the previous call, as a percentage of system CPU time in the
    // same window. The first call (no baseline yet, totalSystemUsage == 0) returns 0.
    double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
        long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
        double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
                0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
        totalContainerUsage = currentContainerUsage;
        totalSystemUsage = currentSystemUsage;
        return cpuUsagePct;
    }
    // Seconds since the container was created, per the injected clock.
    long getUptime() {
        return Duration.between(created, clock.instant()).getSeconds();
    }
}
// Asks the orchestrator for permission to suspend this node; denial surfaces as
// an exception that tick() handles.
private void orchestratorSuspendNode() {
    logger.info("Ask Orchestrator for permission to suspend node " + hostname);
    orchestrator.suspend(hostname);
}
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
logger.info("Container is already running, setting containerState to " + containerState);
});
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
/**
 * Returns a snapshot of the agent's internal state for debug/REST inspection.
 * Fix: lastNodeSpec is null until the first converge() has completed; previously
 * this method threw a NullPointerException when called before then.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // Guard: no node spec has been loaded yet before the first converge.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (!terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
/**
 * Stops the tick loop and waits up to 10 seconds for the loop thread to exit.
 * Fix: re-interrupts the current thread when the join is interrupted instead of
 * silently swallowing the interrupt (standard InterruptedException handling).
 *
 * @throws RuntimeException if the agent was already stopped
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        logger.error("Interrupted; Could not stop host thread " + hostname);
        Thread.currentThread().interrupt(); // preserve interrupt status for callers
    }
}
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (containerState != RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
return;
}
addDebugMessage("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
containerState = RUNNING;
logger.info("Resume command successfully executed, new containerState is " + containerState);
}
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
.withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
lastAttributesSet = currentAttributes;
}
}
private void startContainer(ContainerNodeSpec nodeSpec) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
lastCpuMetric = new CpuUsageReporter(clock.instant());
writeConfigs(nodeSpec);
addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
logger.info("Container successfully started, new containerState is " + containerState);
}
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
.map(container -> {
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + container + ": " + restartReason);
restartServices(nodeSpec, container);
});
return container;
});
}
/**
 * Returns a reason to restart services if the wanted restart generation is ahead
 * of the current one, empty otherwise.
 * Fix: the reason message previously called currentRestartGeneration.get() even on
 * the branch entered precisely because that Optional is absent, throwing
 * NoSuchElementException instead of producing a message.
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (!nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.map(String::valueOf).orElse("absent")
                + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
return Optional.empty();
}
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
if (existingContainer.state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
logger.info("Failed stopping services, ignoring", e);
}
}
dockerOperations.removeContainer(existingContainer);
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
logger.info("Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
private void signalWorkToBeDone() {
synchronized (monitor) {
if (!workToDoNow) {
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
}
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (!workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
Optional<Container> container = getContainer();
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
metricReceiver.unsetMetricsForContainer(hostname);
if (container.isPresent()) {
writeConfigs(nodeSpec);
}
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
if (isDownloadingImage()) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
if (! container.isPresent()) {
startContainer(nodeSpec);
}
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoWithCurrentAttributes(nodeSpec);
nodeRepository.markNodeAvailableForNewAllocation(hostname);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
/**
 * Builds the secret-agent RPC argument by concatenating the report form of every
 * metric currently held by the metric receiver.
 *
 * @return the concatenated metric reports wrapped as {@code s:'...'}
 * @throws JsonProcessingException if a metric cannot be serialized to its report form
 */
protected String buildRPCArgumentFromMetrics() throws JsonProcessingException {
    // Removed spurious @SuppressWarnings("unchecked"): this method performs no unchecked operations.
    StringBuilder params = new StringBuilder();
    for (MetricReceiverWrapper.DimensionMetrics dimensionMetrics : metricReceiver.getAllMetrics()) {
        params.append(dimensionMetrics.toSecretAgentReport());
    }
    return "s:'" + params + "'";
}
/**
 * Declares and samples the gauge {@code yamasName} from the given raw metrics map,
 * if the map is non-null and contains {@code metricName}. Failures to sample are
 * logged and swallowed (best effort) rather than aborting metric collection.
 *
 * @param dimensions dimensions to attach to the gauge
 * @param yamasName  name the metric is reported under
 * @param metrics    raw metrics, expected to be a {@code Map<String, Object>} (may be null)
 * @param metricName key to look up in the metrics map
 */
@SuppressWarnings("unchecked") // cast of the raw metrics object below
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
    Map<String, Object> metricsMap = (Map<String, Object>) metrics;
    if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
    try {
        metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
                .sample(((Number) metricsMap.get(metricName)).doubleValue());
    } catch (Exception e) { // was Throwable — never swallow Errors (e.g. OutOfMemoryError); let them propagate
        logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
    }
}
/** Writes metrics and filebeat configuration for the container, when a storage maintainer is configured. */
private void writeConfigs(ContainerNodeSpec nodeSpec) {
    if (!storageMaintainer.isPresent()) return;
    StorageMaintainer maintainer = storageMaintainer.get();
    maintainer.writeMetricsConfig(containerName, nodeSpec);
    maintainer.writeFilebeatConfig(containerName, nodeSpec);
}
/** Returns the container, or empty when the agent has recorded the container as absent. */
private Optional<Container> getContainer() {
    return containerState == ABSENT
            ? Optional.empty()
            : dockerOperations.getContainer(containerName);
}
/** Returns the hostname of the node this agent manages. */
@Override
public String getHostname() {
return hostname;
}
/** Returns whether a docker image download is currently in progress for this agent. */
@Override
public boolean isDownloadingImage() {
    // A non-null image reference is recorded for exactly as long as a download is pending.
    return !(imageBeingDownloaded == null);
}
/**
 * Returns the number of unhandled exceptions seen since the last call, resetting the counter.
 * NOTE(review): read-then-reset is not atomic — presumably only ever called from one thread; confirm.
 */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int snapshot = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return snapshot;
}
/**
 * Tracks cumulative container and system CPU counters, converting successive readings
 * into the container's CPU-usage percentage since the previous reading.
 */
class CpuUsageReporter {
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;
    private final Instant created;

    CpuUsageReporter(Instant created) {
        this.created = created;
    }

    /**
     * Returns the container's share (percent) of system CPU consumed since the previous
     * reading, then records the new totals. The very first reading yields 0 because no
     * baseline exists yet.
     */
    double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
        final long systemDelta = currentSystemUsage - totalSystemUsage;
        final long containerDelta = currentContainerUsage - totalContainerUsage;
        double cpuUsagePct = 0;
        if (systemDelta != 0 && totalSystemUsage != 0) {
            cpuUsagePct = 100.0 * containerDelta / systemDelta;
        }
        // Record the new baseline only after the percentage has been computed.
        totalContainerUsage = currentContainerUsage;
        totalSystemUsage = currentSystemUsage;
        return cpuUsagePct;
    }

    /** Returns seconds elapsed since this reporter was created, per the agent's clock. */
    long getUptime() {
        return Duration.between(created, clock.instant()).getSeconds();
    }
}
/** Asks the Orchestrator for permission to suspend this node, logging the request first. */
private void orchestratorSuspendNode() {
    final String message = "Ask Orchestrator for permission to suspend node " + hostname;
    logger.info(message);
    orchestrator.suspend(hostname);
}
} |
Please store this long expected string in a few separate constants (one for routing, one for metrics, etc.), concatenate them into a single constant, and use that single constant here. | public void testGetRelevantMetrics() throws Exception {
final ObjectMapper objectMapper = new ObjectMapper();
ClassLoader classLoader = getClass().getClassLoader();
File statsFile = new File(classLoader.getResource("docker.stats.json").getFile());
Map<String, Map<String, Object>> dockerStats = objectMapper.readValue(statsFile, Map.class);
Map<String, Object> networks = dockerStats.get("networks");
Map<String, Object> precpu_stats = dockerStats.get("precpu_stats");
Map<String, Object> cpu_stats = dockerStats.get("cpu_stats");
Map<String, Object> memory_stats = dockerStats.get("memory_stats");
Map<String, Object> blkio_stats = dockerStats.get("blkio_stats");
Docker.ContainerStats stats1 = new ContainerStatsImpl(networks, precpu_stats, memory_stats, blkio_stats);
Docker.ContainerStats stats2 = new ContainerStatsImpl(networks, cpu_stats, memory_stats, blkio_stats);
ContainerNodeSpec.Owner owner = new ContainerNodeSpec.Owner("tester", "testapp", "testinstance");
ContainerNodeSpec.Membership membership = new ContainerNodeSpec.Membership("clustType", "clustId", "grp", 3, false);
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.owner(owner)
.membership(membership)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(storageMaintainer.updateIfNeededAndGetDiskMetricsFor(eq(containerName))).thenReturn(Optional.of(42547019776L));
when(dockerOperations.getContainerStats(eq(containerName)))
.thenReturn(Optional.of(stats1))
.thenReturn(Optional.of(stats2));
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics(5);
clock.advance(Duration.ofSeconds(1234));
nodeAgent.updateContainerNodeMetrics(5);
File expectedMetricsFile = new File(classLoader.getResource("docker.stats.metrics.active.expected.json").getFile());
Set<Map<String, Object>> expectedMetrics = objectMapper.readValue(expectedMetricsFile, Set.class);
Set<Map<String, Object>> actualMetrics = metricReceiver.getAllMetricsRaw();
String arg = nodeAgent.buildRPCArgumentFromMetrics();
arg = arg.replaceAll("\"timestamp\":\\d+", "\"timestamp\":0");
assertEquals("s:'{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"vespa.node\",\"metrics\":{\"mem.limit\":4.294967296E9,\"mem.used\":1.073741824E9,\"alive\":1.0,\"disk.used\":4.2547019776E10,\"disk.util\":15.85,\"cpu.util\":6.75,\"disk.limit\":2.68435456E11,\"mem.util\":25.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"vespa.node\",\"metrics\":{\"net.out.bytes\":2.0303455E7,\"net.out.dropped\":13.0,\"net.in.dropped\":4.0,\"net.in.bytes\":1.949927E7,\"net.out.errors\":3.0,\"net.in.errors\":55.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth0\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"vespa.node\",\"metrics\":{\"net.out.bytes\":5.4246745E7,\"net.out.dropped\":0.0,\"net.in.dropped\":0.0,\"net.in.bytes\":3245766.0,\"net.out.errors\":0.0,\"net.in.errors\":0.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth1\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"doc
ker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"host_life\",\"metrics\":{\"alive\":1.0,\"uptime\":1234.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"docker\",\"metrics\":{\"node.disk.limit\":2.68435456E11,\"node.disk.used\":4.2547019776E10,\"node.memory.usage\":1.073741824E9,\"node.cpu.busy.pct\":6.75,\"node.cpu.throttled_time\":4523.0,\"node.memory.limit\":4.294967296E9,\"node.alive\":1.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"docker\",\"metrics\":{\"node.net.in.dropped\":4.0,\"node.net.out.errors\":3.0,\"node.net.out.bytes\":2.0303455E7,\"node.net.in.bytes\":1.949927E7,\"node.net.out.dropped\":13.0,\"node.net.in.errors\":55.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance
\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth0\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"docker\",\"metrics\":{\"node.net.in.dropped\":0.0,\"node.net.out.errors\":0.0,\"node.net.out.bytes\":5.4246745E7,\"node.net.in.bytes\":3245766.0,\"node.net.out.dropped\":0.0,\"node.net.in.errors\":0.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth1\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}'", arg);
assertEquals(expectedMetrics, actualMetrics);
} | assertEquals("s:'{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"vespa.node\",\"metrics\":{\"mem.limit\":4.294967296E9,\"mem.used\":1.073741824E9,\"alive\":1.0,\"disk.used\":4.2547019776E10,\"disk.util\":15.85,\"cpu.util\":6.75,\"disk.limit\":2.68435456E11,\"mem.util\":25.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"vespa.node\",\"metrics\":{\"net.out.bytes\":2.0303455E7,\"net.out.dropped\":13.0,\"net.in.dropped\":4.0,\"net.in.bytes\":1.949927E7,\"net.out.errors\":3.0,\"net.in.errors\":55.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth0\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"vespa.node\",\"metrics\":{\"net.out.bytes\":5.4246745E7,\"net.out.dropped\":0.0,\"net.in.dropped\":0.0,\"net.in.bytes\":3245766.0,\"net.out.errors\":0.0,\"net.in.errors\":0.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth1\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\
"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"host_life\",\"metrics\":{\"alive\":1.0,\"uptime\":1234.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"docker\",\"metrics\":{\"node.disk.limit\":2.68435456E11,\"node.disk.used\":4.2547019776E10,\"node.memory.usage\":1.073741824E9,\"node.cpu.busy.pct\":6.75,\"node.cpu.throttled_time\":4523.0,\"node.memory.limit\":4.294967296E9,\"node.alive\":1.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"docker\",\"metrics\":{\"node.net.in.dropped\":4.0,\"node.net.out.errors\":3.0,\"node.net.out.bytes\":2.0303455E7,\"node.net.in.bytes\":1.949927E7,\"node.net.out.dropped\":13.0,\"node.net.in.errors\":55.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinst
ance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth0\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"docker\",\"metrics\":{\"node.net.in.dropped\":0.0,\"node.net.out.errors\":0.0,\"node.net.out.bytes\":5.4246745E7,\"node.net.in.bytes\":3245766.0,\"node.net.out.dropped\":0.0,\"node.net.in.errors\":0.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth1\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}'", arg); | public void testGetRelevantMetrics() throws Exception {
final ObjectMapper objectMapper = new ObjectMapper();
ClassLoader classLoader = getClass().getClassLoader();
File statsFile = new File(classLoader.getResource("docker.stats.json").getFile());
Map<String, Map<String, Object>> dockerStats = objectMapper.readValue(statsFile, Map.class);
Map<String, Object> networks = dockerStats.get("networks");
Map<String, Object> precpu_stats = dockerStats.get("precpu_stats");
Map<String, Object> cpu_stats = dockerStats.get("cpu_stats");
Map<String, Object> memory_stats = dockerStats.get("memory_stats");
Map<String, Object> blkio_stats = dockerStats.get("blkio_stats");
Docker.ContainerStats stats1 = new ContainerStatsImpl(networks, precpu_stats, memory_stats, blkio_stats);
Docker.ContainerStats stats2 = new ContainerStatsImpl(networks, cpu_stats, memory_stats, blkio_stats);
ContainerNodeSpec.Owner owner = new ContainerNodeSpec.Owner("tester", "testapp", "testinstance");
ContainerNodeSpec.Membership membership = new ContainerNodeSpec.Membership("clustType", "clustId", "grp", 3, false);
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.owner(owner)
.membership(membership)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(storageMaintainer.updateIfNeededAndGetDiskMetricsFor(eq(containerName))).thenReturn(Optional.of(42547019776L));
when(dockerOperations.getContainerStats(eq(containerName)))
.thenReturn(Optional.of(stats1))
.thenReturn(Optional.of(stats2));
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics(5);
clock.advance(Duration.ofSeconds(1234));
nodeAgent.updateContainerNodeMetrics(5);
File expectedMetricsFile = new File(classLoader.getResource("docker.stats.metrics.active.expected.json").getFile());
Set<Map<String, Object>> expectedMetrics = objectMapper.readValue(expectedMetricsFile, Set.class);
Set<Map<String, Object>> actualMetrics = metricReceiver.getAllMetricsRaw();
String arg = nodeAgent.buildRPCArgumentFromMetrics();
arg = arg.replaceAll("\"timestamp\":\\d+", "\"timestamp\":0");
assertEquals("s:'{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"vespa.node\",\"metrics\":{\"mem.limit\":4.294967296E9,\"mem.used\":1.073741824E9,\"alive\":1.0,\"disk.used\":4.2547019776E10,\"disk.util\":15.85,\"cpu.util\":6.75,\"disk.limit\":2.68435456E11,\"mem.util\":25.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"vespa.node\",\"metrics\":{\"net.out.bytes\":2.0303455E7,\"net.out.dropped\":13.0,\"net.in.dropped\":4.0,\"net.in.bytes\":1.949927E7,\"net.out.errors\":3.0,\"net.in.errors\":55.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth0\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"vespa.node\",\"metrics\":{\"net.out.bytes\":5.4246745E7,\"net.out.dropped\":0.0,\"net.in.dropped\":0.0,\"net.in.bytes\":3245766.0,\"net.out.errors\":0.0,\"net.in.errors\":0.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth1\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"doc
ker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"host_life\",\"metrics\":{\"alive\":1.0,\"uptime\":1234.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"docker\",\"metrics\":{\"node.disk.limit\":2.68435456E11,\"node.disk.used\":4.2547019776E10,\"node.memory.usage\":1.073741824E9,\"node.cpu.busy.pct\":6.75,\"node.cpu.throttled_time\":4523.0,\"node.memory.limit\":4.294967296E9,\"node.alive\":1.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"docker\",\"metrics\":{\"node.net.in.dropped\":4.0,\"node.net.out.errors\":3.0,\"node.net.out.bytes\":2.0303455E7,\"node.net.in.bytes\":1.949927E7,\"node.net.out.dropped\":13.0,\"node.net.in.errors\":55.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance
\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth0\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}{\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}},\"application\":\"docker\",\"metrics\":{\"node.net.in.dropped\":0.0,\"node.net.out.errors\":0.0,\"node.net.out.bytes\":5.4246745E7,\"node.net.in.bytes\":3245766.0,\"node.net.out.dropped\":0.0,\"node.net.in.errors\":0.0},\"dimensions\":{\"app\":\"testapp.testinstance\",\"role\":\"tenants\",\"instanceName\":\"testinstance\",\"vespaVersion\":\"1.2.3\",\"clusterid\":\"clustId\",\"interface\":\"eth1\",\"parentHostname\":\"parent.host.name.yahoo.com\",\"flavor\":\"docker\",\"clustertype\":\"clustType\",\"tenantName\":\"tester\",\"zone\":\"dev.us-east-1\",\"host\":\"host1.test.yahoo.com\",\"state\":\"active\",\"applicationId\":\"tester.testapp.testinstance\",\"applicationName\":\"testapp\"},\"timestamp\":0}'", arg);
assertEquals(expectedMetrics, actualMetrics);
} | class NodeAgentImplTest {
// Flavor/resource constants used when building node specs in these tests.
private static final double MIN_CPU_CORES = 2;
private static final double MIN_MAIN_MEMORY_AVAILABLE_GB = 16;
private static final double MIN_DISK_AVAILABLE_GB = 250;
private static final String vespaVersion = "1.2.3";
// Fixed identifiers for the node under test.
private final String hostName = "host1.test.yahoo.com";
private final ContainerName containerName = new ContainerName("host1");
private final DockerImage dockerImage = new DockerImage("dockerImage");
// Mocked collaborators of NodeAgentImpl.
private final DockerOperations dockerOperations = mock(DockerOperations.class);
private final NodeRepository nodeRepository = mock(NodeRepository.class);
private final Orchestrator orchestrator = mock(Orchestrator.class);
private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
// Real receiver (null-implementation backend) so metric contents can be inspected by tests.
private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
private final AclMaintainer aclMaintainer = mock(AclMaintainer.class);
private final Docker.ContainerStats emptyContainerStats = new ContainerStatsImpl(Collections.emptyMap(),
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
private final PathResolver pathResolver = mock(PathResolver.class);
// Manual clock lets tests advance time deterministically (e.g. for uptime metrics).
private final ManualClock clock = new ManualClock();
// Environment resembling a dev zone; path lookups go through the mocked path resolver.
private final Environment environment = new Environment.Builder()
.environment("dev")
.region("us-east-1")
.parentHostHostname("parent.host.name.yahoo.com")
.inetAddressResolver(new InetAddressResolver())
.pathResolver(pathResolver).build();
// Base node-spec builder shared by the tests; each test adds state/image/generation fields.
private final ContainerNodeSpec.Builder nodeSpecBuilder = new ContainerNodeSpec.Builder()
.hostname(hostName)
.nodeType("tenant")
.nodeFlavor("docker")
.minCpuCores(MIN_CPU_CORES)
.minMainMemoryAvailableGb(MIN_MAIN_MEMORY_AVAILABLE_GB)
.minDiskAvailableGb(MIN_DISK_AVAILABLE_GB);
/**
 * An active node whose wanted and current image, restart and reboot generations all match
 * must only be resumed and have its attributes refreshed — never stopped, removed,
 * suspended, or scheduled for an image download.
 */
@Test
public void upToDateContainerIsUntouched() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
// No destructive or superfluous actions.
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
// Resume node, update node-repo attributes, then resume in the orchestrator — in that order.
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository);
inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName,
new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
inOrder.verify(orchestrator).resume(hostName);
}
/**
 * An active node without a running container must get ACLs applied, the container started
 * and resumed, its node-repo attributes updated, and the orchestrator resumed — in that order.
 */
@Test
public void absentContainerCausesStart() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
// No existing container (image null, isRunning false).
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.converge();
// Nothing removed or suspended, no image download scheduled.
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository, aclMaintainer);
inOrder.verify(aclMaintainer, times(1)).run();
inOrder.verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
inOrder.verify(orchestrator).resume(hostName);
}
/**
 * When a new wanted image must first be pulled, the agent schedules the download and leaves
 * the currently running container untouched (no removal, no suspend/resume).
 */
@Test
public void containerIsNotStoppedIfNewImageMustBePulled() throws Exception {
final DockerImage newDockerImage = new DockerImage("new-image");
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(newDockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
// Force the "image needs downloading" path.
when(dockerOperations.shouldScheduleDownloadOfImage(any())).thenReturn(true);
nodeAgent.converge();
verify(orchestrator, never()).suspend(any(String.class));
verify(orchestrator, never()).resume(any(String.class));
verify(dockerOperations, never()).removeContainer(any());
final InOrder inOrder = inOrder(dockerOperations);
inOrder.verify(dockerOperations, times(1)).shouldScheduleDownloadOfImage(eq(newDockerImage));
inOrder.verify(dockerOperations, times(1)).scheduleDownloadOfImage(eq(containerName), eq(newDockerImage), any());
}
/**
 * With a wanted restart (generation mismatch), a failing converge() must not start the
 * container, resume in the orchestrator, or update node attributes.
 * NOTE(review): nodeRepository.getContainerNodeSpec(..) is never stubbed here, so the mock's
 * default return makes converge() fail early — confirm this is the intended failure mode
 * rather than an actual orchestrator-suspend failure.
 */
@Test
public void noRestartIfOrchestratorSuspendFails() throws Exception {
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
try {
nodeAgent.converge();
fail("Expected to throw an exception");
} catch (Exception ignored) { }
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository, never()).updateNodeAttributes(any(String.class), any(NodeAttributes.class));
}
/**
 * A node in state {@code failed} keeps its container running: no removal, no orchestrator
 * resume — but its node-repo attributes are still refreshed.
 */
@Test
public void failedNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.failed)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
/**
 * A node in state {@code ready} triggers no container actions. Repeated converge() calls
 * look up the container only once — presumably the agent caches container state after the
 * first pass (TODO confirm). Attributes are reported with empty image/version.
 */
@Test
public void readyNodeLeadsToNoAction() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null,false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
// Converge several times; only the first should need to inspect docker state.
nodeAgent.converge();
nodeAgent.converge();
nodeAgent.converge();
verify(dockerOperations, times(1)).getContainer(eq(containerName));
verify(dockerOperations, never()).removeContainer(any());
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
/**
 * A node in state {@code inactive} keeps its container: old files are cleaned from the node,
 * but the container is not removed and the orchestrator is not resumed. Attributes are
 * still refreshed in the node repo.
 */
@Test
public void inactiveNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.inactive)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
// Cleanup happens, removal must not.
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void reservedNodeDoesNotUpdateNodeRepoWithVersion() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.reserved)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
    // Drives converge() for a node in the given state that still has a running
    // container, and verifies the full recycle path: old files removed, container
    // removed, node data archived, node marked available, and attributes reset to
    // an empty image/version. wantedRestartGeneration is optional because some
    // nodes have no restart generation set.
    private void nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State nodeState, Optional<Long> wantedRestartGeneration) {
        wantedRestartGeneration.ifPresent(restartGeneration -> nodeSpecBuilder
                .wantedRestartGeneration(restartGeneration)
                .currentRestartGeneration(restartGeneration));
        final ContainerNodeSpec nodeSpec = nodeSpecBuilder
                .wantedDockerImage(dockerImage)
                .currentDockerImage(dockerImage)
                .nodeState(nodeState)
                .build();
        NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
        when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
        nodeAgent.converge();
        // The cleanup steps must happen in exactly this order.
        final InOrder inOrder = inOrder(storageMaintainer, dockerOperations, nodeRepository);
        inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
        inOrder.verify(dockerOperations, times(1)).removeContainer(any());
        inOrder.verify(storageMaintainer, times(1)).archiveNodeData(eq(containerName));
        inOrder.verify(nodeRepository, times(1)).markNodeAvailableForNewAllocation(eq(hostName));
        verify(dockerOperations, never()).startContainer(eq(containerName), any());
        verify(orchestrator, never()).resume(any(String.class));
        verify(orchestrator, never()).suspend(any(String.class));
        // A recycled node reports an empty image/version; the restart generation
        // is null when none was requested.
        verify(nodeRepository, times(1)).updateNodeAttributes(
                any(String.class), eq(new NodeAttributes()
                        .withRestartGeneration(wantedRestartGeneration.orElse(null))
                        .withRebootGeneration(0L)
                        .withDockerImage(new DockerImage(""))
                        .withVespaVersion("")));
    }
    @Test
    public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycled() {
        // Dirty node with an explicit restart generation follows the full recycle path.
        nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.of(1L));
    }
    @Test
    public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycledNoRestartGeneration() {
        // Same recycle path, but without any restart generation on the node spec.
        nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.empty());
    }
@Test
public void provisionedNodeIsMarkedAsDirty() throws Exception {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.provisioned)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository, times(1)).markAsDirty(eq(hostName));
}
@Test
public void testRestartDeadContainerAfterNodeAdminRestart() throws IOException {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.currentDockerImage(dockerImage)
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.tick();
verify(dockerOperations, times(1)).removeContainer(any());
verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
}
    // converge() must keep failing until dockerOperations.resumeNode succeeds;
    // the orchestrator is only told to resume after a successful resumeNode.
    @Test
    public void resumeProgramRunsUntilSuccess() throws Exception {
        final long restartGeneration = 1;
        final ContainerNodeSpec nodeSpec = nodeSpecBuilder
                .wantedDockerImage(dockerImage)
                .currentDockerImage(dockerImage)
                .nodeState(Node.State.active)
                .vespaVersion(vespaVersion)
                .wantedRestartGeneration(restartGeneration)
                .currentRestartGeneration(restartGeneration)
                .build();
        NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
        when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
        final InOrder inOrder = inOrder(orchestrator, dockerOperations, nodeRepository);
        // First resumeNode call throws, second call succeeds.
        doThrow(new RuntimeException("Failed 1st time"))
                .doNothing()
                .when(dockerOperations).resumeNode(eq(containerName));
        try {
            nodeAgent.converge();
            fail("Expected to throw an exception");
        } catch (RuntimeException ignored) { }
        // After the failed converge, only resumeNode was attempted.
        inOrder.verify(dockerOperations, times(1)).resumeNode(any());
        inOrder.verifyNoMoreInteractions();
        nodeAgent.converge();
        // Second converge succeeds and resumes in the orchestrator.
        inOrder.verify(dockerOperations).resumeNode(any());
        inOrder.verify(orchestrator).resume(hostName);
        inOrder.verifyNoMoreInteractions();
    }
    @Test
    public void testSetFrozen() {
        NodeAgentImpl nodeAgent = spy(makeNodeAgent(dockerImage, true));
        doNothing().when(nodeAgent).converge();  // isolate freeze bookkeeping from converge logic
        nodeAgent.tick();
        verify(nodeAgent, times(1)).converge();
        // NOTE(review): setFrozen appears to return whether the agent has reached
        // the wanted frozen state, and the transition needs a clock advance of
        // ~35s to complete — confirm against NodeAgentImpl.
        assertFalse(nodeAgent.setFrozen(true));
        nodeAgent.tick();
        verify(nodeAgent, times(1)).converge();  // frozen: tick() no longer converges
        assertTrue(nodeAgent.setFrozen(true));
        clock.advance(Duration.ofSeconds(35));
        nodeAgent.tick();
        verify(nodeAgent, times(1)).converge();  // still frozen after time passes
        assertFalse(nodeAgent.setFrozen(false));
        nodeAgent.tick();
        verify(nodeAgent, times(2)).converge();  // unfreezing resumes converging
        assertTrue(nodeAgent.setFrozen(false));
        clock.advance(Duration.ofSeconds(35));
        nodeAgent.tick();
        verify(nodeAgent, times(3)).converge();
    }
@Test
@SuppressWarnings("unchecked")
@Test
@SuppressWarnings("unchecked")
public void testGetRelevantMetricsForReadyNode() throws Exception {
final ObjectMapper objectMapper = new ObjectMapper();
ClassLoader classLoader = getClass().getClassLoader();
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.getContainerStats(eq(containerName))).thenReturn(Optional.empty());
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics(5);
File expectedMetricsFile = new File(classLoader.getResource("docker.stats.metrics.ready.expected.json").getFile());
Set<Map<String, Object>> expectedMetrics = objectMapper.readValue(expectedMetricsFile, Set.class);
Set<Map<String, Object>> actualMetrics = metricReceiver.getAllMetricsRaw();
assertEquals(expectedMetrics, actualMetrics);
}
private NodeAgentImpl makeNodeAgent(DockerImage dockerImage, boolean isRunning) {
Optional<Container> container = dockerImage != null ?
Optional.of(new Container(
hostName,
dockerImage,
containerName,
isRunning ? Container.State.RUNNING : Container.State.EXITED,
isRunning ? 1 : 0,
clock.instant().toString())) :
Optional.empty();
when(dockerOperations.getContainerStats(any())).thenReturn(Optional.of(emptyContainerStats));
when(dockerOperations.getContainer(eq(containerName))).thenReturn(container);
doNothing().when(storageMaintainer).writeFilebeatConfig(any(), any());
doNothing().when(storageMaintainer).writeMetricsConfig(any(), any());
return new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations,
Optional.of(storageMaintainer), metricReceiver, environment, clock, Optional.of(aclMaintainer));
}
} | class NodeAgentImplTest {
    // Flavor resource constraints fed into every node spec built below.
    private static final double MIN_CPU_CORES = 2;
    private static final double MIN_MAIN_MEMORY_AVAILABLE_GB = 16;
    private static final double MIN_DISK_AVAILABLE_GB = 250;
    private static final String vespaVersion = "1.2.3";
    // Identity of the node under test and its container.
    private final String hostName = "host1.test.yahoo.com";
    private final ContainerName containerName = new ContainerName("host1");
    private final DockerImage dockerImage = new DockerImage("dockerImage");
    // Collaborators are Mockito mocks; the metric receiver is a real wrapper
    // around a null implementation so emitted metrics can be inspected.
    private final DockerOperations dockerOperations = mock(DockerOperations.class);
    private final NodeRepository nodeRepository = mock(NodeRepository.class);
    private final Orchestrator orchestrator = mock(Orchestrator.class);
    private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
    private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
    private final AclMaintainer aclMaintainer = mock(AclMaintainer.class);
    private final Docker.ContainerStats emptyContainerStats = new ContainerStatsImpl(Collections.emptyMap(),
            Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
    private final PathResolver pathResolver = mock(PathResolver.class);
    // Manual clock lets tests advance time deterministically (see testSetFrozen).
    private final ManualClock clock = new ManualClock();
    private final Environment environment = new Environment.Builder()
            .environment("dev")
            .region("us-east-1")
            .parentHostHostname("parent.host.name.yahoo.com")
            .inetAddressResolver(new InetAddressResolver())
            .pathResolver(pathResolver).build();
    // Shared builder pre-populated with common node properties; individual tests
    // add state, image and versions before calling build().
    private final ContainerNodeSpec.Builder nodeSpecBuilder = new ContainerNodeSpec.Builder()
            .hostname(hostName)
            .nodeType("tenant")
            .nodeFlavor("docker")
            .minCpuCores(MIN_CPU_CORES)
            .minMainMemoryAvailableGb(MIN_MAIN_MEMORY_AVAILABLE_GB)
            .minDiskAvailableGb(MIN_DISK_AVAILABLE_GB);
@Test
public void upToDateContainerIsUntouched() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository);
inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName,
new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
inOrder.verify(orchestrator).resume(hostName);
}
    @Test
    public void absentContainerCausesStart() throws Exception {
        // An active node with no existing container must get one: ACLs are applied
        // before the container is started, then the node is resumed, its attributes
        // reported, and finally the orchestrator resumed — in that order.
        final long restartGeneration = 1;
        final long rebootGeneration = 0;
        final ContainerNodeSpec nodeSpec = nodeSpecBuilder
                .wantedDockerImage(dockerImage)
                .nodeState(Node.State.active)
                .wantedVespaVersion(vespaVersion)
                .wantedRestartGeneration(restartGeneration)
                .currentRestartGeneration(restartGeneration)
                .wantedRebootGeneration(rebootGeneration)
                .build();
        NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
        when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
        when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
        nodeAgent.converge();
        verify(dockerOperations, never()).removeContainer(any());
        verify(orchestrator, never()).suspend(any(String.class));
        verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
        final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository, aclMaintainer);
        inOrder.verify(aclMaintainer, times(1)).run();
        inOrder.verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
        inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
        inOrder.verify(nodeRepository).updateNodeAttributes(
                hostName, new NodeAttributes()
                        .withRestartGeneration(restartGeneration)
                        .withRebootGeneration(rebootGeneration)
                        .withDockerImage(dockerImage)
                        .withVespaVersion(vespaVersion));
        inOrder.verify(orchestrator).resume(hostName);
    }
@Test
public void containerIsNotStoppedIfNewImageMustBePulled() throws Exception {
final DockerImage newDockerImage = new DockerImage("new-image");
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(newDockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.shouldScheduleDownloadOfImage(any())).thenReturn(true);
nodeAgent.converge();
verify(orchestrator, never()).suspend(any(String.class));
verify(orchestrator, never()).resume(any(String.class));
verify(dockerOperations, never()).removeContainer(any());
final InOrder inOrder = inOrder(dockerOperations);
inOrder.verify(dockerOperations, times(1)).shouldScheduleDownloadOfImage(eq(newDockerImage));
inOrder.verify(dockerOperations, times(1)).scheduleDownloadOfImage(eq(containerName), eq(newDockerImage), any());
}
    // NOTE(review): despite the name, orchestrator.suspend(...) is never stubbed
    // to fail here, and nodeRepository.getContainerNodeSpec(...) is not stubbed
    // at all — converge() presumably fails because no node spec is available
    // rather than because suspend failed. Confirm this is the intended setup.
    @Test
    public void noRestartIfOrchestratorSuspendFails() throws Exception {
        final long wantedRestartGeneration = 2;
        final long currentRestartGeneration = 1;
        final ContainerNodeSpec nodeSpec = nodeSpecBuilder
                .wantedDockerImage(dockerImage)
                .currentDockerImage(dockerImage)
                .nodeState(Node.State.active)
                .wantedVespaVersion(vespaVersion)
                .vespaVersion(vespaVersion)
                .wantedRestartGeneration(wantedRestartGeneration)
                .currentRestartGeneration(currentRestartGeneration)
                .build();
        NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
        try {
            nodeAgent.converge();
            fail("Expected to throw an exception");
        } catch (Exception ignored) { }
        // On a failed converge, nothing may be restarted, resumed or reported.
        verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
        verify(orchestrator, never()).resume(any(String.class));
        verify(nodeRepository, never()).updateNodeAttributes(any(String.class), any(NodeAttributes.class));
    }
@Test
public void failedNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.failed)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void readyNodeLeadsToNoAction() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null,false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
nodeAgent.converge();
nodeAgent.converge();
verify(dockerOperations, times(1)).getContainer(eq(containerName));
verify(dockerOperations, never()).removeContainer(any());
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
@Test
public void inactiveNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.inactive)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void reservedNodeDoesNotUpdateNodeRepoWithVersion() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.reserved)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
    // Drives converge() for a node in the given state that still has a running
    // container, and verifies the full recycle path: old files removed, container
    // removed, node data archived, node marked available, and attributes reset to
    // an empty image/version. wantedRestartGeneration is optional because some
    // nodes have no restart generation set.
    private void nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State nodeState, Optional<Long> wantedRestartGeneration) {
        wantedRestartGeneration.ifPresent(restartGeneration -> nodeSpecBuilder
                .wantedRestartGeneration(restartGeneration)
                .currentRestartGeneration(restartGeneration));
        final ContainerNodeSpec nodeSpec = nodeSpecBuilder
                .wantedDockerImage(dockerImage)
                .currentDockerImage(dockerImage)
                .nodeState(nodeState)
                .build();
        NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
        when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
        nodeAgent.converge();
        // The cleanup steps must happen in exactly this order.
        final InOrder inOrder = inOrder(storageMaintainer, dockerOperations, nodeRepository);
        inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
        inOrder.verify(dockerOperations, times(1)).removeContainer(any());
        inOrder.verify(storageMaintainer, times(1)).archiveNodeData(eq(containerName));
        inOrder.verify(nodeRepository, times(1)).markNodeAvailableForNewAllocation(eq(hostName));
        verify(dockerOperations, never()).startContainer(eq(containerName), any());
        verify(orchestrator, never()).resume(any(String.class));
        verify(orchestrator, never()).suspend(any(String.class));
        // A recycled node reports an empty image/version; the restart generation
        // is null when none was requested.
        verify(nodeRepository, times(1)).updateNodeAttributes(
                any(String.class), eq(new NodeAttributes()
                        .withRestartGeneration(wantedRestartGeneration.orElse(null))
                        .withRebootGeneration(0L)
                        .withDockerImage(new DockerImage(""))
                        .withVespaVersion("")));
    }
    @Test
    public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycled() {
        // Dirty node with an explicit restart generation follows the full recycle path.
        nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.of(1L));
    }
    @Test
    public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycledNoRestartGeneration() {
        // Same recycle path, but without any restart generation on the node spec.
        nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.empty());
    }
@Test
public void provisionedNodeIsMarkedAsDirty() throws Exception {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.provisioned)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository, times(1)).markAsDirty(eq(hostName));
}
@Test
public void testRestartDeadContainerAfterNodeAdminRestart() throws IOException {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.currentDockerImage(dockerImage)
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.tick();
verify(dockerOperations, times(1)).removeContainer(any());
verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
}
    // converge() must keep failing until dockerOperations.resumeNode succeeds;
    // the orchestrator is only told to resume after a successful resumeNode.
    @Test
    public void resumeProgramRunsUntilSuccess() throws Exception {
        final long restartGeneration = 1;
        final ContainerNodeSpec nodeSpec = nodeSpecBuilder
                .wantedDockerImage(dockerImage)
                .currentDockerImage(dockerImage)
                .nodeState(Node.State.active)
                .vespaVersion(vespaVersion)
                .wantedRestartGeneration(restartGeneration)
                .currentRestartGeneration(restartGeneration)
                .build();
        NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
        when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
        final InOrder inOrder = inOrder(orchestrator, dockerOperations, nodeRepository);
        // First resumeNode call throws, second call succeeds.
        doThrow(new RuntimeException("Failed 1st time"))
                .doNothing()
                .when(dockerOperations).resumeNode(eq(containerName));
        try {
            nodeAgent.converge();
            fail("Expected to throw an exception");
        } catch (RuntimeException ignored) { }
        // After the failed converge, only resumeNode was attempted.
        inOrder.verify(dockerOperations, times(1)).resumeNode(any());
        inOrder.verifyNoMoreInteractions();
        nodeAgent.converge();
        // Second converge succeeds and resumes in the orchestrator.
        inOrder.verify(dockerOperations).resumeNode(any());
        inOrder.verify(orchestrator).resume(hostName);
        inOrder.verifyNoMoreInteractions();
    }
    @Test
    public void testSetFrozen() {
        NodeAgentImpl nodeAgent = spy(makeNodeAgent(dockerImage, true));
        doNothing().when(nodeAgent).converge();  // isolate freeze bookkeeping from converge logic
        nodeAgent.tick();
        verify(nodeAgent, times(1)).converge();
        // NOTE(review): setFrozen appears to return whether the agent has reached
        // the wanted frozen state, and the transition needs a clock advance of
        // ~35s to complete — confirm against NodeAgentImpl.
        assertFalse(nodeAgent.setFrozen(true));
        nodeAgent.tick();
        verify(nodeAgent, times(1)).converge();  // frozen: tick() no longer converges
        assertTrue(nodeAgent.setFrozen(true));
        clock.advance(Duration.ofSeconds(35));
        nodeAgent.tick();
        verify(nodeAgent, times(1)).converge();  // still frozen after time passes
        assertFalse(nodeAgent.setFrozen(false));
        nodeAgent.tick();
        verify(nodeAgent, times(2)).converge();  // unfreezing resumes converging
        assertTrue(nodeAgent.setFrozen(false));
        clock.advance(Duration.ofSeconds(35));
        nodeAgent.tick();
        verify(nodeAgent, times(3)).converge();
    }
@Test
@SuppressWarnings("unchecked")
@Test
@SuppressWarnings("unchecked")
public void testGetRelevantMetricsForReadyNode() throws Exception {
final ObjectMapper objectMapper = new ObjectMapper();
ClassLoader classLoader = getClass().getClassLoader();
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.getContainerStats(eq(containerName))).thenReturn(Optional.empty());
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics(5);
File expectedMetricsFile = new File(classLoader.getResource("docker.stats.metrics.ready.expected.json").getFile());
Set<Map<String, Object>> expectedMetrics = objectMapper.readValue(expectedMetricsFile, Set.class);
Set<Map<String, Object>> actualMetrics = metricReceiver.getAllMetricsRaw();
assertEquals(expectedMetrics, actualMetrics);
}
private NodeAgentImpl makeNodeAgent(DockerImage dockerImage, boolean isRunning) {
Optional<Container> container = dockerImage != null ?
Optional.of(new Container(
hostName,
dockerImage,
containerName,
isRunning ? Container.State.RUNNING : Container.State.EXITED,
isRunning ? 1 : 0,
clock.instant().toString())) :
Optional.empty();
when(dockerOperations.getContainerStats(any())).thenReturn(Optional.of(emptyContainerStats));
when(dockerOperations.getContainer(eq(containerName))).thenReturn(container);
doNothing().when(storageMaintainer).writeFilebeatConfig(any(), any());
doNothing().when(storageMaintainer).writeMetricsConfig(any(), any());
return new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations,
Optional.of(storageMaintainer), metricReceiver, environment, clock, Optional.of(aclMaintainer));
}
} |
// NOTE: checked on a random host with 30 running containers, it timed out 7 times over the last 30 days.
        // Publishes node and container metrics. Bails out early when no node spec
        // has been observed yet; container-level stats are skipped when the
        // container is absent or docker reports no stats.
        final ContainerNodeSpec nodeSpec = lastNodeSpec;
        if (nodeSpec == null) return;
        // Dimensions attached to every metric emitted for this node.
        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", hostname)
                .add("role", "tenants")
                .add("flavor", nodeSpec.nodeFlavor)
                .add("state", nodeSpec.nodeState.toString())
                .add("zone", environment.getZone())
                .add("parentHostname", environment.getParentHostHostname());
        nodeSpec.vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
        nodeSpec.owner.ifPresent(owner ->
                dimensionsBuilder
                        .add("tenantName", owner.tenant)
                        .add("applicationName", owner.application)
                        .add("instanceName", owner.instance)
                        .add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
                        .add("app", owner.application + "." + owner.instance));
        nodeSpec.membership.ifPresent(membership ->
                dimensionsBuilder
                        .add("clustertype", membership.clusterType)
                        .add("clusterid", membership.clusterId));
        Dimensions dimensions = dimensionsBuilder.build();
        // Liveness is reported even when there is no container.
        metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_NODE, dimensions, "alive").sample(1);
        metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
        if (containerState == ABSENT) return;
        Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
        if (!containerStats.isPresent()) return;
        Docker.ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        final long bytesInGB = 1 << 30;
        // Raw counters pulled out of the docker stats maps.
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
                .updateIfNeededAndGetDiskMetricsFor(containerName));
        // Derived figures: CPU is scaled by the number of allocated containers on
        // the host, and memory usage excludes the page cache.
        double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
        double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
        metricReceiver.declareGauge(APP, dimensions, "cpu.util").sample(cpuPercentageOfAllocated);
        metricReceiver.declareGauge(APP, dimensions, "mem.limit").sample(memoryTotalBytes);
        metricReceiver.declareGauge(APP, dimensions, "mem.used").sample(memoryTotalBytesUsed);
        metricReceiver.declareGauge(APP, dimensions, "mem.util").sample(memoryPercentUsed);
        diskTotalBytes.ifPresent(diskLimit -> metricReceiver.declareGauge(APP, dimensions, "disk.limit").sample(diskLimit));
        diskTotalBytesUsed.ifPresent(diskUsed -> metricReceiver.declareGauge(APP, dimensions, "disk.used").sample(diskUsed));
        diskPercentUsed.ifPresent(diskUtil -> metricReceiver.declareGauge(APP, dimensions, "disk.util").sample(diskUtil));
        // Per-interface network counters under the node application namespace.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
            metricReceiver.declareGauge(APP, netDims, "net.in.bytes").sample(infStats.get("rx_bytes").longValue());
            metricReceiver.declareGauge(APP, netDims, "net.in.errors").sample(infStats.get("rx_errors").longValue());
            metricReceiver.declareGauge(APP, netDims, "net.in.dropped").sample(infStats.get("rx_dropped").longValue());
            metricReceiver.declareGauge(APP, netDims, "net.out.bytes").sample(infStats.get("tx_bytes").longValue());
            metricReceiver.declareGauge(APP, netDims, "net.out.errors").sample(infStats.get("tx_errors").longValue());
            metricReceiver.declareGauge(APP, netDims, "net.out.dropped").sample(infStats.get("tx_dropped").longValue());
        });
        // The same figures are reported again under the docker application namespace.
        metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
        addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
        addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
        long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
        long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        long memoryUsage = memoryUsageTotal - memoryUsageCache;
        metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
            addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
            addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
            addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
            addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
            addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
        });
        diskTotalBytes.ifPresent(diskLimit ->
                metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskLimit));
        diskTotalBytesUsed.ifPresent(diskUsed ->
                metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.used").sample(diskUsed));
        metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
        metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
        try {
            // Best-effort push of the collected metrics into the container over RPC;
            // a timeout or serialization failure is logged rather than propagated.
            dockerOperations.executeCommandInContainerAsRoot(containerName, 1L, "rpc_invoke", "-t 1", "tcp/localhost:19091", "setExtraMetrics", buildRPCArgumentFromMetrics());
        } catch (DockerExecTimeoutException|JsonProcessingException e) {
            logger.warning("Unable to push metrics to container: " + containerName, e);
        }
    }
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("flavor", nodeSpec.nodeFlavor)
.add("state", nodeSpec.nodeState.toString())
.add("zone", environment.getZone())
.add("parentHostname", environment.getParentHostHostname());
nodeSpec.vespaVersion.ifPresent(version -> dimensionsBuilder.add("vespaVersion", version));
nodeSpec.owner.ifPresent(owner ->
dimensionsBuilder
.add("tenantName", owner.tenant)
.add("applicationName", owner.application)
.add("instanceName", owner.instance)
.add("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)
.add("app", owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership ->
dimensionsBuilder
.add("clustertype", membership.clusterType)
.add("clusterid", membership.clusterId));
Dimensions dimensions = dimensionsBuilder.build();
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_NODE, dimensions, "alive").sample(1);
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.alive").sample(1);
if (containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if (!containerStats.isPresent()) return;
Docker.ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final long bytesInGB = 1 << 30;
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
.updateIfNeededAndGetDiskMetricsFor(containerName));
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
double cpuPercentageOfAllocated = numAllocatedContainersOnHost * cpuPercentageOfHost;
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
metricReceiver.declareGauge(APP, dimensions, "cpu.util").sample(cpuPercentageOfAllocated);
metricReceiver.declareGauge(APP, dimensions, "mem.limit").sample(memoryTotalBytes);
metricReceiver.declareGauge(APP, dimensions, "mem.used").sample(memoryTotalBytesUsed);
metricReceiver.declareGauge(APP, dimensions, "mem.util").sample(memoryPercentUsed);
diskTotalBytes.ifPresent(diskLimit -> metricReceiver.declareGauge(APP, dimensions, "disk.limit").sample(diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed -> metricReceiver.declareGauge(APP, dimensions, "disk.used").sample(diskUsed));
diskPercentUsed.ifPresent(diskUtil -> metricReceiver.declareGauge(APP, dimensions, "disk.util").sample(diskUtil));
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
metricReceiver.declareGauge(APP, netDims, "net.in.bytes").sample(infStats.get("rx_bytes").longValue());
metricReceiver.declareGauge(APP, netDims, "net.in.errors").sample(infStats.get("rx_errors").longValue());
metricReceiver.declareGauge(APP, netDims, "net.in.dropped").sample(infStats.get("rx_dropped").longValue());
metricReceiver.declareGauge(APP, netDims, "net.out.bytes").sample(infStats.get("tx_bytes").longValue());
metricReceiver.declareGauge(APP, netDims, "net.out.errors").sample(infStats.get("tx_errors").longValue());
metricReceiver.declareGauge(APP, netDims, "net.out.dropped").sample(infStats.get("tx_dropped").longValue());
});
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.cpu.busy.pct").sample(cpuPercentageOfAllocated);
addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time");
addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit");
long memoryUsageTotal = ((Number) stats.getMemoryStats().get("usage")).longValue();
long memoryUsageCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
long memoryUsage = memoryUsageTotal - memoryUsageCache;
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.memory.usage").sample(memoryUsage);
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
addIfNotNull(netDims, "node.net.in.bytes", interfaceStats, "rx_bytes");
addIfNotNull(netDims, "node.net.in.errors", interfaceStats, "rx_errors");
addIfNotNull(netDims, "node.net.in.dropped", interfaceStats, "rx_dropped");
addIfNotNull(netDims, "node.net.out.bytes", interfaceStats, "tx_bytes");
addIfNotNull(netDims, "node.net.out.errors", interfaceStats, "tx_errors");
addIfNotNull(netDims, "node.net.out.dropped", interfaceStats, "tx_dropped");
});
diskTotalBytes.ifPresent(diskLimit ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.limit").sample(diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed ->
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "node.disk.used").sample(diskUsed));
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "uptime").sample(lastCpuMetric.getUptime());
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST_LIFE, dimensions, "alive").sample(1);
try {
dockerOperations.executeCommandInContainerAsRoot(containerName, 1L, "rpc_invoke", "-t 1", "tcp/localhost:19091", "setExtraMetrics", buildRPCArgumentFromMetrics());
} catch (DockerExecTimeoutException|JsonProcessingException e) {
logger.warning("Unable to push metrics to container: " + containerName, e);
}
} | class NodeAgentImpl implements NodeAgent {
// ---- Agent loop state (guarded by 'monitor' unless otherwise noted) ----
private final AtomicBoolean terminated = new AtomicBoolean(false);
// isFrozen: the loop's actual state; wantFrozen: the requested state. They converge in tick().
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
// Image currently being downloaded asynchronously, or null when none is in flight.
private DockerImage imageBeingDownloaded = null;
// ---- Collaborators, injected via the constructor ----
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
// NOTE(review): SimpleDateFormat is not thread-safe; it is only used while holding
// the debugMessages lock in addDebugMessage(), which makes this usage safe.
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
// Bounded ring of recent debug messages, newest last; guarded by its own lock.
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
// Container lifecycle as observed by this agent.
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
// Last attributes published to the node repo; null until the first publish.
private NodeAttributes lastAttributesSet = null;
// Last node spec fetched from the node repo; null until the first converge.
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
// Wires all collaborators and, if a container for this node is already running,
// adopts it: CPU accounting starts from the container's creation time and the
// state is set so the resume script will be (re-)run on the next converge.
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
// Adopt a pre-existing container (e.g. after node-admin restart).
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
logger.info("Container is already running, setting containerState to " + containerState);
});
}
/**
 * Requests this agent to freeze (pause converging) or unfreeze.
 * The request is asynchronous: the loop picks it up on its next tick.
 *
 * @param frozen the desired frozen state
 * @return true iff the agent has already reached the requested state
 */
@Override
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        boolean requestChanged = (wantFrozen != frozen);
        if (requestChanged) {
            wantFrozen = frozen;
            addDebugMessage(frozen ? "Freezing" : "Unfreezing");
            signalWorkToBeDone();
        }
        return isFrozen == frozen;
    }
}
/**
 * Appends a timestamped message to the bounded debug history (and debug log).
 * Oldest entries are evicted once the buffer exceeds 1000 messages.
 */
private void addDebugMessage(String message) {
    synchronized (debugMessages) {
        // Evict from the front so the buffer keeps only the most recent entries.
        while (debugMessages.size() > 1000) {
            debugMessages.pop();
        }
        logger.debug(message);
        String stamped = "[" + sdf.format(new Date()) + "] " + message;
        debugMessages.add(stamped);
    }
}
/**
 * Returns a snapshot of this agent's internal state for debugging/inspection:
 * hostname, frozen flags, termination flag, pending-work flag, recent debug
 * history, and the last node-repo state seen.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // Fix: lastNodeSpec is null until the first successful converge(); the previous
    // code dereferenced it unconditionally and threw NPE if debugInfo() was called
    // before the agent had fetched a node spec.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
// Starts the agent loop in its own thread, ticking every intervalMillis.
// May only be called once per agent instance.
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (!terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
/**
 * Signals the agent loop to terminate and waits up to 10 seconds for the
 * loop thread to exit; logs an error if it does not stop in time.
 *
 * @throws RuntimeException if the agent was already stopped
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        // Fix: restore the interrupt status instead of swallowing it, so callers
        // further up the stack can observe that this thread was interrupted.
        Thread.currentThread().interrupt();
        logger.error("Interrupted; Could not stop host thread " + hostname);
    }
}
/**
 * Runs the node's optional resume program once after the container (re)starts.
 * No-op unless the container is running but the resume script has not yet run.
 */
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
    if (containerState == RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
        addDebugMessage("Starting optional node program resume command");
        dockerOperations.resumeNode(containerName);
        containerState = RUNNING;
        logger.info("Resume command successfully executed, new containerState is " + containerState);
    }
}
// Builds the attribute set reflecting this agent's current view of the node and
// publishes it to the node repo if it differs from what was last published.
// Docker image / Vespa version are reported empty while no container exists.
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
.withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
/**
 * Pushes the given attributes to the node repo, but only when they differ from
 * the last published set, to avoid redundant writes on every tick.
 */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (currentAttributes.equals(lastAttributesSet)) return;
    logger.info("Publishing new set of attributes to node repo: "
            + lastAttributesSet + " -> " + currentAttributes);
    addDebugMessage("Publishing new set of attributes to node repo: {" +
            lastAttributesSet + "} -> {" + currentAttributes + "}");
    nodeRepository.updateNodeAttributes(hostname, currentAttributes);
    lastAttributesSet = currentAttributes;
}
// Starts a new container for this node: applies ACLs first (if configured),
// starts the container, resets CPU accounting, writes metrics/filebeat configs,
// and marks the resume script as pending.
private void startContainer(ContainerNodeSpec nodeSpec) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
// CPU deltas are relative to container start; restart the baseline.
lastCpuMetric = new CpuUsageReporter(clock.instant());
writeConfigs(nodeSpec);
addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
logger.info("Container successfully started, new containerState is " + containerState);
}
// Removes the container if it should no longer run; otherwise restarts its
// services when the restart generation has been bumped. Returns the container
// if it is still present after this call, empty if it was removed (or absent).
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
.map(container -> {
// Container survives: check whether a service restart was requested.
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + container + ": " + restartReason);
restartServices(nodeSpec, container);
});
return container;
});
}
/**
 * Decides whether the node's services should be restarted: yes when a restart
 * generation is wanted and the current generation is absent or lags behind it.
 *
 * @return a human-readable reason if services should restart, otherwise empty
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (!nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        // Fix: the message previously called currentRestartGeneration.get()
        // unconditionally, which throws NoSuchElementException exactly when the
        // current generation is absent — the first case this branch handles.
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.orElse(null) + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
/**
 * Restarts Vespa services in the container. Only acts when the container is
 * running and the node is active; suspends via the Orchestrator first.
 */
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    boolean runningAndActive = existingContainer.state.isRunning()
            && nodeSpec.nodeState == Node.State.active;
    if (!runningAndActive) return;
    ContainerName containerName = existingContainer.name;
    logger.info("Restarting services for " + containerName);
    // Ask permission before disrupting the node.
    orchestratorSuspendNode();
    dockerOperations.restartVespaOnNode(containerName);
}
// Stops Vespa services in the container: best-effort suspend first, then stop.
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
/**
 * Decides whether the existing container must be removed: the node is in a
 * state that forbids a container, a new Docker image is wanted, or the
 * container has stopped on its own.
 *
 * @return a human-readable removal reason, or empty if the container may stay
 */
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final Node.State state = nodeSpec.nodeState;
    if (state == Node.State.dirty || state == Node.State.provisioned) {
        return Optional.of("Node in state " + state + ", container should no longer be running");
    }
    Optional<DockerImage> wantedImage = nodeSpec.wantedDockerImage;
    if (wantedImage.isPresent() && !wantedImage.get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + wantedImage.get());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    return Optional.empty();
}
// Removes the container when shouldRemoveContainer() says so: suspends via the
// Orchestrator (active nodes only), best-effort stops services, removes the
// container and clears its metrics. Returns empty if removed, else the container.
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
if (existingContainer.state.isRunning()) {
// Only active nodes need Orchestrator permission before going down.
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
// Deliberate best-effort: removal proceeds even if stop fails.
logger.info("Failed stopping services, ignoring", e);
}
}
dockerOperations.removeContainer(existingContainer);
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
logger.info("Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
/**
 * Schedules an asynchronous download of the wanted Docker image when it differs
 * from the current one. Idempotent: a download already in flight for the wanted
 * image is not rescheduled; when no download is needed the in-flight marker is
 * cleared so isDownloadingImage() reports correctly.
 */
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
    if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
    // Fix: wantedDockerImage may be absent while currentDockerImage is set; the
    // unconditional get() below then threw NoSuchElementException. Nothing to
    // download in that case.
    if (!nodeSpec.wantedDockerImage.isPresent()) return;
    if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
        if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
            return; // this image is already being downloaded
        }
        imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        // Wake the tick loop as soon as the download completes.
        dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
    } else if (imageBeingDownloaded != null) {
        imageBeingDownloaded = null; // download finished (or no longer needed)
    }
}
/**
 * Wakes the tick loop immediately. Safe to call from any thread; a signal that
 * is already pending is not raised twice.
 */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (workToDoNow) return; // already signalled
        workToDoNow = true;
        addDebugMessage("Signaling work to be done");
        monitor.notifyAll();
    }
}
// One iteration of the agent loop: sleep until the converge interval elapses or
// work is signalled, sync the frozen flag under the lock, then converge unless
// frozen. Exceptions are contained so the loop never dies; Errors kill the JVM.
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
// Wait out the remaining interval, unless signalled via signalWorkToBeDone().
while (!workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
// Apply a pending freeze/unfreeze request.
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
// Expected when the Orchestrator denies suspend/resume; info-level only.
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
// Counted and exposed via getAndResetNumberOfUnhandledExceptions().
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
// Errors (OOM, linkage, ...) are not recoverable: take the process down.
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
// Drives the node towards the state the node repo wants: fetches the node spec
// and dispatches on its state. The order of operations within each case matters
// (e.g. container removal before node-repo updates, resume before Orchestrator).
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
Optional<Container> container = getContainer();
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
// Spec changed: drop stale metrics and refresh in-container configs.
metricReceiver.unsetMetricsForContainer(hostname);
if (container.isPresent()) {
writeConfigs(nodeSpec);
}
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
// These states must not run a container.
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
// Defer the rest of convergence until the wanted image is local.
if (isDownloadingImage()) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
if (! container.isPresent()) {
startContainer(nodeSpec);
}
runLocalResumeScriptIfNeeded(nodeSpec);
// Report attributes BEFORE resuming: once resumed, health checks may pass
// and load may arrive, so the node repo must already be up to date.
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
// Clean out application data, then hand the node back as available.
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoWithCurrentAttributes(nodeSpec);
nodeRepository.markNodeAvailableForNewAllocation(hostname);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
/**
 * Serializes all collected metrics as secret-agent reports and wraps the result
 * as a single string RPC argument of the form {@code s:'<reports>'}.
 *
 * @throws JsonProcessingException if a metric report cannot be serialized
 */
protected String buildRPCArgumentFromMetrics() throws JsonProcessingException {
    StringBuilder argument = new StringBuilder("s:'");
    for (MetricReceiverWrapper.DimensionMetrics dimensionMetrics : metricReceiver.getAllMetrics()) {
        argument.append(dimensionMetrics.toSecretAgentReport());
    }
    return argument.append("'").toString();
}
// Samples a gauge from a raw docker-stats map if the key is present; any failure
// (bad type, etc.) is logged and swallowed so one bad metric never aborts the
// whole metrics pass. 'metrics' is expected to be a Map<String, Object>.
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
Map<String, Object> metricsMap = (Map<String, Object>) metrics;
if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
try {
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
.sample(((Number) metricsMap.get(metricName)).doubleValue());
} catch (Throwable e) {
// Deliberate catch-all: metrics are best-effort.
logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
}
}
/** Writes metrics and filebeat configuration into the container, when a storage maintainer is configured. */
private void writeConfigs(ContainerNodeSpec nodeSpec) {
    if (!storageMaintainer.isPresent()) return;
    StorageMaintainer maintainer = storageMaintainer.get();
    maintainer.writeMetricsConfig(containerName, nodeSpec);
    maintainer.writeFilebeatConfig(containerName, nodeSpec);
}
/** Returns the node's container, skipping the Docker lookup when the agent knows none exists. */
private Optional<Container> getContainer() {
    return containerState == ABSENT
            ? Optional.empty()
            : dockerOperations.getContainer(containerName);
}
// Returns the hostname of the node this agent manages.
@Override
public String getHostname() {
return hostname;
}
// True while an image download scheduled by scheduleDownLoadIfNeeded() is in flight.
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
/**
 * Returns the number of unhandled exceptions seen since the last call and
 * resets the counter to zero.
 */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
// Tracks cumulative container/system CPU counters between samples and reports
// the container's CPU use as a percentage of total system CPU over each interval.
// Not thread-safe; each instance is used from the agent loop only.
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
// When the measured container was created; basis for getUptime().
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
// Returns 100 * (delta container CPU) / (delta system CPU) for the interval
// since the previous call, and advances the stored totals. Returns 0 on the
// first sample (no baseline yet) or when system CPU did not advance.
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
// Seconds since the container was created, per the injected clock.
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
// Asks the Orchestrator for permission to suspend this node; throws
// (OrchestratorException) if permission is denied, aborting the caller.
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} | class NodeAgentImpl implements NodeAgent {
// NOTE(review): everything from here on duplicates the NodeAgentImpl members
// defined earlier in this file verbatim — this looks like an extraction/join
// artifact. Confirm and deduplicate rather than maintaining two copies.
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final MetricReceiverWrapper metricReceiver;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
// Container lifecycle as observed by this agent.
enum ContainerState {
ABSENT,
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN,
RUNNING
}
private ContainerState containerState = ABSENT;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric;
// Wires collaborators and adopts an already-running container, if any.
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final MetricReceiverWrapper metricReceiver,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.metricReceiver = metricReceiver;
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
lastCpuMetric = new CpuUsageReporter(clock.instant());
dockerOperations.getContainer(containerName)
.ifPresent(container -> {
if (container.state.isRunning()) {
lastCpuMetric = new CpuUsageReporter(container.created);
}
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
logger.info("Container is already running, setting containerState to " + containerState);
});
}
// Duplicate of setFrozen() above: requests freeze/unfreeze; returns true iff
// the agent has already reached the requested state.
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
// Duplicate of addDebugMessage() above: timestamped, bounded debug history.
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
// Duplicate of debugInfo() above.
// NOTE(review): lastNodeSpec is null before the first converge — NPE risk here.
@Override
public Map<String, Object> debugInfo() {
Map<String, Object> debug = new LinkedHashMap<>();
debug.put("Hostname", hostname);
debug.put("isFrozen", isFrozen);
debug.put("wantFrozen", wantFrozen);
debug.put("terminated", terminated);
debug.put("workToDoNow", workToDoNow);
synchronized (debugMessages) {
debug.put("History", new LinkedList<>(debugMessages));
}
debug.put("Node repo state", lastNodeSpec.nodeState.name());
return debug;
}
// Duplicate of start() above: launches the tick loop thread; single-shot.
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (!terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
// Duplicate of stop() above: terminates the loop, waits up to 10s.
// NOTE(review): InterruptedException is swallowed without re-interrupting.
@Override
public void stop() {
addDebugMessage("Stopping");
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop host thread " + hostname);
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop host thread " + hostname);
}
}
// Duplicate of runLocalResumeScriptIfNeeded() above.
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (containerState != RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN) {
return;
}
addDebugMessage("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
containerState = RUNNING;
logger.info("Resume command successfully executed, new containerState is " + containerState);
}
// Duplicate of updateNodeRepoWithCurrentAttributes() above.
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
.withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
// Duplicate of publishStateToNodeRepoIfChanged() above.
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
lastAttributesSet = currentAttributes;
}
}
// Duplicate of startContainer() above.
private void startContainer(ContainerNodeSpec nodeSpec) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
lastCpuMetric = new CpuUsageReporter(clock.instant());
writeConfigs(nodeSpec);
addDebugMessage("startContainerIfNeeded: containerState " + containerState + " -> " +
RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN);
containerState = RUNNING_HOWEVER_RESUME_SCRIPT_NOT_RUN;
logger.info("Container successfully started, new containerState is " + containerState);
}
// Duplicate of removeContainerIfNeededUpdateContainerState() above.
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
.map(container -> {
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + container + ": " + restartReason);
restartServices(nodeSpec, container);
});
return container;
});
}
// Duplicate of shouldRestartServices() above.
// NOTE(review): currentRestartGeneration.get() in the message throws when the
// Optional is empty — same latent bug as in the first copy.
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
if (!nodeSpec.currentRestartGeneration.isPresent() ||
nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
}
return Optional.empty();
}
// Duplicate of restartServices() above.
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
// Duplicate of stopServices() above.
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
// Duplicate of shouldRemoveContainer() above.
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
return Optional.empty();
}
// Duplicate of removeContainerIfNeeded() above.
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
if (existingContainer.state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
logger.info("Failed stopping services, ignoring", e);
}
}
dockerOperations.removeContainer(existingContainer);
metricReceiver.unsetMetricsForContainer(hostname);
containerState = ABSENT;
logger.info("Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
// Duplicate of scheduleDownLoadIfNeeded() above.
// NOTE(review): same latent bug as the first copy — wantedDockerImage.get() is
// called without an isPresent() guard.
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
// Duplicate of signalWorkToBeDone() above: wakes the tick loop.
private void signalWorkToBeDone() {
synchronized (monitor) {
if (!workToDoNow) {
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
}
// Duplicate of tick() above: one loop iteration — wait, sync frozen flag, converge.
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (!workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
// Duplicate of converge() above: drives the node towards the node-repo-wanted
// state by dispatching on nodeSpec.nodeState.
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
Optional<Container> container = getContainer();
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
metricReceiver.unsetMetricsForContainer(hostname);
if (container.isPresent()) {
writeConfigs(nodeSpec);
}
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
if (isDownloadingImage()) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
if (! container.isPresent()) {
startContainer(nodeSpec);
}
runLocalResumeScriptIfNeeded(nodeSpec);
// Attributes must be published before resuming in the Orchestrator.
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoWithCurrentAttributes(nodeSpec);
nodeRepository.markNodeAvailableForNewAllocation(hostname);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
@SuppressWarnings("unchecked")
protected String buildRPCArgumentFromMetrics() throws JsonProcessingException {
StringBuilder params = new StringBuilder();
for (MetricReceiverWrapper.DimensionMetrics dimensionMetrics : metricReceiver.getAllMetrics()) {
params.append(dimensionMetrics.toSecretAgentReport());
}
return "s:'" + params.toString() + "'";
}
@SuppressWarnings("unchecked")
private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) {
Map<String, Object> metricsMap = (Map<String, Object>) metrics;
if (metricsMap == null || !metricsMap.containsKey(metricName)) return;
try {
metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, yamasName)
.sample(((Number) metricsMap.get(metricName)).doubleValue());
} catch (Throwable e) {
logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e);
}
}
private void writeConfigs(ContainerNodeSpec nodeSpec) {
storageMaintainer.ifPresent(maintainer -> {
maintainer.writeMetricsConfig(containerName, nodeSpec);
maintainer.writeFilebeatConfig(containerName, nodeSpec);
});
}
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
return dockerOperations.getContainer(containerName);
}
@Override
public String getHostname() {
return hostname;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
private final Instant created;
CpuUsageReporter(Instant created) {
this.created = created;
}
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
long getUptime() {
return Duration.between(created, clock.instant()).getSeconds();
}
}
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} |
Maybe add a little more context? | protected void destroy() {
boolean alreadyDestructed = destructor.destruct();
if (alreadyDestructed) {
throw new IllegalStateException("Already destructed!");
}
} | throw new IllegalStateException("Already destructed!"); | protected void destroy() {
boolean alreadyDestructed = destructor.destruct();
if (alreadyDestructed) {
throw new IllegalStateException(
"Already destructed! This should not occur unless destroy have been called directly!");
}
} | class ActiveContainer extends AbstractResource implements CurrentContainer {
private final ContainerTermination termination;
private final Injector guiceInjector;
private final Iterable<ServerProvider> serverProviders;
private final ResourcePool resourceReferences = new ResourcePool();
private final Map<String, BindingSet<RequestHandler>> serverBindings;
private final Map<String, BindingSet<RequestHandler>> clientBindings;
private final BindingSetSelector bindingSetSelector;
private final TimeoutManagerImpl timeoutMgr;
final Destructor destructor;
public ActiveContainer(ContainerBuilder builder) {
serverProviders = builder.serverProviders().activate();
serverProviders.forEach(resourceReferences::retain);
serverBindings = builder.activateServerBindings();
serverBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
clientBindings = builder.activateClientBindings();
clientBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
bindingSetSelector = builder.getInstance(BindingSetSelector.class);
timeoutMgr = builder.getInstance(TimeoutManagerImpl.class);
timeoutMgr.start();
builder.guiceModules().install(new AbstractModule() {
@Override
protected void configure() {
bind(TimeoutManagerImpl.class).toInstance(timeoutMgr);
}
});
guiceInjector = builder.guiceModules().activate();
termination = new ContainerTermination(builder.appContext());
destructor = new Destructor(resourceReferences, timeoutMgr, termination);
}
@Override
/**
* Make this instance retain a reference to the resource until it is destroyed.
*/
void retainReference(SharedResource resource) {
resourceReferences.retain(resource);
}
public ContainerTermination shutdown() {
return termination;
}
public Injector guiceInjector() {
return guiceInjector;
}
public Iterable<ServerProvider> serverProviders() {
return serverProviders;
}
public Map<String, BindingSet<RequestHandler>> serverBindings() {
return serverBindings;
}
public BindingSet<RequestHandler> serverBindings(String setName) {
return serverBindings.get(setName);
}
public Map<String, BindingSet<RequestHandler>> clientBindings() {
return clientBindings;
}
public BindingSet<RequestHandler> clientBindings(String setName) {
return clientBindings.get(setName);
}
TimeoutManagerImpl timeoutManager() {
return timeoutMgr;
}
@Override
public ContainerSnapshot newReference(URI uri) {
String name = bindingSetSelector.select(uri);
if (name == null) {
throw new NoBindingSetSelectedException(uri);
}
BindingSet<RequestHandler> serverBindings = serverBindings(name);
BindingSet<RequestHandler> clientBindings = clientBindings(name);
if (serverBindings == null || clientBindings == null) {
throw new BindingSetNotFoundException(name);
}
return new ContainerSnapshot(this, serverBindings, clientBindings);
}
static class Destructor {
private final ResourcePool resourceReferences;
private final TimeoutManagerImpl timeoutMgr;
private final ContainerTermination termination;
private final AtomicBoolean done = new AtomicBoolean();
private Destructor(ResourcePool resourceReferences,
TimeoutManagerImpl timeoutMgr,
ContainerTermination termination) {
this.resourceReferences = resourceReferences;
this.timeoutMgr = timeoutMgr;
this.termination = termination;
}
boolean destruct() {
boolean alreadyDestructed = this.done.getAndSet(true);
if (!alreadyDestructed) {
resourceReferences.release();
timeoutMgr.shutdown();
termination.run();
}
return alreadyDestructed;
}
}
} | class ActiveContainer extends AbstractResource implements CurrentContainer {
private final ContainerTermination termination;
private final Injector guiceInjector;
private final Iterable<ServerProvider> serverProviders;
private final ResourcePool resourceReferences = new ResourcePool();
private final Map<String, BindingSet<RequestHandler>> serverBindings;
private final Map<String, BindingSet<RequestHandler>> clientBindings;
private final BindingSetSelector bindingSetSelector;
private final TimeoutManagerImpl timeoutMgr;
final Destructor destructor;
public ActiveContainer(ContainerBuilder builder) {
serverProviders = builder.serverProviders().activate();
serverProviders.forEach(resourceReferences::retain);
serverBindings = builder.activateServerBindings();
serverBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
clientBindings = builder.activateClientBindings();
clientBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
bindingSetSelector = builder.getInstance(BindingSetSelector.class);
timeoutMgr = builder.getInstance(TimeoutManagerImpl.class);
timeoutMgr.start();
builder.guiceModules().install(new AbstractModule() {
@Override
protected void configure() {
bind(TimeoutManagerImpl.class).toInstance(timeoutMgr);
}
});
guiceInjector = builder.guiceModules().activate();
termination = new ContainerTermination(builder.appContext());
destructor = new Destructor(resourceReferences, timeoutMgr, termination);
}
@Override
/**
* Make this instance retain a reference to the resource until it is destroyed.
*/
void retainReference(SharedResource resource) {
resourceReferences.retain(resource);
}
public ContainerTermination shutdown() {
return termination;
}
public Injector guiceInjector() {
return guiceInjector;
}
public Iterable<ServerProvider> serverProviders() {
return serverProviders;
}
public Map<String, BindingSet<RequestHandler>> serverBindings() {
return serverBindings;
}
public BindingSet<RequestHandler> serverBindings(String setName) {
return serverBindings.get(setName);
}
public Map<String, BindingSet<RequestHandler>> clientBindings() {
return clientBindings;
}
public BindingSet<RequestHandler> clientBindings(String setName) {
return clientBindings.get(setName);
}
TimeoutManagerImpl timeoutManager() {
return timeoutMgr;
}
@Override
public ContainerSnapshot newReference(URI uri) {
String name = bindingSetSelector.select(uri);
if (name == null) {
throw new NoBindingSetSelectedException(uri);
}
BindingSet<RequestHandler> serverBindings = serverBindings(name);
BindingSet<RequestHandler> clientBindings = clientBindings(name);
if (serverBindings == null || clientBindings == null) {
throw new BindingSetNotFoundException(name);
}
return new ContainerSnapshot(this, serverBindings, clientBindings);
}
static class Destructor {
private final ResourcePool resourceReferences;
private final TimeoutManagerImpl timeoutMgr;
private final ContainerTermination termination;
private final AtomicBoolean done = new AtomicBoolean();
private Destructor(ResourcePool resourceReferences,
TimeoutManagerImpl timeoutMgr,
ContainerTermination termination) {
this.resourceReferences = resourceReferences;
this.timeoutMgr = timeoutMgr;
this.termination = termination;
}
boolean destruct() {
boolean alreadyDestructed = this.done.getAndSet(true);
if (!alreadyDestructed) {
resourceReferences.release();
timeoutMgr.shutdown();
termination.run();
}
return alreadyDestructed;
}
}
} |
Fixed in last commit | protected void destroy() {
boolean alreadyDestructed = destructor.destruct();
if (alreadyDestructed) {
throw new IllegalStateException("Already destructed!");
}
} | throw new IllegalStateException("Already destructed!"); | protected void destroy() {
boolean alreadyDestructed = destructor.destruct();
if (alreadyDestructed) {
throw new IllegalStateException(
"Already destructed! This should not occur unless destroy have been called directly!");
}
} | class ActiveContainer extends AbstractResource implements CurrentContainer {
private final ContainerTermination termination;
private final Injector guiceInjector;
private final Iterable<ServerProvider> serverProviders;
private final ResourcePool resourceReferences = new ResourcePool();
private final Map<String, BindingSet<RequestHandler>> serverBindings;
private final Map<String, BindingSet<RequestHandler>> clientBindings;
private final BindingSetSelector bindingSetSelector;
private final TimeoutManagerImpl timeoutMgr;
final Destructor destructor;
public ActiveContainer(ContainerBuilder builder) {
serverProviders = builder.serverProviders().activate();
serverProviders.forEach(resourceReferences::retain);
serverBindings = builder.activateServerBindings();
serverBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
clientBindings = builder.activateClientBindings();
clientBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
bindingSetSelector = builder.getInstance(BindingSetSelector.class);
timeoutMgr = builder.getInstance(TimeoutManagerImpl.class);
timeoutMgr.start();
builder.guiceModules().install(new AbstractModule() {
@Override
protected void configure() {
bind(TimeoutManagerImpl.class).toInstance(timeoutMgr);
}
});
guiceInjector = builder.guiceModules().activate();
termination = new ContainerTermination(builder.appContext());
destructor = new Destructor(resourceReferences, timeoutMgr, termination);
}
@Override
/**
* Make this instance retain a reference to the resource until it is destroyed.
*/
void retainReference(SharedResource resource) {
resourceReferences.retain(resource);
}
public ContainerTermination shutdown() {
return termination;
}
public Injector guiceInjector() {
return guiceInjector;
}
public Iterable<ServerProvider> serverProviders() {
return serverProviders;
}
public Map<String, BindingSet<RequestHandler>> serverBindings() {
return serverBindings;
}
public BindingSet<RequestHandler> serverBindings(String setName) {
return serverBindings.get(setName);
}
public Map<String, BindingSet<RequestHandler>> clientBindings() {
return clientBindings;
}
public BindingSet<RequestHandler> clientBindings(String setName) {
return clientBindings.get(setName);
}
TimeoutManagerImpl timeoutManager() {
return timeoutMgr;
}
@Override
public ContainerSnapshot newReference(URI uri) {
String name = bindingSetSelector.select(uri);
if (name == null) {
throw new NoBindingSetSelectedException(uri);
}
BindingSet<RequestHandler> serverBindings = serverBindings(name);
BindingSet<RequestHandler> clientBindings = clientBindings(name);
if (serverBindings == null || clientBindings == null) {
throw new BindingSetNotFoundException(name);
}
return new ContainerSnapshot(this, serverBindings, clientBindings);
}
static class Destructor {
private final ResourcePool resourceReferences;
private final TimeoutManagerImpl timeoutMgr;
private final ContainerTermination termination;
private final AtomicBoolean done = new AtomicBoolean();
private Destructor(ResourcePool resourceReferences,
TimeoutManagerImpl timeoutMgr,
ContainerTermination termination) {
this.resourceReferences = resourceReferences;
this.timeoutMgr = timeoutMgr;
this.termination = termination;
}
boolean destruct() {
boolean alreadyDestructed = this.done.getAndSet(true);
if (!alreadyDestructed) {
resourceReferences.release();
timeoutMgr.shutdown();
termination.run();
}
return alreadyDestructed;
}
}
} | class ActiveContainer extends AbstractResource implements CurrentContainer {
private final ContainerTermination termination;
private final Injector guiceInjector;
private final Iterable<ServerProvider> serverProviders;
private final ResourcePool resourceReferences = new ResourcePool();
private final Map<String, BindingSet<RequestHandler>> serverBindings;
private final Map<String, BindingSet<RequestHandler>> clientBindings;
private final BindingSetSelector bindingSetSelector;
private final TimeoutManagerImpl timeoutMgr;
final Destructor destructor;
public ActiveContainer(ContainerBuilder builder) {
serverProviders = builder.serverProviders().activate();
serverProviders.forEach(resourceReferences::retain);
serverBindings = builder.activateServerBindings();
serverBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
clientBindings = builder.activateClientBindings();
clientBindings.forEach(
(ignoredName, bindingSet) -> bindingSet.forEach(
binding -> resourceReferences.retain(binding.getValue())));
bindingSetSelector = builder.getInstance(BindingSetSelector.class);
timeoutMgr = builder.getInstance(TimeoutManagerImpl.class);
timeoutMgr.start();
builder.guiceModules().install(new AbstractModule() {
@Override
protected void configure() {
bind(TimeoutManagerImpl.class).toInstance(timeoutMgr);
}
});
guiceInjector = builder.guiceModules().activate();
termination = new ContainerTermination(builder.appContext());
destructor = new Destructor(resourceReferences, timeoutMgr, termination);
}
@Override
/**
* Make this instance retain a reference to the resource until it is destroyed.
*/
void retainReference(SharedResource resource) {
resourceReferences.retain(resource);
}
public ContainerTermination shutdown() {
return termination;
}
public Injector guiceInjector() {
return guiceInjector;
}
public Iterable<ServerProvider> serverProviders() {
return serverProviders;
}
public Map<String, BindingSet<RequestHandler>> serverBindings() {
return serverBindings;
}
public BindingSet<RequestHandler> serverBindings(String setName) {
return serverBindings.get(setName);
}
public Map<String, BindingSet<RequestHandler>> clientBindings() {
return clientBindings;
}
public BindingSet<RequestHandler> clientBindings(String setName) {
return clientBindings.get(setName);
}
TimeoutManagerImpl timeoutManager() {
return timeoutMgr;
}
@Override
public ContainerSnapshot newReference(URI uri) {
String name = bindingSetSelector.select(uri);
if (name == null) {
throw new NoBindingSetSelectedException(uri);
}
BindingSet<RequestHandler> serverBindings = serverBindings(name);
BindingSet<RequestHandler> clientBindings = clientBindings(name);
if (serverBindings == null || clientBindings == null) {
throw new BindingSetNotFoundException(name);
}
return new ContainerSnapshot(this, serverBindings, clientBindings);
}
static class Destructor {
private final ResourcePool resourceReferences;
private final TimeoutManagerImpl timeoutMgr;
private final ContainerTermination termination;
private final AtomicBoolean done = new AtomicBoolean();
private Destructor(ResourcePool resourceReferences,
TimeoutManagerImpl timeoutMgr,
ContainerTermination termination) {
this.resourceReferences = resourceReferences;
this.timeoutMgr = timeoutMgr;
this.termination = termination;
}
boolean destruct() {
boolean alreadyDestructed = this.done.getAndSet(true);
if (!alreadyDestructed) {
resourceReferences.release();
timeoutMgr.shutdown();
termination.run();
}
return alreadyDestructed;
}
}
} |
If I understand this correctly, there is a chance that failures in a different policy might set the context reply directly instead of these being in a context child node? Seems at least `AsyncInitializationPolicy` does this. | public void merge(RoutingContext context) {
RoutingNodeIterator it = context.getChildIterator();
Reply reply = (it.hasReply()) ? it.removeReply() : context.getReply();
if (reply == null) {
reply = new EmptyReply();
reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE,
"No reply in any children, nor in the routing context: " + context));
}
if (reply instanceof WrongDistributionReply) {
distributorSelectionLogic.handleWrongDistribution((WrongDistributionReply) reply, context);
} else if (reply.hasErrors()) {
distributorSelectionLogic.handleErrorReply(reply, context.getContext());
} else if (reply instanceof WriteDocumentReply) {
if (context.shouldTrace(9)) {
context.trace(9, "Modification timestamp: " + ((WriteDocumentReply)reply).getHighestModificationTimestamp());
}
}
context.setReply(reply);
} | Reply reply = (it.hasReply()) ? it.removeReply() : context.getReply(); | public void merge(RoutingContext context) {
RoutingNodeIterator it = context.getChildIterator();
Reply reply = (it.hasReply()) ? it.removeReply() : context.getReply();
if (reply == null) {
reply = new EmptyReply();
reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE,
"No reply in any children, nor in the routing context: " + context));
}
if (reply instanceof WrongDistributionReply) {
distributorSelectionLogic.handleWrongDistribution((WrongDistributionReply) reply, context);
} else if (reply.hasErrors()) {
distributorSelectionLogic.handleErrorReply(reply, context.getContext());
} else if (reply instanceof WriteDocumentReply) {
if (context.shouldTrace(9)) {
context.trace(9, "Modification timestamp: " + ((WriteDocumentReply)reply).getHighestModificationTimestamp());
}
}
context.setReply(reply);
} | class MessageContext {
Integer calculatedDistributor;
ClusterState usedState;
public MessageContext(ClusterState usedState) { this.usedState = usedState; }
public String toString() {
return "Context(Distributor " + calculatedDistributor +
", state version " + usedState.getVersion() + ")";
}
} | class MessageContext {
Integer calculatedDistributor;
ClusterState usedState;
public MessageContext(ClusterState usedState) { this.usedState = usedState; }
public String toString() {
return "Context(Distributor " + calculatedDistributor +
", state version " + usedState.getVersion() + ")";
}
} |
Yes, that was what I concluded. And If there is nothing to iterator over it.removeReply() will generate NullPointerException preventing the error handling later in the method to happen. If it is legitimate that there are no Children I do not know. But hopefully this might shed some more light into that. | public void merge(RoutingContext context) {
RoutingNodeIterator it = context.getChildIterator();
Reply reply = (it.hasReply()) ? it.removeReply() : context.getReply();
if (reply == null) {
reply = new EmptyReply();
reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE,
"No reply in any children, nor in the routing context: " + context));
}
if (reply instanceof WrongDistributionReply) {
distributorSelectionLogic.handleWrongDistribution((WrongDistributionReply) reply, context);
} else if (reply.hasErrors()) {
distributorSelectionLogic.handleErrorReply(reply, context.getContext());
} else if (reply instanceof WriteDocumentReply) {
if (context.shouldTrace(9)) {
context.trace(9, "Modification timestamp: " + ((WriteDocumentReply)reply).getHighestModificationTimestamp());
}
}
context.setReply(reply);
} | Reply reply = (it.hasReply()) ? it.removeReply() : context.getReply(); | public void merge(RoutingContext context) {
RoutingNodeIterator it = context.getChildIterator();
Reply reply = (it.hasReply()) ? it.removeReply() : context.getReply();
if (reply == null) {
reply = new EmptyReply();
reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE,
"No reply in any children, nor in the routing context: " + context));
}
if (reply instanceof WrongDistributionReply) {
distributorSelectionLogic.handleWrongDistribution((WrongDistributionReply) reply, context);
} else if (reply.hasErrors()) {
distributorSelectionLogic.handleErrorReply(reply, context.getContext());
} else if (reply instanceof WriteDocumentReply) {
if (context.shouldTrace(9)) {
context.trace(9, "Modification timestamp: " + ((WriteDocumentReply)reply).getHighestModificationTimestamp());
}
}
context.setReply(reply);
} | class MessageContext {
Integer calculatedDistributor;
ClusterState usedState;
public MessageContext(ClusterState usedState) { this.usedState = usedState; }
public String toString() {
return "Context(Distributor " + calculatedDistributor +
", state version " + usedState.getVersion() + ")";
}
} | class MessageContext {
Integer calculatedDistributor;
ClusterState usedState;
public MessageContext(ClusterState usedState) { this.usedState = usedState; }
public String toString() {
return "Context(Distributor " + calculatedDistributor +
", state version " + usedState.getVersion() + ")";
}
} |
why only prod zones? shouldn't we do this for all zones in CD? | public int distributionBits() {
List<Zone> zonesWith16DistributionBits = Arrays.asList(createZone(Environment.prod, "us-west-1"),
createZone(Environment.prod, "us-central-1"),
createZone(Environment.prod, "eu-west-1"),
createZone(Environment.prod, "ap-northeast-1"),
createZone(Environment.prod, "ap-northeast-2"),
createZone(Environment.prod, "us-east-3"));
if ((zone.system() == SystemName.cd && zone.environment() == Environment.prod) ||
zonesWith16DistributionBits.contains(zone))
return 16;
else
return DistributionBitCalculator.getDistributionBits(getNodeCountPerGroup(), getDistributionMode());
} | if ((zone.system() == SystemName.cd && zone.environment() == Environment.prod) || | public int distributionBits() {
List<Zone> zonesWith16DistributionBits = Arrays.asList(createZone(Environment.prod, "us-west-1"),
createZone(Environment.prod, "us-central-1"),
createZone(Environment.prod, "eu-west-1"),
createZone(Environment.prod, "ap-northeast-1"),
createZone(Environment.prod, "ap-northeast-2"),
createZone(Environment.prod, "us-east-3"));
if (zone.system() == SystemName.cd || zonesWith16DistributionBits.contains(zone))
return 16;
else
return DistributionBitCalculator.getDistributionBits(getNodeCountPerGroup(), getDistributionMode());
} | class Builder {
/** The admin model of this system or null if none (which only happens in tests) */
private final Admin admin;
public Builder(Admin admin) {
this.admin = admin;
}
public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) {
ModelElement contentElement = new ModelElement(w3cContentElement);
ModelElement documentsElement = contentElement.getChild("documents");
Map<String, NewDocumentType> documentDefinitions =
new SearchDefinitionBuilder().build(context.getDeployState().getDocumentModel().getDocumentManager(), documentsElement);
String routingSelection = new DocumentSelectionBuilder().build(documentsElement);
Redundancy redundancy = new RedundancyBuilder().build(contentElement);
Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement);
ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterName(contentElement), documentDefinitions,
globallyDistributedDocuments, routingSelection, redundancy,
context.getDeployState().getProperties().zone());
c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterName(contentElement), contentElement).build(c, contentElement.getXml());
c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments).build(c, contentElement.getXml());
c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c);
c.storageNodes = new StorageCluster.Builder().build(c, w3cContentElement);
c.distributorNodes = new DistributorCluster.Builder(c).build(c, w3cContentElement);
c.rootGroup = new StorageGroup.Builder(contentElement, c, context).buildRootGroup();
validateThatGroupSiblingsAreUnique(c.clusterName, c.rootGroup);
redundancy.setExplicitGroups(c.getRootGroup().getNumberOfLeafGroups());
c.search.handleRedundancy(redundancy);
IndexedSearchCluster index = c.search.getIndexed();
if (index != null) {
setupIndexedCluster(index, contentElement);
}
if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) {
throw new RuntimeException("If you have indexed search you need to have proton as engine");
}
if (documentsElement != null) {
ModelElement e = documentsElement.getChild("document-processing");
if (e != null) {
setupDocumentProcessing(c, e);
}
} else if (c.persistenceFactory != null) {
throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified.");
}
ModelElement tuning = contentElement.getChild("tuning");
if (tuning != null) {
setupTuning(c, tuning);
}
if (context.getParentProducer().getRoot() == null) return c;
addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterName, c);
return c;
}
private void setupIndexedCluster(IndexedSearchCluster index, ModelElement element) {
ContentSearch search = DomContentSearchBuilder.build(element);
Double queryTimeout = search.getQueryTimeout();
if (queryTimeout != null) {
Preconditions.checkState(index.getQueryTimeout() == null,
"You may not specify query-timeout in both proton and content.");
index.setQueryTimeout(queryTimeout);
}
Double visibilityDelay = search.getVisibilityDelay();
if (visibilityDelay != null) {
index.setVisibilityDelay(visibilityDelay);
}
index.setSearchCoverage(DomSearchCoverageBuilder.build(element));
index.setDispatchSpec(DomDispatchBuilder.build(element));
if (index.useMultilevelDispatchSetup()) {
new MultilevelDispatchValidator(index.getClusterName(), index.getDispatchSpec(), index.getSearchNodes()).validate();
}
TuningDispatch tuningDispatch = DomTuningDispatchBuilder.build(element);
Integer maxHitsPerPartition = tuningDispatch.getMaxHitsPerPartition();
Boolean useLocalNode = tuningDispatch.getUseLocalNode();
if (index.getTuning() == null) {
index.setTuning(new Tuning(index));
}
if (index.getTuning().dispatch == null) {
index.getTuning().dispatch = new Tuning.Dispatch();
}
if (maxHitsPerPartition != null) {
index.getTuning().dispatch.maxHitsPerPartition = maxHitsPerPartition;
}
if (useLocalNode != null) {
index.getTuning().dispatch.useLocalNode = useLocalNode;
}
index.getTuning().dispatch.minGroupCoverage = tuningDispatch.getMinGroupCoverage();
index.getTuning().dispatch.minActiveDocsCoverage = tuningDispatch.getMinActiveDocsCoverage();
index.getTuning().dispatch.policy = tuningDispatch.getDispatchPolicy();
}
private void setupDocumentProcessing(ContentCluster c, ModelElement e) {
String docprocCluster = e.getStringAttribute("cluster");
if (docprocCluster != null) {
docprocCluster = docprocCluster.trim();
}
if (c.getSearch().hasIndexedCluster()) {
if (docprocCluster != null && !docprocCluster.isEmpty()) {
c.getSearch().getIndexed().setIndexingClusterName(docprocCluster);
}
}
String docprocChain = e.getStringAttribute("chain");
if (docprocChain != null) {
docprocChain = docprocChain.trim();
}
if (c.getSearch().hasIndexedCluster()) {
if (docprocChain != null && !docprocChain.isEmpty()) {
c.getSearch().getIndexed().setIndexingChainName(docprocChain);
}
}
}
private void setupTuning(ContentCluster c, ModelElement tuning) {
ModelElement distribution = tuning.getChild("distribution");
if (distribution != null) {
String attr = distribution.getStringAttribute("type");
if (attr != null) {
if (attr.toLowerCase().equals("strict")) {
c.distributionMode = DistributionMode.STRICT;
} else if (attr.toLowerCase().equals("loose")) {
c.distributionMode = DistributionMode.LOOSE;
} else if (attr.toLowerCase().equals("legacy")) {
c.distributionMode = DistributionMode.LEGACY;
} else {
throw new IllegalStateException("Distribution type " + attr + " not supported.");
}
}
}
ModelElement merges = tuning.getChild("merges");
if (merges != null) {
Integer attr = merges.getIntegerAttribute("max-nodes-per-merge");
if (attr != null) {
c.maxNodesPerMerge = attr;
}
}
}
private void validateGroupSiblings(String cluster, StorageGroup group) {
HashSet<String> siblings = new HashSet<>();
for (StorageGroup g : group.getSubgroups()) {
String name = g.getName();
if (siblings.contains(name)) {
throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " +
"with name '" + name + "' in the same subgroup. Group sibling names must be unique.");
}
siblings.add(name);
}
}
private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) {
if (group == null) {
return;
}
validateGroupSiblings(cluster, group);
for (StorageGroup g : group.getSubgroups()) {
validateThatGroupSiblingsAreUnique(cluster, g);
}
}
private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context,
StorageGroup rootGroup, ModelElement contentElement,
String contentClusterName, ContentCluster contentCluster) {
if (admin == null) return;
if (contentCluster.getPersistence() == null) return;
ContainerCluster clusterControllers;
ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster);
if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) {
clusterControllers = overlappingCluster.getClusterControllers();
}
else if (admin.multitenant()) {
String clusterName = contentClusterName + "-controllers";
NodesSpecification nodesSpecification =
NodesSpecification.optionalDedicatedFromParent(contentElement.getChild("controllers"), context.getDeployState().getWantedNodeVespaVersion())
.orElse(NodesSpecification.nonDedicated(3, context.getDeployState().getWantedNodeVespaVersion()));
Collection<HostResource> hosts = nodesSpecification.isDedicated() ?
getControllerHosts(nodesSpecification, admin, clusterName, context) :
drawControllerHosts(nodesSpecification.count(), rootGroup, containers);
clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true);
contentCluster.clusterControllers = clusterControllers;
}
else {
clusterControllers = admin.getClusterControllers();
if (clusterControllers == null) {
List<HostResource> hosts = admin.getClusterControllerHosts();
if (hosts.size() > 1) {
admin.deployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly.");
}
clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false);
admin.setClusterControllers(clusterControllers);
}
}
addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster);
}
/** Returns any other content cluster which shares nodes with this, or null if none are built */
private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) {
    // Walk every built content cluster in the model tree and report the first one
    // (other than ourselves) whose storage nodes share a host with ours.
    List<ContentCluster> candidates = root.getChildrenByTypeRecursive(ContentCluster.class);
    for (ContentCluster candidate : candidates) {
        if (candidate == contentCluster) continue;
        if (overlaps(contentCluster, candidate)) return candidate;
    }
    return null;
}
private boolean overlaps(ContentCluster c1, ContentCluster c2) {
    // Two clusters overlap when at least one host carries storage nodes from both.
    Set<HostResource> firstClusterHosts = c1.getRootGroup().recursiveGetNodes().stream()
            .map(StorageNode::getHostResource)
            .collect(Collectors.toSet());
    return c2.getRootGroup().recursiveGetNodes().stream()
            .map(StorageNode::getHostResource)
            .anyMatch(firstClusterHosts::contains);
}
/** Provisions dedicated hosts for the cluster controllers in the admin cluster and returns them. */
private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) {
return nodesSpecification.provision(admin.getHostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger()).keySet();
}
/**
 * Draws up to <code>count</code> content hosts to run cluster controllers on.
 * Returns an odd number of hosts, since an even-sized controller group cannot
 * form a clean majority.
 *
 * NOTE(review): the <code>containers</code> parameter is unused in this method;
 * kept for interface compatibility with callers.
 */
private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) {
    List<HostResource> hosts = drawContentHostsRecursively(count, rootGroup);
    // Drop one host when the count is even. Guard against the empty list: the
    // previous unconditional subList(0, hosts.size()-1) threw
    // IndexOutOfBoundsException when no hosts were drawn (0 is even).
    if (hosts.size() % 2 == 0 && ! hosts.isEmpty())
        hosts = hosts.subList(0, hosts.size() - 1);
    return hosts;
}
/**
 * Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible
 * if less than <code>count</code> are available.
 *
 * This will draw the same nodes each time it is
 * invoked if cluster names and node indexes are unchanged.
 */
private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters,
                                              Set<HostResource> usedHosts) {
    if (containerClusters.isEmpty()) return Collections.emptyList();
    // Deterministic ordering: clusters sorted by name, containers by index within each cluster.
    List<HostResource> orderedHosts = new ArrayList<>();
    for (ContainerCluster cluster : clustersSortedByName(containerClusters))
        orderedHosts.addAll(hostResourcesSortedByIndex(cluster));
    // Keep hosts that are unused, do not already run a cluster controller, and
    // have not occurred earlier in the ordering (first occurrence wins, as with distinct()).
    List<HostResource> eligible = new ArrayList<>();
    for (HostResource host : orderedHosts) {
        if (usedHosts.contains(host)) continue;
        if (hostHasClusterController(host.getHostName(), orderedHosts)) continue;
        if (eligible.contains(host)) continue;
        eligible.add(host);
    }
    return eligible.subList(0, Math.min(eligible.size(), count));
}
/** Returns the clusters of the given models, sorted by cluster name for deterministic iteration. */
private List<ContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) {
    List<ContainerCluster> clusters = new ArrayList<>();
    for (ContainerModel model : containerModels)
        clusters.add(model.getCluster());
    clusters.sort(Comparator.comparing(ContainerCluster::getName));
    return clusters;
}
/** Returns the hosts of the cluster's containers, ordered by container index. */
private List<HostResource> hostResourcesSortedByIndex(ContainerCluster cluster) {
    List<Container> containersByIndex = new ArrayList<>(cluster.getContainers());
    containersByIndex.sort(Comparator.comparing(Container::index));
    List<HostResource> hosts = new ArrayList<>(containersByIndex.size());
    for (Container container : containersByIndex)
        hosts.add(container.getHostResource());
    return hosts;
}
/** Returns whether any host having the given hostname has a cluster controller */
private boolean hostHasClusterController(String hostname, List<HostResource> hosts) {
    // Check every entry matching the hostname; several HostResource entries may share one name.
    return hosts.stream()
            .filter(host -> host.getHostName().equals(hostname))
            .anyMatch(this::hasClusterController);
}
/** Returns whether one of the host's services is a cluster controller container. */
private boolean hasClusterController(HostResource host) {
    boolean found = false;
    for (Service service : host.getServices()) {
        if (service instanceof ClusterControllerContainer) {
            found = true;
            break;
        }
    }
    return found;
}
/**
 * Draw <code>count</code> nodes from as many different content groups below this as possible.
 * This will only achieve maximum spread in the case where the groups are balanced and never on the same
 * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy.
 */
private List<HostResource> drawContentHostsRecursively(int count, StorageGroup group) {
Set<HostResource> hosts = new HashSet<>();
if (group.getNodes().isEmpty()) {
// Inner group: split the request evenly (rounded up) across subgroups and recurse.
int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size());
for (StorageGroup subgroup : group.getSubgroups())
hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, subgroup));
}
else {
// Leaf group: collect the hosts of all non-retired nodes (the set dedupes shared hosts).
hosts.addAll(group.getNodes().stream()
.filter(node -> ! node.isRetired())
.map(StorageNode::getHostResource).collect(Collectors.toList()));
}
// Sort for a deterministic draw across invocations, then cap at the requested count.
List<HostResource> sortedHosts = new ArrayList<>(hosts);
Collections.sort(sortedHosts);
sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size()));
return sortedHosts;
}
/**
 * Creates a container cluster named <code>name</code> under <code>parent</code>,
 * holding one ClusterControllerContainer per given host, indexed in iteration order.
 */
private ContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant) {
ContainerCluster clusterControllers = new ContainerCluster(parent, name, name, new ClusterControllerClusterVerifier());
List<Container> containers = new ArrayList<>();
// NOTE(review): the cluster was constructed just above, so getContainers() is
// presumably always empty here -- this guard looks redundant; confirm before removing.
if (clusterControllers.getContainers().isEmpty()) {
int index = 0;
for (HostResource host : hosts) {
ClusterControllerContainer clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant);
clusterControllerContainer.setHostResource(host);
clusterControllerContainer.initService();
// Properties identify the container as an admin-cluster member with a stable index.
clusterControllerContainer.setProp("clustertype", "admin")
.setProp("clustername", clusterControllers.getName())
.setProp("index", String.valueOf(index));
containers.add(clusterControllerContainer);
++index;
}
}
clusterControllers.addContainers(containers);
ContainerModelBuilder.addDefaultHandler_legacyBuilder(clusterControllers);
return clusterControllers;
}
/** Gives every controller container a controller component (once) plus a configurer for this content cluster. */
private void addClusterControllerComponentsForThisCluster(ContainerCluster clusterControllers, ContentCluster contentCluster) {
    int containerCount = clusterControllers.getContainers().size();
    int position = 0;
    for (Container container : clusterControllers.getContainers()) {
        // The generic controller component is shared across content clusters; add it at most once.
        if ( ! hasClusterControllerComponent(container))
            container.addComponent(new ClusterControllerComponent());
        // The configurer is per content cluster and knows this container's position.
        container.addComponent(new ClusterControllerConfigurer(contentCluster, position, containerCount));
        position++;
    }
}
/** Returns whether the container already carries a ClusterControllerComponent. */
private boolean hasClusterControllerComponent(Container container) {
    for (Object component : container.getComponents().getComponents()) {
        if (component instanceof ClusterControllerComponent) {
            return true;
        }
    }
    return false;
}
} | class Builder {
/** The admin model of this system or null if none (which only happens in tests) */
private final Admin admin;
public Builder(Admin admin) {
this.admin = admin;
}
/**
 * Builds a ContentCluster from the given content element: document model,
 * redundancy, persistence engine, storage/distributor nodes, group topology,
 * optional indexed-search / docproc / tuning configuration, and cluster controllers.
 */
public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) {
ModelElement contentElement = new ModelElement(w3cContentElement);
// Document definitions, routing selection and global-distribution set come from <documents>.
ModelElement documentsElement = contentElement.getChild("documents");
Map<String, NewDocumentType> documentDefinitions =
new SearchDefinitionBuilder().build(context.getDeployState().getDocumentModel().getDocumentManager(), documentsElement);
String routingSelection = new DocumentSelectionBuilder().build(documentsElement);
Redundancy redundancy = new RedundancyBuilder().build(contentElement);
Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement);
ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterName(contentElement), documentDefinitions,
globallyDistributedDocuments, routingSelection, redundancy,
context.getDeployState().getProperties().zone());
// Build sub-models in dependency order: controller config, search, engine, nodes, groups.
c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterName(contentElement), contentElement).build(c, contentElement.getXml());
c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments).build(c, contentElement.getXml());
c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c);
c.storageNodes = new StorageCluster.Builder().build(c, w3cContentElement);
c.distributorNodes = new DistributorCluster.Builder(c).build(c, w3cContentElement);
c.rootGroup = new StorageGroup.Builder(contentElement, c, context).buildRootGroup();
validateThatGroupSiblingsAreUnique(c.clusterName, c.rootGroup);
// Redundancy needs the leaf-group count before search sizing can be derived from it.
redundancy.setExplicitGroups(c.getRootGroup().getNumberOfLeafGroups());
c.search.handleRedundancy(redundancy);
IndexedSearchCluster index = c.search.getIndexed();
if (index != null) {
setupIndexedCluster(index, contentElement);
}
// Indexed search is only supported on the proton engine.
if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) {
throw new RuntimeException("If you have indexed search you need to have proton as engine");
}
if (documentsElement != null) {
ModelElement e = documentsElement.getChild("document-processing");
if (e != null) {
setupDocumentProcessing(c, e);
}
} else if (c.persistenceFactory != null) {
throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified.");
}
ModelElement tuning = contentElement.getChild("tuning");
if (tuning != null) {
setupTuning(c, tuning);
}
// No producer root means a test context without an admin model; skip controllers.
if (context.getParentProducer().getRoot() == null) return c;
addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterName, c);
return c;
}
/**
 * Applies the content element's search and dispatch/tuning settings
 * to the indexed search cluster.
 */
private void setupIndexedCluster(IndexedSearchCluster index, ModelElement element) {
ContentSearch search = DomContentSearchBuilder.build(element);
Double queryTimeout = search.getQueryTimeout();
if (queryTimeout != null) {
// query-timeout may be given in either proton or content configuration, never both.
Preconditions.checkState(index.getQueryTimeout() == null,
"You may not specify query-timeout in both proton and content.");
index.setQueryTimeout(queryTimeout);
}
Double visibilityDelay = search.getVisibilityDelay();
if (visibilityDelay != null) {
index.setVisibilityDelay(visibilityDelay);
}
index.setSearchCoverage(DomSearchCoverageBuilder.build(element));
index.setDispatchSpec(DomDispatchBuilder.build(element));
if (index.useMultilevelDispatchSetup()) {
// An explicit multilevel dispatch setup must be validated for internal consistency.
new MultilevelDispatchValidator(index.getClusterName(), index.getDispatchSpec(), index.getSearchNodes()).validate();
}
TuningDispatch tuningDispatch = DomTuningDispatchBuilder.build(element);
Integer maxHitsPerPartition = tuningDispatch.getMaxHitsPerPartition();
Boolean useLocalNode = tuningDispatch.getUseLocalNode();
// Lazily create the tuning containers before writing dispatch settings into them.
if (index.getTuning() == null) {
index.setTuning(new Tuning(index));
}
if (index.getTuning().dispatch == null) {
index.getTuning().dispatch = new Tuning.Dispatch();
}
// Only override values the user actually supplied.
if (maxHitsPerPartition != null) {
index.getTuning().dispatch.maxHitsPerPartition = maxHitsPerPartition;
}
if (useLocalNode != null) {
index.getTuning().dispatch.useLocalNode = useLocalNode;
}
index.getTuning().dispatch.minGroupCoverage = tuningDispatch.getMinGroupCoverage();
index.getTuning().dispatch.minActiveDocsCoverage = tuningDispatch.getMinActiveDocsCoverage();
index.getTuning().dispatch.policy = tuningDispatch.getDispatchPolicy();
}
/**
 * Applies the optional docproc cluster and chain names from the
 * document-processing element to the indexed search cluster, if any.
 */
private void setupDocumentProcessing(ContentCluster c, ModelElement e) {
    String docprocCluster = e.getStringAttribute("cluster");
    if (docprocCluster != null)
        docprocCluster = docprocCluster.trim();
    // Only an indexed cluster consumes these settings; blank values are ignored.
    if (c.getSearch().hasIndexedCluster() && docprocCluster != null && ! docprocCluster.isEmpty())
        c.getSearch().getIndexed().setIndexingClusterName(docprocCluster);

    String docprocChain = e.getStringAttribute("chain");
    if (docprocChain != null)
        docprocChain = docprocChain.trim();
    if (c.getSearch().hasIndexedCluster() && docprocChain != null && ! docprocChain.isEmpty())
        c.getSearch().getIndexed().setIndexingChainName(docprocChain);
}
/**
 * Applies the optional tuning element: the distribution type
 * (strict | loose | legacy) and the max-nodes-per-merge limit.
 *
 * @throws IllegalStateException if the distribution type is unrecognized
 */
private void setupTuning(ContentCluster c, ModelElement tuning) {
    ModelElement distribution = tuning.getChild("distribution");
    if (distribution != null) {
        String attr = distribution.getStringAttribute("type");
        if (attr != null) {
            // equalsIgnoreCase replaces the locale-sensitive toLowerCase().equals(...)
            // pair: under e.g. the Turkish default locale, "STRICT".toLowerCase()
            // yields a dotless-i string that would never match "strict".
            if (attr.equalsIgnoreCase("strict")) {
                c.distributionMode = DistributionMode.STRICT;
            } else if (attr.equalsIgnoreCase("loose")) {
                c.distributionMode = DistributionMode.LOOSE;
            } else if (attr.equalsIgnoreCase("legacy")) {
                c.distributionMode = DistributionMode.LEGACY;
            } else {
                throw new IllegalStateException("Distribution type " + attr + " not supported.");
            }
        }
    }
    ModelElement merges = tuning.getChild("merges");
    if (merges != null) {
        Integer attr = merges.getIntegerAttribute("max-nodes-per-merge");
        if (attr != null) {
            c.maxNodesPerMerge = attr;
        }
    }
}
/**
 * Validates that the direct subgroups of the given group have pairwise
 * distinct names.
 *
 * @throws IllegalArgumentException on the first duplicate sibling name
 */
private void validateGroupSiblings(String cluster, StorageGroup group) {
    // Declare as the Set interface rather than HashSet; Set.add returning false
    // replaces the previous contains()+add() double lookup.
    Set<String> siblings = new HashSet<>();
    for (StorageGroup g : group.getSubgroups()) {
        String name = g.getName();
        if ( ! siblings.add(name)) {
            throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " +
                    "with name '" + name + "' in the same subgroup. Group sibling names must be unique.");
        }
    }
}
/** Recursively validates sibling-name uniqueness at every level of the group tree. */
private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) {
    if (group == null) return; // no group hierarchy configured
    // Depth-first: validate this level, then every subgroup beneath it.
    validateGroupSiblings(cluster, group);
    group.getSubgroups().forEach(subgroup -> validateThatGroupSiblingsAreUnique(cluster, subgroup));
}
private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context,
StorageGroup rootGroup, ModelElement contentElement,
String contentClusterName, ContentCluster contentCluster) {
if (admin == null) return;
if (contentCluster.getPersistence() == null) return;
ContainerCluster clusterControllers;
ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster);
if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) {
clusterControllers = overlappingCluster.getClusterControllers();
}
else if (admin.multitenant()) {
String clusterName = contentClusterName + "-controllers";
NodesSpecification nodesSpecification =
NodesSpecification.optionalDedicatedFromParent(contentElement.getChild("controllers"), context.getDeployState().getWantedNodeVespaVersion())
.orElse(NodesSpecification.nonDedicated(3, context.getDeployState().getWantedNodeVespaVersion()));
Collection<HostResource> hosts = nodesSpecification.isDedicated() ?
getControllerHosts(nodesSpecification, admin, clusterName, context) :
drawControllerHosts(nodesSpecification.count(), rootGroup, containers);
clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true);
contentCluster.clusterControllers = clusterControllers;
}
else {
clusterControllers = admin.getClusterControllers();
if (clusterControllers == null) {
List<HostResource> hosts = admin.getClusterControllerHosts();
if (hosts.size() > 1) {
admin.deployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly.");
}
clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false);
admin.setClusterControllers(clusterControllers);
}
}
addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster);
}
/** Returns any other content cluster which shares nodes with this, or null if none are built */
private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) {
for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) {
if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster))
return otherContentCluster;
}
return null;
}
private boolean overlaps(ContentCluster c1, ContentCluster c2) {
Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet());
Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet());
return ! Sets.intersection(c1Hosts, c2Hosts).isEmpty();
}
private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) {
return nodesSpecification.provision(admin.getHostSystem(), ClusterSpec.Type.admin, ClusterSpec.Id.from(clusterName), context.getDeployLogger()).keySet();
}
/**
 * Draws up to <code>count</code> content hosts to run cluster controllers on.
 * Returns an odd number of hosts, since an even-sized controller group cannot
 * form a clean majority.
 *
 * NOTE(review): the <code>containers</code> parameter is unused in this method;
 * kept for interface compatibility with callers.
 */
private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) {
    List<HostResource> hosts = drawContentHostsRecursively(count, rootGroup);
    // Drop one host when the count is even. Guard against the empty list: the
    // previous unconditional subList(0, hosts.size()-1) threw
    // IndexOutOfBoundsException when no hosts were drawn (0 is even).
    if (hosts.size() % 2 == 0 && ! hosts.isEmpty())
        hosts = hosts.subList(0, hosts.size() - 1);
    return hosts;
}
/**
* Draws <code>count</code> container nodes to use as cluster controllers, or as many as possible
* if less than <code>count</code> are available.
*
* This will draw the same nodes each time it is
* invoked if cluster names and node indexes are unchanged.
*/
private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters,
Set<HostResource> usedHosts) {
if (containerClusters.isEmpty()) return Collections.emptyList();
List<HostResource> allHosts = new ArrayList<>();
for (ContainerCluster cluster : clustersSortedByName(containerClusters))
allHosts.addAll(hostResourcesSortedByIndex(cluster));
List<HostResource> uniqueHostsWithoutClusterController = allHosts.stream()
.filter(h -> ! usedHosts.contains(h))
.filter(h -> ! hostHasClusterController(h.getHostName(), allHosts))
.distinct()
.collect(Collectors.toList());
return uniqueHostsWithoutClusterController.subList(0, Math.min(uniqueHostsWithoutClusterController.size(), count));
}
private List<ContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) {
return containerModels.stream()
.map(ContainerModel::getCluster)
.sorted(Comparator.comparing(ContainerCluster::getName))
.collect(Collectors.toList());
}
private List<HostResource> hostResourcesSortedByIndex(ContainerCluster cluster) {
return cluster.getContainers().stream()
.sorted(Comparator.comparing(Container::index))
.map(Container::getHostResource)
.collect(Collectors.toList());
}
/** Returns whether any host having the given hostname has a cluster controller */
private boolean hostHasClusterController(String hostname, List<HostResource> hosts) {
for (HostResource host : hosts) {
if ( ! host.getHostName().equals(hostname)) continue;
if (hasClusterController(host))
return true;
}
return false;
}
private boolean hasClusterController(HostResource host) {
for (Service service : host.getServices())
if (service instanceof ClusterControllerContainer)
return true;
return false;
}
/**
* Draw <code>count</code> nodes from as many different content groups below this as possible.
* This will only achieve maximum spread in the case where the groups are balanced and never on the same
* physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy.
*/
private List<HostResource> drawContentHostsRecursively(int count, StorageGroup group) {
Set<HostResource> hosts = new HashSet<>();
if (group.getNodes().isEmpty()) {
int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size());
for (StorageGroup subgroup : group.getSubgroups())
hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, subgroup));
}
else {
hosts.addAll(group.getNodes().stream()
.filter(node -> ! node.isRetired())
.map(StorageNode::getHostResource).collect(Collectors.toList()));
}
List<HostResource> sortedHosts = new ArrayList<>(hosts);
Collections.sort(sortedHosts);
sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size()));
return sortedHosts;
}
private ContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant) {
ContainerCluster clusterControllers = new ContainerCluster(parent, name, name, new ClusterControllerClusterVerifier());
List<Container> containers = new ArrayList<>();
if (clusterControllers.getContainers().isEmpty()) {
int index = 0;
for (HostResource host : hosts) {
ClusterControllerContainer clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant);
clusterControllerContainer.setHostResource(host);
clusterControllerContainer.initService();
clusterControllerContainer.setProp("clustertype", "admin")
.setProp("clustername", clusterControllers.getName())
.setProp("index", String.valueOf(index));
containers.add(clusterControllerContainer);
++index;
}
}
clusterControllers.addContainers(containers);
ContainerModelBuilder.addDefaultHandler_legacyBuilder(clusterControllers);
return clusterControllers;
}
private void addClusterControllerComponentsForThisCluster(ContainerCluster clusterControllers, ContentCluster contentCluster) {
int index = 0;
for (Container container : clusterControllers.getContainers()) {
if ( ! hasClusterControllerComponent(container))
container.addComponent(new ClusterControllerComponent());
container.addComponent(new ClusterControllerConfigurer(contentCluster, index++, clusterControllers.getContainers().size()));
}
}
private boolean hasClusterControllerComponent(Container container) {
for (Object o : container.getComponents().getComponents())
if (o instanceof ClusterControllerComponent) return true;
return false;
}
} |
Renamed. | public void setup() {
// Stub the retirement policy: retire exactly those nodes whose IP set is {::1}.
// NOTE(review): "invok" looks like a typo for "invocation"; rename separately.
doAnswer(invok -> {
boolean shouldRetire = ((Node) invok.getArguments()[0]).ipAddresses().equals(Collections.singleton("::1"));
return shouldRetire ? Optional.of("Some reason") : Optional.empty();
}).when(policy).shouldRetire(any(Node.class));
when(policy.isActive()).thenReturn(true);
// Five flavors, with the given number of ready nodes created per flavor.
NodeFlavors nodeFlavors = NodeRetirerTester.makeFlavors(5);
tester = new NodeRetirerTester(nodeFlavors);
retirer = spy(tester.makeNodeRetirer(policy));
tester.createReadyNodesByFlavor(21, 42, 27, 15, 8);
// Deploy test applications -- presumably (tenant, app, flavor indices, node counts); confirm against NodeRetirerTester.
tester.deployApp("vespa", "calendar", new int[]{3}, new int[]{7});
tester.deployApp("vespa", "notes", new int[]{0}, new int[]{3});
tester.deployApp("sports", "results", new int[]{0}, new int[]{6});
tester.deployApp("search", "images", new int[]{3}, new int[]{4});
tester.deployApp("search", "videos", new int[]{2}, new int[]{2});
tester.deployApp("tester", "my-app", new int[]{1, 2}, new int[]{4, 6});
}
doAnswer(invoke -> {
boolean shouldRetire = ((Node) invoke.getArguments()[0]).ipAddresses().equals(Collections.singleton("::1"));
return shouldRetire ? Optional.of("Some reason") : Optional.empty();
}).when(policy).shouldRetire(any(Node.class));
when(policy.isActive()).thenReturn(true);
NodeFlavors nodeFlavors = NodeRetirerTester.makeFlavors(5);
tester = new NodeRetirerTester(nodeFlavors);
retirer = spy(tester.makeNodeRetirer(policy));
tester.createReadyNodesByFlavor(21, 42, 27, 15, 8);
tester.deployApp("vespa", "calendar", new int[]{3}, new int[]{7});
tester.deployApp("vespa", "notes", new int[]{0}, new int[]{3});
tester.deployApp("sports", "results", new int[]{0}, new int[]{6});
tester.deployApp("search", "images", new int[]{3}, new int[]{4});
tester.deployApp("search", "videos", new int[]{2}, new int[]{2});
tester.deployApp("tester", "my-app", new int[]{1, 2}, new int[]{4, 6});
} | class NodeRetirerTest {
private NodeRetirerTester tester;
private NodeRetirer retirer;
private final RetirementPolicy policy = mock(RetirementPolicy.class);
@Before
@Test
public void testRetireUnallocated() {
tester.assertCountsForStateByFlavor(Node.State.ready, 12, 38, 19, 4, 8);
tester.setNumberAllowedUnallocatedRetirementsPerFlavor(6, 30, 15, 2, 4);
assertFalse(retirer.retireUnallocated());
tester.assertCountsForStateByFlavor(Node.State.parked, 6, 30, 15, 2, 4);
tester.assertCountsForStateByFlavor(Node.State.ready, 6, 8, 4, 2, 4);
tester.setNumberAllowedUnallocatedRetirementsPerFlavor(10, 20, 5, 5, 4);
assertTrue(retirer.retireUnallocated());
tester.assertCountsForStateByFlavor(Node.State.parked, 12, 38, 19, 4, 8);
tester.nodeRepository.getNodes().forEach(node ->
assertEquals(node.status().wantToDeprovision(), node.state() == Node.State.parked));
}
@Test
public void testRetireAllocated() {
tester.nodeRepository.getNodes(Node.State.ready)
.forEach(node -> tester.nodeRepository.write(node.withIpAddresses(Collections.singleton("::2"))));
tester.assertCountsForStateByFlavor(Node.State.active, 9, 4, 8, 11, -1);
tester.setNumberAllowedAllocatedRetirementsPerFlavor(3, 2, 4, 2);
retirer.retireAllocated();
tester.assertParkedCountsByApplication(-1, -1, -1, -1, -1, -1);
tester.assertRetiringCountsByApplication(1, 1, 1, 1, 1, 2);
retirer.retireAllocated();
retirer.retireAllocated();
tester.assertRetiringCountsByApplication(1, 1, 1, 1, 1, 2);
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 1, 1, 1, 2);
retirer.retireAllocated();
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 2, 1, 2, 4);
retirer.retireAllocated();
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 2, 1, 2, 4);
tester.nodeRepository.getNodes().forEach(node ->
assertEquals(node.status().wantToDeprovision(), node.state() == Node.State.parked));
}
@Test
public void testGetActiveApplicationIds() {
List<String> expectedOrder = Arrays.asList(
"tester.my-app", "vespa.calendar", "sports.results", "search.images", "vespa.notes", "search.videos");
List<String> actualOrder = retirer.getActiveApplicationIds(tester.nodeRepository.getNodes()).stream()
.map(applicationId -> applicationId.toShortString().replace(":default", ""))
.collect(Collectors.toList());
assertEquals(expectedOrder, actualOrder);
}
@Test
public void testGetRetireableNodesForApplication() {
ApplicationId app = new ApplicationId.Builder().tenant("vespa").applicationName("calendar").build();
List<Node> nodes = tester.nodeRepository.getNodes(app);
Set<String> actual = retirer.filterRetireableNodes(nodes).stream().map(Node::hostname).collect(Collectors.toSet());
Set<String> expected = nodes.stream().map(Node::hostname).collect(Collectors.toSet());
assertEquals(expected, actual);
Node nodeWantToRetire = tester.nodeRepository.getNode("host3.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.write(nodeWantToRetire.with(nodeWantToRetire.status().withWantToRetire(true)));
Node nodeToFail = tester.nodeRepository.getNode("host5.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.fail(nodeToFail.hostname(), Agent.system, "Failed for unit testing");
Node nodeToUpdate = tester.nodeRepository.getNode("host8.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.write(nodeToUpdate.withIpAddresses(Collections.singleton("::2")));
nodes = tester.nodeRepository.getNodes(app);
Set<String> excluded = Stream.of(nodeWantToRetire, nodeToFail, nodeToUpdate).map(Node::hostname).collect(Collectors.toSet());
Set<String> actualAfterUpdates = retirer.filterRetireableNodes(nodes).stream().map(Node::hostname).collect(Collectors.toSet());
Set<String> expectedAfterUpdates = nodes.stream().map(Node::hostname).filter(node -> !excluded.contains(node)).collect(Collectors.toSet());
assertEquals(expectedAfterUpdates, actualAfterUpdates);
}
@Test
public void testGetNumberNodesAllowToRetireForCluster() {
ApplicationId app = new ApplicationId.Builder().tenant("vespa").applicationName("calendar").build();
long actualAllActive = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(2, actualAllActive);
List<Node> nodesToRetire = tester.nodeRepository.getNodes(app).stream().limit(3).collect(Collectors.toList());
nodesToRetire.forEach(node -> tester.nodeRepository.write(node.with(node.status().withWantToRetire(true))));
long actualOneWantToRetire = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(0, actualOneWantToRetire);
nodesToRetire.stream().limit(2).forEach(node ->
tester.nodeRepository.park(node.hostname(), Agent.system, "Parked for unit testing"));
long actualOneRetired = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(1, actualOneRetired);
}
@Test
public void inactivePolicyDoesNothingTest() {
when(policy.isActive()).thenReturn(false);
retirer.maintain();
verify(retirer, never()).retireUnallocated();
verify(retirer, never()).retireAllocated();
}
} | class NodeRetirerTest {
private NodeRetirerTester tester;
private NodeRetirer retirer;
private final RetirementPolicy policy = mock(RetirementPolicy.class);
@Before
@Test
public void testRetireUnallocated() {
tester.assertCountsForStateByFlavor(Node.State.ready, 12, 38, 19, 4, 8);
tester.setNumberAllowedUnallocatedRetirementsPerFlavor(6, 30, 15, 2, 4);
assertFalse(retirer.retireUnallocated());
tester.assertCountsForStateByFlavor(Node.State.parked, 6, 30, 15, 2, 4);
tester.assertCountsForStateByFlavor(Node.State.ready, 6, 8, 4, 2, 4);
tester.setNumberAllowedUnallocatedRetirementsPerFlavor(10, 20, 5, 5, 4);
assertTrue(retirer.retireUnallocated());
tester.assertCountsForStateByFlavor(Node.State.parked, 12, 38, 19, 4, 8);
tester.nodeRepository.getNodes().forEach(node ->
assertEquals(node.status().wantToDeprovision(), node.state() == Node.State.parked));
}
@Test
public void testRetireAllocated() {
tester.nodeRepository.getNodes(Node.State.ready)
.forEach(node -> tester.nodeRepository.write(node.withIpAddresses(Collections.singleton("::2"))));
tester.assertCountsForStateByFlavor(Node.State.active, 9, 4, 8, 11, -1);
tester.setNumberAllowedAllocatedRetirementsPerFlavor(3, 2, 4, 2);
retirer.retireAllocated();
tester.assertParkedCountsByApplication(-1, -1, -1, -1, -1, -1);
tester.assertRetiringCountsByApplication(1, 1, 1, 1, 1, 2);
retirer.retireAllocated();
retirer.retireAllocated();
tester.assertRetiringCountsByApplication(1, 1, 1, 1, 1, 2);
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 1, 1, 1, 2);
retirer.retireAllocated();
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 2, 1, 2, 4);
retirer.retireAllocated();
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 2, 1, 2, 4);
tester.nodeRepository.getNodes().forEach(node ->
assertEquals(node.status().wantToDeprovision(), node.state() == Node.State.parked));
}
@Test
public void testGetActiveApplicationIds() {
List<String> expectedOrder = Arrays.asList(
"tester.my-app", "vespa.calendar", "sports.results", "search.images", "vespa.notes", "search.videos");
List<String> actualOrder = retirer.getActiveApplicationIds(tester.nodeRepository.getNodes()).stream()
.map(applicationId -> applicationId.toShortString().replace(":default", ""))
.collect(Collectors.toList());
assertEquals(expectedOrder, actualOrder);
}
@Test
public void testGetRetireableNodesForApplication() {
ApplicationId app = new ApplicationId.Builder().tenant("vespa").applicationName("calendar").build();
List<Node> nodes = tester.nodeRepository.getNodes(app);
Set<String> actual = retirer.filterRetireableNodes(nodes).stream().map(Node::hostname).collect(Collectors.toSet());
Set<String> expected = nodes.stream().map(Node::hostname).collect(Collectors.toSet());
assertEquals(expected, actual);
Node nodeWantToRetire = tester.nodeRepository.getNode("host3.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.write(nodeWantToRetire.with(nodeWantToRetire.status().withWantToRetire(true)));
Node nodeToFail = tester.nodeRepository.getNode("host5.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.fail(nodeToFail.hostname(), Agent.system, "Failed for unit testing");
Node nodeToUpdate = tester.nodeRepository.getNode("host8.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.write(nodeToUpdate.withIpAddresses(Collections.singleton("::2")));
nodes = tester.nodeRepository.getNodes(app);
Set<String> excluded = Stream.of(nodeWantToRetire, nodeToFail, nodeToUpdate).map(Node::hostname).collect(Collectors.toSet());
Set<String> actualAfterUpdates = retirer.filterRetireableNodes(nodes).stream().map(Node::hostname).collect(Collectors.toSet());
Set<String> expectedAfterUpdates = nodes.stream().map(Node::hostname).filter(node -> !excluded.contains(node)).collect(Collectors.toSet());
assertEquals(expectedAfterUpdates, actualAfterUpdates);
}
@Test
public void testGetNumberNodesAllowToRetireForCluster() {
ApplicationId app = new ApplicationId.Builder().tenant("vespa").applicationName("calendar").build();
long actualAllActive = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(2, actualAllActive);
List<Node> nodesToRetire = tester.nodeRepository.getNodes(app).stream().limit(3).collect(Collectors.toList());
nodesToRetire.forEach(node -> tester.nodeRepository.write(node.with(node.status().withWantToRetire(true))));
long actualOneWantToRetire = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(0, actualOneWantToRetire);
nodesToRetire.stream().limit(2).forEach(node ->
tester.nodeRepository.park(node.hostname(), Agent.system, "Parked for unit testing"));
long actualOneRetired = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(1, actualOneRetired);
}
@Test
public void inactivePolicyDoesNothingTest() {
// When the retirement policy reports itself inactive, maintain() must not
// attempt any retirement, neither of unallocated nor of allocated nodes.
when(policy.isActive()).thenReturn(false);
retirer.maintain();
verify(retirer, never()).retireUnallocated();
verify(retirer, never()).retireAllocated();
}
} |
Yes, eventually, but we thought there were issues with enabling it in staging (and in CD) because visiting is slow when running tests. If we avoid that kind of visiting in CD tests (though I think we should do it), we can enable it for all zones in CD. What do you think? | public int distributionBits() {
// Zones large enough to always use the maximum of 16 distribution bits.
List<Zone> zonesWith16DistributionBits = Arrays.asList(createZone(Environment.prod, "us-west-1"),
                                                       createZone(Environment.prod, "us-central-1"),
                                                       createZone(Environment.prod, "eu-west-1"),
                                                       createZone(Environment.prod, "ap-northeast-1"),
                                                       createZone(Environment.prod, "ap-northeast-2"),
                                                       createZone(Environment.prod, "us-east-3"));
// Use 16 bits in every CD zone, not only CD prod — per review, CD should behave the
// same in all environments. Otherwise derive the bit count from the node count.
if (zone.system() == SystemName.cd || zonesWith16DistributionBits.contains(zone))
    return 16;
else
    return DistributionBitCalculator.getDistributionBits(getNodeCountPerGroup(), getDistributionMode());
} | if ((zone.system() == SystemName.cd && zone.environment() == Environment.prod) || | public int distributionBits() {
// Zones large enough to always use the maximum of 16 distribution bits.
List<Zone> zonesWith16DistributionBits = Arrays.asList(createZone(Environment.prod, "us-west-1"),
createZone(Environment.prod, "us-central-1"),
createZone(Environment.prod, "eu-west-1"),
createZone(Environment.prod, "ap-northeast-1"),
createZone(Environment.prod, "ap-northeast-2"),
createZone(Environment.prod, "us-east-3"));
// Every CD zone uses 16 bits; elsewhere the bit count follows the current node count.
if (zone.system() == SystemName.cd || zonesWith16DistributionBits.contains(zone))
return 16;
else
return DistributionBitCalculator.getDistributionBits(getNodeCountPerGroup(), getDistributionMode());
} | class Builder {
/** The admin model of this system or null if none (which only happens in tests) */
private final Admin admin;
/** Creates a content cluster builder. The admin model may be null (tests only). */
public Builder(Admin admin) {
this.admin = admin;
}
/**
 * Builds a ContentCluster from the content element of services.xml.
 * The wiring below is order-sensitive: later steps read fields (search, persistenceFactory,
 * rootGroup, ...) assigned by earlier ones.
 */
public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) {
ModelElement contentElement = new ModelElement(w3cContentElement);
ModelElement documentsElement = contentElement.getChild("documents");
// Parse document types, routing selection and redundancy before creating the cluster.
Map<String, NewDocumentType> documentDefinitions =
new SearchDefinitionBuilder().build(context.getDeployState().getDocumentModel().getDocumentManager(), documentsElement);
String routingSelection = new DocumentSelectionBuilder().build(documentsElement);
Redundancy redundancy = new RedundancyBuilder().build(contentElement);
Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement);
ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterName(contentElement), documentDefinitions,
globallyDistributedDocuments, routingSelection, redundancy,
context.getDeployState().getProperties().zone());
// Wire up the sub-clusters of this content cluster.
c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterName(contentElement), contentElement).build(c, contentElement.getXml());
c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments).build(c, contentElement.getXml());
c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c);
c.storageNodes = new StorageCluster.Builder().build(c, w3cContentElement);
c.distributorNodes = new DistributorCluster.Builder(c).build(c, w3cContentElement);
c.rootGroup = new StorageGroup.Builder(contentElement, c, context).buildRootGroup();
validateThatGroupSiblingsAreUnique(c.clusterName, c.rootGroup);
redundancy.setExplicitGroups(c.getRootGroup().getNumberOfLeafGroups());
c.search.handleRedundancy(redundancy);
IndexedSearchCluster index = c.search.getIndexed();
if (index != null) {
setupIndexedCluster(index, contentElement);
}
// Indexed search requires the proton engine.
if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) {
throw new RuntimeException("If you have indexed search you need to have proton as engine")
;
}
if (documentsElement != null) {
ModelElement e = documentsElement.getChild("document-processing");
if (e != null) {
setupDocumentProcessing(c, e);
}
} else if (c.persistenceFactory != null) {
throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified.");
}
ModelElement tuning = contentElement.getChild("tuning");
if (tuning != null) {
setupTuning(c, tuning);
}
// NOTE(review): a null root appears to indicate a partial/test model — cluster
// controllers are skipped in that case; confirm.
if (context.getParentProducer().getRoot() == null) return c;
addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterName, c);
return c;
}
/**
 * Applies search settings from the content element to the indexed search cluster:
 * query timeout, visibility delay, search coverage, dispatch spec and dispatch tuning.
 */
private void setupIndexedCluster(IndexedSearchCluster index, ModelElement element) {
ContentSearch search = DomContentSearchBuilder.build(element);
Double queryTimeout = search.getQueryTimeout();
if (queryTimeout != null) {
// query-timeout may be given in either proton or content, never both
Preconditions.checkState(index.getQueryTimeout() == null,
"You may not specify query-timeout in both proton and content.");
index.setQueryTimeout(queryTimeout);
}
Double visibilityDelay = search.getVisibilityDelay();
if (visibilityDelay != null) {
index.setVisibilityDelay(visibilityDelay);
}
index.setSearchCoverage(DomSearchCoverageBuilder.build(element));
index.setDispatchSpec(DomDispatchBuilder.build(element));
if (index.useMultilevelDispatchSetup()) {
// An explicit multilevel dispatch setup must be validated against the search nodes.
new MultilevelDispatchValidator(index.getClusterName(), index.getDispatchSpec(), index.getSearchNodes()).validate();
}
TuningDispatch tuningDispatch = DomTuningDispatchBuilder.build(element);
Integer maxHitsPerPartition = tuningDispatch.getMaxHitsPerPartition();
Boolean useLocalNode = tuningDispatch.getUseLocalNode();
// Lazily create the tuning containers before writing dispatch tuning into them.
if (index.getTuning() == null) {
index.setTuning(new Tuning(index));
}
if (index.getTuning().dispatch == null) {
index.getTuning().dispatch = new Tuning.Dispatch();
}
if (maxHitsPerPartition != null) {
index.getTuning().dispatch.maxHitsPerPartition = maxHitsPerPartition;
}
if (useLocalNode != null) {
index.getTuning().dispatch.useLocalNode = useLocalNode;
}
index.getTuning().dispatch.minGroupCoverage = tuningDispatch.getMinGroupCoverage();
index.getTuning().dispatch.minActiveDocsCoverage = tuningDispatch.getMinActiveDocsCoverage();
index.getTuning().dispatch.policy = tuningDispatch.getDispatchPolicy();
}
/** Applies docproc cluster and chain names from the document-processing element to the indexed search cluster, if any. */
private void setupDocumentProcessing(ContentCluster c, ModelElement e) {
    String clusterAttribute = e.getStringAttribute("cluster");
    String docprocCluster = clusterAttribute == null ? null : clusterAttribute.trim();
    if (c.getSearch().hasIndexedCluster() && docprocCluster != null && ! docprocCluster.isEmpty())
        c.getSearch().getIndexed().setIndexingClusterName(docprocCluster);

    String chainAttribute = e.getStringAttribute("chain");
    String docprocChain = chainAttribute == null ? null : chainAttribute.trim();
    if (c.getSearch().hasIndexedCluster() && docprocChain != null && ! docprocChain.isEmpty())
        c.getSearch().getIndexed().setIndexingChainName(docprocChain);
}
/**
 * Applies the tuning element: distribution type and merge settings.
 *
 * @throws IllegalStateException if an unknown distribution type is specified
 */
private void setupTuning(ContentCluster c, ModelElement tuning) {
    ModelElement distribution = tuning.getChild("distribution");
    if (distribution != null) {
        String type = distribution.getStringAttribute("type");
        if (type != null) {
            // equalsIgnoreCase is locale-independent; toLowerCase().equals(...) would
            // misbehave under e.g. the Turkish default locale (dotless i).
            if (type.equalsIgnoreCase("strict")) {
                c.distributionMode = DistributionMode.STRICT;
            } else if (type.equalsIgnoreCase("loose")) {
                c.distributionMode = DistributionMode.LOOSE;
            } else if (type.equalsIgnoreCase("legacy")) {
                c.distributionMode = DistributionMode.LEGACY;
            } else {
                throw new IllegalStateException("Distribution type " + type + " not supported.");
            }
        }
    }
    ModelElement merges = tuning.getChild("merges");
    if (merges != null) {
        Integer maxNodesPerMerge = merges.getIntegerAttribute("max-nodes-per-merge");
        if (maxNodesPerMerge != null) {
            c.maxNodesPerMerge = maxNodesPerMerge;
        }
    }
}
/**
 * Verifies that the direct subgroups of the given group have unique names.
 *
 * @throws IllegalArgumentException if two sibling groups share a name
 */
private void validateGroupSiblings(String cluster, StorageGroup group) {
    Set<String> seenNames = new HashSet<>();
    for (StorageGroup subgroup : group.getSubgroups()) {
        String name = subgroup.getName();
        // Set.add returns false when the name was already present — no separate contains check needed.
        if ( ! seenNames.add(name)) {
            throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " +
                    "with name '" + name + "' in the same subgroup. Group sibling names must be unique.");
        }
    }
}
/** Recursively checks that sibling group names are unique throughout the whole group tree. */
private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) {
    if (group == null) return; // nothing to validate
    validateGroupSiblings(cluster, group);
    group.getSubgroups().forEach(subgroup -> validateThatGroupSiblingsAreUnique(cluster, subgroup));
}
/**
 * Attaches a cluster controller cluster to this content cluster: reuses the controllers of an
 * overlapping content cluster when one exists, otherwise creates them on dedicated or drawn
 * hosts (multitenant) or on the admin hosts (single tenant).
 */
private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context,
StorageGroup rootGroup, ModelElement contentElement,
String contentClusterName, ContentCluster contentCluster) {
// No admin model (tests) or no persistence engine: nothing to control.
if (admin == null) return;
if (contentCluster.getPersistence() == null) return;
ContainerCluster clusterControllers;
ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster);
if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) {
// Share the controllers of a content cluster whose nodes overlap with ours.
clusterControllers = overlappingCluster.getClusterControllers();
}
else if (admin.multitenant()) {
String clusterName = contentClusterName + "-controllers";
// Use an explicit controllers specification when given, otherwise 3 non-dedicated nodes.
NodesSpecification nodesSpecification =
NodesSpecification.optionalDedicatedFromParent(contentElement.getChild("controllers"), context.getDeployState().getWantedNodeVespaVersion())
.orElse(NodesSpecification.nonDedicated(3, context.getDeployState().getWantedNodeVespaVersion()));
Collection<HostResource> hosts = nodesSpecification.isDedicated() ?
getControllerHosts(nodesSpecification, admin, clusterName, context) :
drawControllerHosts(nodesSpecification.count(), rootGroup, containers);
clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true);
contentCluster.clusterControllers = clusterControllers;
}
else {
// Single tenant: one shared controller cluster on the admin hosts, created on first use.
clusterControllers = admin.getClusterControllers();
if (clusterControllers == null) {
List<HostResource> hosts = admin.getClusterControllerHosts();
if (hosts.size() > 1) {
admin.deployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly.");
}
clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false);
admin.setClusterControllers(clusterControllers);
}
}
addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster);
}
/** Returns any other content cluster which shares nodes with this, or null if none are built */
private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) {
    for (ContentCluster candidate : root.getChildrenByTypeRecursive(ContentCluster.class)) {
        if (candidate == contentCluster) continue; // skip ourselves
        if (overlaps(contentCluster, candidate)) return candidate;
    }
    return null;
}
/** Returns whether the two content clusters share at least one host. */
private boolean overlaps(ContentCluster c1, ContentCluster c2) {
    Set<HostResource> firstHosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet());
    Set<HostResource> secondHosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet());
    return ! Collections.disjoint(firstHosts, secondHosts);
}
/** Provisions hosts for a dedicated cluster controller cluster. */
private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) {
    ClusterSpec.Id controllerClusterId = ClusterSpec.Id.from(clusterName);
    return nodesSpecification.provision(admin.getHostSystem(), ClusterSpec.Type.admin, controllerClusterId, context.getDeployLogger()).keySet();
}
/**
 * Draws hosts for cluster controllers from the content nodes, keeping an odd number
 * (presumably so a clear majority is always possible — confirm).
 * The containers parameter is currently unused.
 */
private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) {
    List<HostResource> hosts = drawContentHostsRecursively(count, rootGroup);
    // Guard the empty case: subList(0, -1) would throw IndexOutOfBoundsException.
    if ( ! hosts.isEmpty() && hosts.size() % 2 == 0)
        hosts = hosts.subList(0, hosts.size() - 1);
    return hosts;
}
/**
 * Draws up to <code>count</code> container nodes to use as cluster controllers.
 *
 * Deterministic: this will draw the same nodes each time it is invoked
 * as long as cluster names and node indexes are unchanged.
 */
private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters,
                                              Set<HostResource> usedHosts) {
    if (containerClusters.isEmpty()) return Collections.emptyList();

    // Deterministic ordering: clusters by name, containers by index.
    List<HostResource> orderedHosts = new ArrayList<>();
    for (ContainerCluster cluster : clustersSortedByName(containerClusters))
        orderedHosts.addAll(hostResourcesSortedByIndex(cluster));

    // Keep only unused hosts which do not already run a cluster controller.
    List<HostResource> candidates = orderedHosts.stream()
            .filter(host -> ! usedHosts.contains(host))
            .filter(host -> ! hostHasClusterController(host.getHostName(), orderedHosts))
            .distinct()
            .collect(Collectors.toList());
    return candidates.subList(0, Math.min(candidates.size(), count));
}
/** Returns the container clusters of the given models, sorted by cluster name. */
private List<ContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) {
    List<ContainerCluster> clusters = containerModels.stream()
            .map(ContainerModel::getCluster)
            .collect(Collectors.toList());
    clusters.sort(Comparator.comparing(ContainerCluster::getName));
    return clusters;
}
/** Returns the hosts of the given cluster's containers, ordered by container index. */
private List<HostResource> hostResourcesSortedByIndex(ContainerCluster cluster) {
    List<Container> orderedContainers = new ArrayList<>(cluster.getContainers());
    orderedContainers.sort(Comparator.comparing(Container::index));
    List<HostResource> hosts = new ArrayList<>(orderedContainers.size());
    for (Container container : orderedContainers)
        hosts.add(container.getHostResource());
    return hosts;
}
/** Returns whether any host having the given hostname has a cluster controller */
private boolean hostHasClusterController(String hostname, List<HostResource> hosts) {
    for (HostResource host : hosts) {
        if (host.getHostName().equals(hostname) && hasClusterController(host))
            return true;
    }
    return false;
}
/** Returns whether one of the services on the given host is a cluster controller container. */
private boolean hasClusterController(HostResource host) {
    boolean found = false;
    for (Service service : host.getServices()) {
        if (service instanceof ClusterControllerContainer) {
            found = true;
            break;
        }
    }
    return found;
}
/**
 * Draws <code>count</code> nodes from as many different content groups below this as possible.
 * Maximum spread is only achieved when the groups are balanced and never share a physical
 * node, and not over all levels of a multilevel group hierarchy.
 */
private List<HostResource> drawContentHostsRecursively(int count, StorageGroup group) {
    Set<HostResource> candidates = new HashSet<>();
    if ( ! group.getNodes().isEmpty()) {
        // Leaf group: the hosts of all non-retired nodes are candidates.
        for (StorageNode node : group.getNodes()) {
            if ( ! node.isRetired())
                candidates.add(node.getHostResource());
        }
    }
    else {
        // Interior group: draw evenly from each subgroup, rounding up.
        int perSubgroup = (int) Math.ceil((double) count / group.getSubgroups().size());
        for (StorageGroup subgroup : group.getSubgroups())
            candidates.addAll(drawContentHostsRecursively(perSubgroup, subgroup));
    }
    // Sort for determinism, then cap at the requested count.
    List<HostResource> result = new ArrayList<>(candidates);
    Collections.sort(result);
    return result.subList(0, Math.min(count, result.size()));
}
/**
 * Creates a container cluster hosting one cluster controller container per given host,
 * indexed in host iteration order, and adds the default handlers to it.
 */
private ContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant) {
ContainerCluster clusterControllers = new ContainerCluster(parent, name, name, new ClusterControllerClusterVerifier());
List<Container> containers = new ArrayList<>();
// NOTE(review): a freshly created ContainerCluster presumably has no containers,
// making this guard always true — confirm.
if (clusterControllers.getContainers().isEmpty()) {
int index = 0;
for (HostResource host : hosts) {
ClusterControllerContainer clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant);
clusterControllerContainer.setHostResource(host);
clusterControllerContainer.initService();
clusterControllerContainer.setProp("clustertype", "admin")
.setProp("clustername", clusterControllers.getName())
.setProp("index", String.valueOf(index));
containers.add(clusterControllerContainer);
++index;
}
}
clusterControllers.addContainers(containers);
ContainerModelBuilder.addDefaultHandler_legacyBuilder(clusterControllers);
return clusterControllers;
}
/** Adds the cluster controller component and a per-content-cluster configurer to every controller container. */
private void addClusterControllerComponentsForThisCluster(ContainerCluster clusterControllers, ContentCluster contentCluster) {
    int containerCount = clusterControllers.getContainers().size();
    int index = 0;
    for (Container container : clusterControllers.getContainers()) {
        // The controller component is shared; add it only once per container.
        if ( ! hasClusterControllerComponent(container))
            container.addComponent(new ClusterControllerComponent());
        container.addComponent(new ClusterControllerConfigurer(contentCluster, index, containerCount));
        index++;
    }
}
/** Returns whether the given container already holds a ClusterControllerComponent. */
private boolean hasClusterControllerComponent(Container container) {
    for (Object component : container.getComponents().getComponents()) {
        if (component instanceof ClusterControllerComponent)
            return true;
    }
    return false;
}
} | class Builder {
/** The admin model of this system or null if none (which only happens in tests) */
private final Admin admin;
/** Creates a content cluster builder. The admin model may be null (tests only). */
public Builder(Admin admin) {
this.admin = admin;
}
/**
 * Builds a ContentCluster from the content element of services.xml.
 * The wiring below is order-sensitive: later steps read fields (search, persistenceFactory,
 * rootGroup, ...) assigned by earlier ones.
 */
public ContentCluster build(Collection<ContainerModel> containers, ConfigModelContext context, Element w3cContentElement) {
ModelElement contentElement = new ModelElement(w3cContentElement);
ModelElement documentsElement = contentElement.getChild("documents");
// Parse document types, routing selection and redundancy before creating the cluster.
Map<String, NewDocumentType> documentDefinitions =
new SearchDefinitionBuilder().build(context.getDeployState().getDocumentModel().getDocumentManager(), documentsElement);
String routingSelection = new DocumentSelectionBuilder().build(documentsElement);
Redundancy redundancy = new RedundancyBuilder().build(contentElement);
Set<NewDocumentType> globallyDistributedDocuments = new GlobalDistributionBuilder(documentDefinitions).build(documentsElement);
ContentCluster c = new ContentCluster(context.getParentProducer(), getClusterName(contentElement), documentDefinitions,
globallyDistributedDocuments, routingSelection, redundancy,
context.getDeployState().getProperties().zone());
// Wire up the sub-clusters of this content cluster.
c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterName(contentElement), contentElement).build(c, contentElement.getXml());
c.search = new ContentSearchCluster.Builder(documentDefinitions, globallyDistributedDocuments).build(c, contentElement.getXml());
c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c);
c.storageNodes = new StorageCluster.Builder().build(c, w3cContentElement);
c.distributorNodes = new DistributorCluster.Builder(c).build(c, w3cContentElement);
c.rootGroup = new StorageGroup.Builder(contentElement, c, context).buildRootGroup();
validateThatGroupSiblingsAreUnique(c.clusterName, c.rootGroup);
redundancy.setExplicitGroups(c.getRootGroup().getNumberOfLeafGroups());
c.search.handleRedundancy(redundancy);
IndexedSearchCluster index = c.search.getIndexed();
if (index != null) {
setupIndexedCluster(index, contentElement);
}
// Indexed search requires the proton engine.
if (c.search.hasIndexedCluster() && !(c.persistenceFactory instanceof ProtonEngine.Factory) ) {
throw new RuntimeException("If you have indexed search you need to have proton as engine");
}
if (documentsElement != null) {
ModelElement e = documentsElement.getChild("document-processing");
if (e != null) {
setupDocumentProcessing(c, e);
}
} else if (c.persistenceFactory != null) {
throw new IllegalArgumentException("The specified content engine requires the <documents> element to be specified.");
}
ModelElement tuning = contentElement.getChild("tuning");
if (tuning != null) {
setupTuning(c, tuning);
}
// NOTE(review): a null root appears to indicate a partial/test model — cluster
// controllers are skipped in that case; confirm.
if (context.getParentProducer().getRoot() == null) return c;
addClusterControllers(containers, context, c.rootGroup, contentElement, c.clusterName, c);
return c;
}
/**
 * Applies search settings from the content element to the indexed search cluster:
 * query timeout, visibility delay, search coverage, dispatch spec and dispatch tuning.
 */
private void setupIndexedCluster(IndexedSearchCluster index, ModelElement element) {
ContentSearch search = DomContentSearchBuilder.build(element);
Double queryTimeout = search.getQueryTimeout();
if (queryTimeout != null) {
// query-timeout may be given in either proton or content, never both
Preconditions.checkState(index.getQueryTimeout() == null,
"You may not specify query-timeout in both proton and content.");
index.setQueryTimeout(queryTimeout);
}
Double visibilityDelay = search.getVisibilityDelay();
if (visibilityDelay != null) {
index.setVisibilityDelay(visibilityDelay);
}
index.setSearchCoverage(DomSearchCoverageBuilder.build(element));
index.setDispatchSpec(DomDispatchBuilder.build(element));
if (index.useMultilevelDispatchSetup()) {
// An explicit multilevel dispatch setup must be validated against the search nodes.
new MultilevelDispatchValidator(index.getClusterName(), index.getDispatchSpec(), index.getSearchNodes()).validate();
}
TuningDispatch tuningDispatch = DomTuningDispatchBuilder.build(element);
Integer maxHitsPerPartition = tuningDispatch.getMaxHitsPerPartition();
Boolean useLocalNode = tuningDispatch.getUseLocalNode();
// Lazily create the tuning containers before writing dispatch tuning into them.
if (index.getTuning() == null) {
index.setTuning(new Tuning(index));
}
if (index.getTuning().dispatch == null) {
index.getTuning().dispatch = new Tuning.Dispatch();
}
if (maxHitsPerPartition != null) {
index.getTuning().dispatch.maxHitsPerPartition = maxHitsPerPartition;
}
if (useLocalNode != null) {
index.getTuning().dispatch.useLocalNode = useLocalNode;
}
index.getTuning().dispatch.minGroupCoverage = tuningDispatch.getMinGroupCoverage();
index.getTuning().dispatch.minActiveDocsCoverage = tuningDispatch.getMinActiveDocsCoverage();
index.getTuning().dispatch.policy = tuningDispatch.getDispatchPolicy();
}
/** Applies docproc cluster and chain names from the document-processing element to the indexed search cluster, if any. */
private void setupDocumentProcessing(ContentCluster c, ModelElement e) {
    String clusterAttribute = e.getStringAttribute("cluster");
    String docprocCluster = clusterAttribute == null ? null : clusterAttribute.trim();
    if (c.getSearch().hasIndexedCluster() && docprocCluster != null && ! docprocCluster.isEmpty())
        c.getSearch().getIndexed().setIndexingClusterName(docprocCluster);

    String chainAttribute = e.getStringAttribute("chain");
    String docprocChain = chainAttribute == null ? null : chainAttribute.trim();
    if (c.getSearch().hasIndexedCluster() && docprocChain != null && ! docprocChain.isEmpty())
        c.getSearch().getIndexed().setIndexingChainName(docprocChain);
}
/**
 * Applies the tuning element: distribution type and merge settings.
 *
 * @throws IllegalStateException if an unknown distribution type is specified
 */
private void setupTuning(ContentCluster c, ModelElement tuning) {
    ModelElement distribution = tuning.getChild("distribution");
    if (distribution != null) {
        String type = distribution.getStringAttribute("type");
        if (type != null) {
            // equalsIgnoreCase is locale-independent; toLowerCase().equals(...) would
            // misbehave under e.g. the Turkish default locale (dotless i).
            if (type.equalsIgnoreCase("strict")) {
                c.distributionMode = DistributionMode.STRICT;
            } else if (type.equalsIgnoreCase("loose")) {
                c.distributionMode = DistributionMode.LOOSE;
            } else if (type.equalsIgnoreCase("legacy")) {
                c.distributionMode = DistributionMode.LEGACY;
            } else {
                throw new IllegalStateException("Distribution type " + type + " not supported.");
            }
        }
    }
    ModelElement merges = tuning.getChild("merges");
    if (merges != null) {
        Integer maxNodesPerMerge = merges.getIntegerAttribute("max-nodes-per-merge");
        if (maxNodesPerMerge != null) {
            c.maxNodesPerMerge = maxNodesPerMerge;
        }
    }
}
/**
 * Verifies that the direct subgroups of the given group have unique names.
 *
 * @throws IllegalArgumentException if two sibling groups share a name
 */
private void validateGroupSiblings(String cluster, StorageGroup group) {
    Set<String> seenNames = new HashSet<>();
    for (StorageGroup subgroup : group.getSubgroups()) {
        String name = subgroup.getName();
        // Set.add returns false when the name was already present — no separate contains check needed.
        if ( ! seenNames.add(name)) {
            throw new IllegalArgumentException("Cluster '" + cluster + "' has multiple groups " +
                    "with name '" + name + "' in the same subgroup. Group sibling names must be unique.");
        }
    }
}
/** Recursively checks that sibling group names are unique throughout the whole group tree. */
private void validateThatGroupSiblingsAreUnique(String cluster, StorageGroup group) {
    if (group == null) return; // nothing to validate
    validateGroupSiblings(cluster, group);
    group.getSubgroups().forEach(subgroup -> validateThatGroupSiblingsAreUnique(cluster, subgroup));
}
/**
 * Attaches a cluster controller cluster to this content cluster: reuses the controllers of an
 * overlapping content cluster when one exists, otherwise creates them on dedicated or drawn
 * hosts (multitenant) or on the admin hosts (single tenant).
 */
private void addClusterControllers(Collection<ContainerModel> containers, ConfigModelContext context,
StorageGroup rootGroup, ModelElement contentElement,
String contentClusterName, ContentCluster contentCluster) {
// No admin model (tests) or no persistence engine: nothing to control.
if (admin == null) return;
if (contentCluster.getPersistence() == null) return;
ContainerCluster clusterControllers;
ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster);
if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) {
// Share the controllers of a content cluster whose nodes overlap with ours.
clusterControllers = overlappingCluster.getClusterControllers();
}
else if (admin.multitenant()) {
String clusterName = contentClusterName + "-controllers";
// Use an explicit controllers specification when given, otherwise 3 non-dedicated nodes.
NodesSpecification nodesSpecification =
NodesSpecification.optionalDedicatedFromParent(contentElement.getChild("controllers"), context.getDeployState().getWantedNodeVespaVersion())
.orElse(NodesSpecification.nonDedicated(3, context.getDeployState().getWantedNodeVespaVersion()));
Collection<HostResource> hosts = nodesSpecification.isDedicated() ?
getControllerHosts(nodesSpecification, admin, clusterName, context) :
drawControllerHosts(nodesSpecification.count(), rootGroup, containers);
clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone"), hosts, clusterName, true);
contentCluster.clusterControllers = clusterControllers;
}
else {
// Single tenant: one shared controller cluster on the admin hosts, created on first use.
clusterControllers = admin.getClusterControllers();
if (clusterControllers == null) {
List<HostResource> hosts = admin.getClusterControllerHosts();
if (hosts.size() > 1) {
admin.deployLogger().log(Level.INFO, "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly.");
}
clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false);
admin.setClusterControllers(clusterControllers);
}
}
addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster);
}
/** Returns any other content cluster which shares nodes with this, or null if none are built */
private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) {
    for (ContentCluster candidate : root.getChildrenByTypeRecursive(ContentCluster.class)) {
        if (candidate == contentCluster) continue; // skip ourselves
        if (overlaps(contentCluster, candidate)) return candidate;
    }
    return null;
}
/** Returns whether the two content clusters share at least one host. */
private boolean overlaps(ContentCluster c1, ContentCluster c2) {
    Set<HostResource> firstHosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet());
    Set<HostResource> secondHosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet());
    return ! Collections.disjoint(firstHosts, secondHosts);
}
/** Provisions hosts for a dedicated cluster controller cluster. */
private Collection<HostResource> getControllerHosts(NodesSpecification nodesSpecification, Admin admin, String clusterName, ConfigModelContext context) {
    ClusterSpec.Id controllerClusterId = ClusterSpec.Id.from(clusterName);
    return nodesSpecification.provision(admin.getHostSystem(), ClusterSpec.Type.admin, controllerClusterId, context.getDeployLogger()).keySet();
}
/**
 * Draws hosts for cluster controllers from the content nodes, keeping an odd number
 * (presumably so a clear majority is always possible — confirm).
 * The containers parameter is currently unused.
 */
private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup, Collection<ContainerModel> containers) {
    List<HostResource> hosts = drawContentHostsRecursively(count, rootGroup);
    // Guard the empty case: subList(0, -1) would throw IndexOutOfBoundsException.
    if ( ! hosts.isEmpty() && hosts.size() % 2 == 0)
        hosts = hosts.subList(0, hosts.size() - 1);
    return hosts;
}
/**
 * Draws up to <code>count</code> container nodes to use as cluster controllers.
 *
 * Deterministic: this will draw the same nodes each time it is invoked
 * as long as cluster names and node indexes are unchanged.
 */
private List<HostResource> drawContainerHosts(int count, Collection<ContainerModel> containerClusters,
                                              Set<HostResource> usedHosts) {
    if (containerClusters.isEmpty()) return Collections.emptyList();

    // Deterministic ordering: clusters by name, containers by index.
    List<HostResource> orderedHosts = new ArrayList<>();
    for (ContainerCluster cluster : clustersSortedByName(containerClusters))
        orderedHosts.addAll(hostResourcesSortedByIndex(cluster));

    // Keep only unused hosts which do not already run a cluster controller.
    List<HostResource> candidates = orderedHosts.stream()
            .filter(host -> ! usedHosts.contains(host))
            .filter(host -> ! hostHasClusterController(host.getHostName(), orderedHosts))
            .distinct()
            .collect(Collectors.toList());
    return candidates.subList(0, Math.min(candidates.size(), count));
}
/** Returns the container clusters of the given models, sorted by cluster name. */
private List<ContainerCluster> clustersSortedByName(Collection<ContainerModel> containerModels) {
    List<ContainerCluster> clusters = containerModels.stream()
            .map(ContainerModel::getCluster)
            .collect(Collectors.toList());
    clusters.sort(Comparator.comparing(ContainerCluster::getName));
    return clusters;
}
/** Returns the hosts of the given cluster's containers, ordered by container index. */
private List<HostResource> hostResourcesSortedByIndex(ContainerCluster cluster) {
    List<Container> orderedContainers = new ArrayList<>(cluster.getContainers());
    orderedContainers.sort(Comparator.comparing(Container::index));
    List<HostResource> hosts = new ArrayList<>(orderedContainers.size());
    for (Container container : orderedContainers)
        hosts.add(container.getHostResource());
    return hosts;
}
/** Returns whether any host having the given hostname has a cluster controller */
private boolean hostHasClusterController(String hostname, List<HostResource> hosts) {
    for (HostResource host : hosts) {
        if (host.getHostName().equals(hostname) && hasClusterController(host))
            return true;
    }
    return false;
}
/** Returns whether one of the services on the given host is a cluster controller container. */
private boolean hasClusterController(HostResource host) {
    boolean found = false;
    for (Service service : host.getServices()) {
        if (service instanceof ClusterControllerContainer) {
            found = true;
            break;
        }
    }
    return found;
}
/**
 * Draws <code>count</code> nodes from as many different content groups below this as possible.
 * Maximum spread is only achieved when the groups are balanced and never share a physical
 * node, and not over all levels of a multilevel group hierarchy.
 */
private List<HostResource> drawContentHostsRecursively(int count, StorageGroup group) {
    Set<HostResource> candidates = new HashSet<>();
    if ( ! group.getNodes().isEmpty()) {
        // Leaf group: the hosts of all non-retired nodes are candidates.
        for (StorageNode node : group.getNodes()) {
            if ( ! node.isRetired())
                candidates.add(node.getHostResource());
        }
    }
    else {
        // Interior group: draw evenly from each subgroup, rounding up.
        int perSubgroup = (int) Math.ceil((double) count / group.getSubgroups().size());
        for (StorageGroup subgroup : group.getSubgroups())
            candidates.addAll(drawContentHostsRecursively(perSubgroup, subgroup));
    }
    // Sort for determinism, then cap at the requested count.
    List<HostResource> result = new ArrayList<>(candidates);
    Collections.sort(result);
    return result.subList(0, Math.min(count, result.size()));
}
/**
 * Creates a container cluster hosting one cluster controller container per given host,
 * indexed in host iteration order, and adds the default handlers to it.
 */
private ContainerCluster createClusterControllers(AbstractConfigProducer parent, Collection<HostResource> hosts, String name, boolean multitenant) {
ContainerCluster clusterControllers = new ContainerCluster(parent, name, name, new ClusterControllerClusterVerifier());
List<Container> containers = new ArrayList<>();
// NOTE(review): a freshly created ContainerCluster presumably has no containers,
// making this guard always true — confirm.
if (clusterControllers.getContainers().isEmpty()) {
int index = 0;
for (HostResource host : hosts) {
ClusterControllerContainer clusterControllerContainer = new ClusterControllerContainer(clusterControllers, index, multitenant);
clusterControllerContainer.setHostResource(host);
clusterControllerContainer.initService();
clusterControllerContainer.setProp("clustertype", "admin")
.setProp("clustername", clusterControllers.getName())
.setProp("index", String.valueOf(index));
containers.add(clusterControllerContainer);
++index;
}
}
clusterControllers.addContainers(containers);
ContainerModelBuilder.addDefaultHandler_legacyBuilder(clusterControllers);
return clusterControllers;
}
/** Adds the cluster controller component and a per-content-cluster configurer to every controller container. */
private void addClusterControllerComponentsForThisCluster(ContainerCluster clusterControllers, ContentCluster contentCluster) {
    int containerCount = clusterControllers.getContainers().size();
    int index = 0;
    for (Container container : clusterControllers.getContainers()) {
        // The controller component is shared; add it only once per container.
        if ( ! hasClusterControllerComponent(container))
            container.addComponent(new ClusterControllerComponent());
        container.addComponent(new ClusterControllerConfigurer(contentCluster, index, containerCount));
        index++;
    }
}
private boolean hasClusterControllerComponent(Container container) {
    // True iff the container already carries a ClusterControllerComponent.
    boolean found = false;
    for (Object component : container.getComponents().getComponents()) {
        if (component instanceof ClusterControllerComponent) {
            found = true;
            break;
        }
    }
    return found;
}
} |
How about filtering out those entries with 0 nodes? | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
Map<ClusterSpec, Set<Node>> nodesByCluster = getNodesBelongingToApplication(allNodes, applicationId).stream()
.collect(Collectors.groupingBy(
node -> node.allocation().get().membership().cluster(),
Collectors.toSet()));
Map<ClusterSpec, Set<Node>> retireableNodesByCluster = nodesByCluster.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> filterRetireableNodes(entry.getValue())));
if (retireableNodesByCluster.isEmpty()) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodesByCluster.entrySet().stream()
.flatMap(entry -> entry.getValue().stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(getNumberNodesAllowToRetireForCluster(nodesByCluster.get(entry.getKey()), MAX_SIMULTANEOUS_RETIRES_PER_CLUSTER)))
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | entry -> filterRetireableNodes(entry.getValue()))); | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
Map<ClusterSpec, Set<Node>> nodesByCluster = getNodesBelongingToApplication(allNodes, applicationId).stream()
.collect(Collectors.groupingBy(
node -> node.allocation().get().membership().cluster(),
Collectors.toSet()));
Map<ClusterSpec, Set<Node>> retireableNodesByCluster = nodesByCluster.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> filterRetireableNodes(entry.getValue())));
if (retireableNodesByCluster.values().stream().mapToInt(Set::size).count() == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodesByCluster.entrySet().stream()
.flatMap(entry -> entry.getValue().stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(getNumberNodesAllowToRetireForCluster(nodesByCluster.get(entry.getKey()), MAX_SIMULTANEOUS_RETIRES_PER_CLUSTER)))
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_CLUSTER = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
/**
 * Creates a NodeRetirer maintainer.
 *
 * @param nodeRepository repository this maintainer reads and writes nodes through
 * @param zone the zone this instance is running in
 * @param flavorSpareChecker tracks whether a flavor has spare capacity left for retirement
 * @param interval how often the maintenance job runs
 * @param deployer used to redeploy applications after marking their nodes for retirement
 * @param jobControl on/off switch for maintenance jobs
 * @param retirementPolicy decides which nodes should be retired
 * @param applies the zones this maintainer is allowed to run in
 */
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
// Stop immediately when constructed outside the target zones.
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
// NOTE(review): the fields below are still assigned after deconstruct() above —
// presumably harmless since the job is stopped, but worth confirming.
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
    // Allocated nodes are only considered once every retireable unallocated node has been parked.
    boolean unallocatedFullyRetired = retireUnallocated();
    if (unallocatedFullyRetired) {
        retireAllocated();
    }
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
// All work on unallocated nodes happens under the unallocated-nodes lock.
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
// Refresh the spare-capacity bookkeeping before deciding what can be retired.
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
// Group the ready nodes the policy wants retired by flavor, then for each flavor
// park as many of them as spare capacity allows. NOTE(review): the terminal filter
// below performs the parking as a side effect of the predicate — unusual stream use;
// the count() at the end is what forces the pipeline to run.
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
// Stop as soon as this flavor has no spare nodes left to replace the retiree.
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
// Mark for deprovisioning and move the node directly to parked.
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
// Anything left in the set could not be retired for lack of spares.
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
// True iff every flavor retired all the nodes the policy asked for.
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(Collection<Node> allNodes, ApplicationId applicationId) {
    // Selects the allocated nodes whose owner is the given application.
    List<Node> owned = new ArrayList<>();
    for (Node node : allNodes) {
        if (node.allocation().isPresent() && node.allocation().get().owner().equals(applicationId))
            owned.add(node);
    }
    return owned;
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(Collection<Node> nodes) {
    // Count active nodes per owning application, then order applications by
    // descending node count.
    Map<ApplicationId, Long> activeNodeCounts = nodes.stream()
            .filter(node -> node.state() == Node.State.active)
            .collect(Collectors.groupingBy(
                    node -> node.allocation().get().owner(),
                    Collectors.counting()));
    return activeNodeCounts.entrySet().stream()
            .sorted((a, b) -> Long.compare(b.getValue(), a.getValue()))
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());
}
/**
* @param nodes Collection of nodes that are considered for retirement
* @return Set of nodes that all should eventually be retired
*/
Set<Node> filterRetireableNodes(Collection<Node> nodes) {
    // A node is retireable when it is active, not already marked for retirement,
    // and the configured policy says it should be retired.
    return nodes.stream()
            .filter(node -> node.state() == Node.State.active
                    && ! node.status().wantToRetire()
                    && retirementPolicy.shouldRetire(node))
            .collect(Collectors.toSet());
}
/**
* @param clusterNodes All the nodes allocated to an application belonging to a single cluster
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForCluster(Collection<Node> clusterNodes, long maxSimultaneousRetires) {
    // Nodes already marked wantToRetire (and not yet parked) count against the budget.
    long alreadyRetiring = clusterNodes.stream()
            .filter(node -> node.status().wantToRetire() && node.state() != Node.State.parked)
            .count();
    long remaining = maxSimultaneousRetires - alreadyRetiring;
    return remaining > 0 ? remaining : 0;
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(Collection<Node> allNodes) {
    // Tallies node counts keyed first by flavor, then by node state.
    Map<Flavor, Map<Node.State, Long>> countsByFlavorByState = new HashMap<>();
    for (Node node : allNodes)
        countsByFlavorByState
                .computeIfAbsent(node.flavor(), flavor -> new HashMap<>())
                .merge(node.state(), 1L, Long::sum);
    return countsByFlavorByState;
}
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_CLUSTER = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(Collection<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(Collection<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param nodes Collection of nodes that are considered for retirement
* @return Set of nodes that all should eventually be retired
*/
Set<Node> filterRetireableNodes(Collection<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param clusterNodes All the nodes allocated to an application belonging to a single cluster
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForCluster(Collection<Node> clusterNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = clusterNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(Collection<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} |
Fixed. | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
Map<ClusterSpec, Set<Node>> nodesByCluster = getNodesBelongingToApplication(allNodes, applicationId).stream()
.collect(Collectors.groupingBy(
node -> node.allocation().get().membership().cluster(),
Collectors.toSet()));
Map<ClusterSpec, Set<Node>> retireableNodesByCluster = nodesByCluster.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> filterRetireableNodes(entry.getValue())));
if (retireableNodesByCluster.isEmpty()) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodesByCluster.entrySet().stream()
.flatMap(entry -> entry.getValue().stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(getNumberNodesAllowToRetireForCluster(nodesByCluster.get(entry.getKey()), MAX_SIMULTANEOUS_RETIRES_PER_CLUSTER)))
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | entry -> filterRetireableNodes(entry.getValue()))); | void retireAllocated() {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
for (ApplicationId applicationId : activeApplications) {
Map<ClusterSpec, Set<Node>> nodesByCluster = getNodesBelongingToApplication(allNodes, applicationId).stream()
.collect(Collectors.groupingBy(
node -> node.allocation().get().membership().cluster(),
Collectors.toSet()));
Map<ClusterSpec, Set<Node>> retireableNodesByCluster = nodesByCluster.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> filterRetireableNodes(entry.getValue())));
if (retireableNodesByCluster.values().stream().mapToInt(Set::size).count() == 0) continue;
Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId, Duration.ofMinutes(30));
if ( ! deployment.isPresent()) continue;
Set<Node> replaceableNodes = retireableNodesByCluster.entrySet().stream()
.flatMap(entry -> entry.getValue().stream()
.filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
.limit(getNumberNodesAllowToRetireForCluster(nodesByCluster.get(entry.getKey()), MAX_SIMULTANEOUS_RETIRES_PER_CLUSTER)))
.collect(Collectors.toSet());
if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
}
nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
ApplicationId app = nodes.iterator().next().allocation().get().owner();
Set<Node> nodesToRetire;
try (Mutex lock = nodeRepository().lock(app)) {
nodesToRetire = nodes.stream()
.map(node ->
nodeRepository().getNode(node.hostname())
.filter(upToDateNode -> node.state() == Node.State.active)
.filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
.flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
.collect(Collectors.toSet());
nodesToRetire.forEach(node -> {
log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
" with flavor " + node.flavor().name() +
" allocated to " + node.allocation().get().owner() + ". Policy: " +
retirementPolicy.getClass().getSimpleName());
Node updatedNode = node.with(node.status()
.withWantToRetire(true)
.withWantToDeprovision(true));
nodeRepository().write(updatedNode);
});
}
if (! nodesToRetire.isEmpty()) deployment.activate();
}));
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_CLUSTER = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(Collection<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(Collection<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param nodes Collection of nodes that are considered for retirement
* @return Set of nodes that all should eventually be retired
*/
Set<Node> filterRetireableNodes(Collection<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param clusterNodes All the nodes allocated to an application belonging to a single cluster
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForCluster(Collection<Node> clusterNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = clusterNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(Collection<Node> allNodes) {
return allNodes.stream()
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.groupingBy(Node::state, Collectors.counting())));
}
} | class NodeRetirer extends Maintainer {
public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
flavorSpareCount.getNumReadyAmongReplacees() > 2;
private static final long MAX_SIMULTANEOUS_RETIRES_PER_CLUSTER = 1;
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final Deployer deployer;
private final FlavorSpareChecker flavorSpareChecker;
private final RetirementPolicy retirementPolicy;
public NodeRetirer(NodeRepository nodeRepository, Zone zone, FlavorSpareChecker flavorSpareChecker, Duration interval,
Deployer deployer, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
super(nodeRepository, interval, jobControl);
if (! Arrays.asList(applies).contains(zone)) {
String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
deconstruct();
}
this.deployer = deployer;
this.retirementPolicy = retirementPolicy;
this.flavorSpareChecker = flavorSpareChecker;
}
@Override
protected void maintain() {
if (retireUnallocated()) {
retireAllocated();
}
}
/**
* Retires unallocated nodes by moving them directly to parked.
* Returns true iff all there are no unallocated nodes that match the retirement policy
*/
boolean retireUnallocated() {
try (Mutex lock = nodeRepository().lockUnallocated()) {
List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
.filter(node -> node.state() == Node.State.ready)
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.groupingBy(
Node::flavor,
Collectors.toSet()))
.entrySet().stream()
.filter(entry -> {
Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
Node nodeToRetire = iter.next();
if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer,
"Policy: " + retirementPolicy.getClass().getSimpleName());
iter.remove();
}
if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
.collect(Collectors.joining(", "));
log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
}
return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
}).count();
return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
}
}
private List<Node> getNodesBelongingToApplication(Collection<Node> allNodes, ApplicationId applicationId) {
return allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(applicationId))
.collect(Collectors.toList());
}
/**
* Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
*/
List<ApplicationId> getActiveApplicationIds(Collection<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.collect(Collectors.groupingBy(
node -> node.allocation().get().owner(),
Collectors.counting()))
.entrySet().stream()
.sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
}
/**
* @param nodes Collection of nodes that are considered for retirement
* @return Set of nodes that all should eventually be retired
*/
Set<Node> filterRetireableNodes(Collection<Node> nodes) {
return nodes.stream()
.filter(node -> node.state() == Node.State.active)
.filter(node -> !node.status().wantToRetire())
.filter(retirementPolicy::shouldRetire)
.collect(Collectors.toSet());
}
/**
* @param clusterNodes All the nodes allocated to an application belonging to a single cluster
* @return number of nodes we can safely start retiring
*/
long getNumberNodesAllowToRetireForCluster(Collection<Node> clusterNodes, long maxSimultaneousRetires) {
long numNodesInWantToRetire = clusterNodes.stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.state() != Node.State.parked)
.count();
return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
}
/** Counts nodes per flavor, broken down by node state. */
private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(Collection<Node> allNodes) {
    return allNodes.stream()
            .collect(Collectors.groupingBy(
                    node -> node.flavor(),
                    Collectors.groupingBy(node -> node.state(), Collectors.counting())));
}
} |
Why is there a second converge()? It looks like this test could simply have ended at line 130. | public void upToDateContainerIsUntouched() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository);
inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName,
new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
inOrder.verify(orchestrator).resume(hostName);
} | nodeAgent.converge(); | public void upToDateContainerIsUntouched() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository);
inOrder.verify(dockerOperations).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName,
new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
inOrder.verify(dockerOperations, never()).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository, never()).updateNodeAttributes(
hostName,
new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
} | class NodeAgentImplTest {
// Resource constraints used by the shared node-spec builder below
private static final double MIN_CPU_CORES = 2;
private static final double MIN_MAIN_MEMORY_AVAILABLE_GB = 16;
private static final double MIN_DISK_AVAILABLE_GB = 250;
private static final String vespaVersion = "1.2.3";
// Identity of the node/container under test
private final String hostName = "host1.test.yahoo.com";
private final ContainerName containerName = new ContainerName("host1");
private final DockerImage dockerImage = new DockerImage("dockerImage");
// Collaborators of NodeAgentImpl, mocked so each test can stub and verify interactions
private final DockerOperations dockerOperations = mock(DockerOperations.class);
private final NodeRepository nodeRepository = mock(NodeRepository.class);
private final Orchestrator orchestrator = mock(Orchestrator.class);
private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
private final AclMaintainer aclMaintainer = mock(AclMaintainer.class);
// Stats object with all-empty sections; returned by the mocked getContainerStats by default
private final Docker.ContainerStats emptyContainerStats = new ContainerStatsImpl(Collections.emptyMap(),
        Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
private final PathResolver pathResolver = mock(PathResolver.class);
// Manual clock lets tests advance time deterministically (see testSetFrozen)
private final ManualClock clock = new ManualClock();
private final Environment environment = new Environment.Builder()
        .environment("dev")
        .region("us-east-1")
        .parentHostHostname("parent.host.name.yahoo.com")
        .inetAddressResolver(new InetAddressResolver())
        .pathResolver(pathResolver).build();
// Base node spec shared by the tests; each test adds the state/image/generation fields it needs
private final ContainerNodeSpec.Builder nodeSpecBuilder = new ContainerNodeSpec.Builder()
        .hostname(hostName)
        .nodeType("tenant")
        .nodeFlavor("docker")
        .minCpuCores(MIN_CPU_CORES)
        .minMainMemoryAvailableGb(MIN_MAIN_MEMORY_AVAILABLE_GB)
        .minDiskAvailableGb(MIN_DISK_AVAILABLE_GB);
/** When no container exists for an active node, converge() must create and start one. */
@Test
public void absentContainerCausesStart() throws Exception {
    final long restartGeneration = 1;
    final long rebootGeneration = 0;
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .wantedDockerImage(dockerImage)
            .nodeState(Node.State.active)
            .wantedVespaVersion(vespaVersion)
            .wantedRestartGeneration(restartGeneration)
            .currentRestartGeneration(restartGeneration)
            .wantedRebootGeneration(rebootGeneration)
            .build();
    // null image => mocked DockerOperations reports no existing container
    NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
    when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
    when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
    nodeAgent.converge();
    // Nothing existed, so nothing should be removed, suspended or downloaded
    verify(dockerOperations, never()).removeContainer(any());
    verify(orchestrator, never()).suspend(any(String.class));
    verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
    // ACLs are applied before start; the node is resumed and the node repo updated last
    final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository, aclMaintainer);
    inOrder.verify(aclMaintainer, times(1)).run();
    inOrder.verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
    inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
    inOrder.verify(orchestrator).resume(hostName);
    inOrder.verify(nodeRepository).updateNodeAttributes(
            hostName, new NodeAttributes()
                    .withRestartGeneration(restartGeneration)
                    .withRebootGeneration(rebootGeneration)
                    .withDockerImage(dockerImage)
                    .withVespaVersion(vespaVersion));
}
@Test
// When the wanted image differs from the current one, the agent only schedules a
// download of the new image; the running container is left untouched meanwhile.
public void containerIsNotStoppedIfNewImageMustBePulled() throws Exception {
    final DockerImage newDockerImage = new DockerImage("new-image");
    final long wantedRestartGeneration = 2;
    final long currentRestartGeneration = 1;
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .wantedDockerImage(newDockerImage)
            .currentDockerImage(dockerImage)
            .nodeState(Node.State.active)
            .wantedVespaVersion(vespaVersion)
            .vespaVersion(vespaVersion)
            .wantedRestartGeneration(wantedRestartGeneration)
            .currentRestartGeneration(currentRestartGeneration)
            .build();
    NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
    when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
    when(dockerOperations.shouldScheduleDownloadOfImage(any())).thenReturn(true);
    nodeAgent.converge();
    // No suspend/resume/removal while the new image is still being fetched
    verify(orchestrator, never()).suspend(any(String.class));
    verify(orchestrator, never()).resume(any(String.class));
    verify(dockerOperations, never()).removeContainer(any());
    final InOrder inOrder = inOrder(dockerOperations);
    inOrder.verify(dockerOperations, times(1)).shouldScheduleDownloadOfImage(eq(newDockerImage));
    inOrder.verify(dockerOperations, times(1)).scheduleDownloadOfImage(eq(containerName), eq(newDockerImage), any());
}
@Test
// A pending restart (wanted != current generation) must not restart or resume the
// container when converge() fails before completing the restart sequence.
public void noRestartIfOrchestratorSuspendFails() throws Exception {
    final long wantedRestartGeneration = 2;
    final long currentRestartGeneration = 1;
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .wantedDockerImage(dockerImage)
            .currentDockerImage(dockerImage)
            .nodeState(Node.State.active)
            .wantedVespaVersion(vespaVersion)
            .vespaVersion(vespaVersion)
            .wantedRestartGeneration(wantedRestartGeneration)
            .currentRestartGeneration(currentRestartGeneration)
            .build();
    NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
    // NOTE(review): nodeRepository.getContainerNodeSpec is never stubbed here, so the
    // failure presumably happens before orchestrator.suspend() is even reached, not at
    // suspend() as the test name suggests -- confirm the intended failure point.
    try {
        nodeAgent.converge();
        fail("Expected to throw an exception");
    } catch (Exception ignored) { }
    verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
    verify(orchestrator, never()).resume(any(String.class));
    verify(nodeRepository, never()).updateNodeAttributes(any(String.class), any(NodeAttributes.class));
}
@Test
// A failed node keeps its running container: nothing is removed and the orchestrator
// is not resumed, but node attributes are still reported back to the node repo.
public void failedNodeRunningContainerShouldStillBeRunning() throws Exception {
    final long restartGeneration = 1;
    final long rebootGeneration = 0;
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .wantedDockerImage(dockerImage)
            .currentDockerImage(dockerImage)
            .nodeState(Node.State.failed)
            .wantedVespaVersion(vespaVersion)
            .vespaVersion(vespaVersion)
            .wantedRestartGeneration(restartGeneration)
            .currentRestartGeneration(restartGeneration)
            .build();
    NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
    when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
    nodeAgent.converge();
    verify(dockerOperations, never()).removeContainer(any());
    verify(orchestrator, never()).resume(any(String.class));
    verify(nodeRepository).updateNodeAttributes(
            hostName, new NodeAttributes()
                    .withRestartGeneration(restartGeneration)
                    .withRebootGeneration(rebootGeneration)
                    .withDockerImage(dockerImage)
                    .withVespaVersion(vespaVersion));
}
@Test
// A ready node needs no action: repeated converge() calls look up the container only
// once and report attributes (with empty image/version) only once.
public void readyNodeLeadsToNoAction() throws Exception {
    final long restartGeneration = 1;
    final long rebootGeneration = 0;
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .nodeState(Node.State.ready)
            .wantedRestartGeneration(restartGeneration)
            .currentRestartGeneration(restartGeneration)
            .wantedRebootGeneration(rebootGeneration)
            .build();
    NodeAgentImpl nodeAgent = makeNodeAgent(null,false);
    when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
    // Converge several times to verify the actions below happen at most once
    nodeAgent.converge();
    nodeAgent.converge();
    nodeAgent.converge();
    verify(dockerOperations, times(1)).getContainer(eq(containerName));
    verify(dockerOperations, never()).removeContainer(any());
    verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
    verify(orchestrator, never()).resume(any(String.class));
    verify(nodeRepository).updateNodeAttributes(
            hostName, new NodeAttributes()
                    .withRestartGeneration(restartGeneration)
                    .withRebootGeneration(rebootGeneration)
                    .withDockerImage(new DockerImage(""))
                    .withVespaVersion(""));
}
@Test
// An inactive node keeps its container running; old-file cleanup runs, but the
// container is never removed and the orchestrator is not resumed.
public void inactiveNodeRunningContainerShouldStillBeRunning() throws Exception {
    final long restartGeneration = 1;
    final long rebootGeneration = 0;
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .wantedDockerImage(dockerImage)
            .currentDockerImage(dockerImage)
            .nodeState(Node.State.inactive)
            .wantedVespaVersion(vespaVersion)
            .vespaVersion(vespaVersion)
            .wantedRestartGeneration(restartGeneration)
            .currentRestartGeneration(restartGeneration)
            .wantedRebootGeneration(rebootGeneration)
            .build();
    NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
    when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
    nodeAgent.converge();
    final InOrder inOrder = inOrder(storageMaintainer, dockerOperations);
    inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
    inOrder.verify(dockerOperations, never()).removeContainer(any());
    verify(orchestrator, never()).resume(any(String.class));
    verify(nodeRepository).updateNodeAttributes(
            hostName, new NodeAttributes()
                    .withRestartGeneration(restartGeneration)
                    .withRebootGeneration(rebootGeneration)
                    .withDockerImage(dockerImage)
                    .withVespaVersion(vespaVersion));
}
@Test
// A reserved node has no container yet, so the node repo is updated with an empty
// docker image and vespa version rather than the wanted ones.
public void reservedNodeDoesNotUpdateNodeRepoWithVersion() throws Exception {
    final long restartGeneration = 1;
    final long rebootGeneration = 0;
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .wantedDockerImage(dockerImage)
            .nodeState(Node.State.reserved)
            .wantedVespaVersion(vespaVersion)
            .wantedRestartGeneration(restartGeneration)
            .currentRestartGeneration(restartGeneration)
            .wantedRebootGeneration(rebootGeneration)
            .build();
    NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
    when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
    nodeAgent.converge();
    verify(nodeRepository).updateNodeAttributes(
            hostName, new NodeAttributes()
                    .withRestartGeneration(restartGeneration)
                    .withRebootGeneration(rebootGeneration)
                    .withDockerImage(new DockerImage(""))
                    .withVespaVersion(""));
}
// Shared scenario: a node in the given state with a running container must have the
// container taken down in order (cleanup -> remove -> archive), be marked available
// for new allocation, and have its image/version attributes cleared.
private void nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State nodeState, Optional<Long> wantedRestartGeneration) {
    wantedRestartGeneration.ifPresent(restartGeneration -> nodeSpecBuilder
            .wantedRestartGeneration(restartGeneration)
            .currentRestartGeneration(restartGeneration));
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .wantedDockerImage(dockerImage)
            .currentDockerImage(dockerImage)
            .nodeState(nodeState)
            .build();
    NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
    when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
    nodeAgent.converge();
    final InOrder inOrder = inOrder(storageMaintainer, dockerOperations, nodeRepository);
    inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
    inOrder.verify(dockerOperations, times(1)).removeContainer(any());
    inOrder.verify(storageMaintainer, times(1)).archiveNodeData(eq(containerName));
    inOrder.verify(nodeRepository, times(1)).markNodeAvailableForNewAllocation(eq(hostName));
    verify(dockerOperations, never()).startContainer(eq(containerName), any());
    verify(orchestrator, never()).resume(any(String.class));
    verify(orchestrator, never()).suspend(any(String.class));
    // Restart generation is echoed back (or null when none was set); image/version cleared
    verify(nodeRepository, times(1)).updateNodeAttributes(
            any(String.class), eq(new NodeAttributes()
                    .withRestartGeneration(wantedRestartGeneration.orElse(null))
                    .withRebootGeneration(0L)
                    .withDockerImage(new DockerImage(""))
                    .withVespaVersion("")));
}
@Test
// Dirty node with a restart generation set: container is taken down and node recycled.
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycled() {
    nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.of(1L));
}
@Test
// Same as above, but without any restart generation on the node spec.
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycledNoRestartGeneration() {
    nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.empty());
}
@Test
// A provisioned node is immediately handed back to the node repo as dirty.
public void provisionedNodeIsMarkedAsDirty() throws Exception {
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .wantedDockerImage(dockerImage)
            .nodeState(Node.State.provisioned)
            .build();
    NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
    when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
    nodeAgent.converge();
    verify(nodeRepository, times(1)).markAsDirty(eq(hostName));
}
@Test
// The container exists but is not running (EXITED, as after a node-admin restart);
// tick() must remove the dead container and start a fresh one.
public void testRestartDeadContainerAfterNodeAdminRestart() throws IOException {
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .currentDockerImage(dockerImage)
            .wantedDockerImage(dockerImage)
            .nodeState(Node.State.active)
            .vespaVersion(vespaVersion)
            .build();
    // isRunning=false makes the mocked container report state EXITED
    NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, false);
    when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
    when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
    nodeAgent.tick();
    verify(dockerOperations, times(1)).removeContainer(any());
    verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
}
@Test
// resumeNode fails on the first converge(); the next converge() must retry it and
// only then resume in the orchestrator and update node attributes.
public void resumeProgramRunsUntilSuccess() throws Exception {
    final long restartGeneration = 1;
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .wantedDockerImage(dockerImage)
            .currentDockerImage(dockerImage)
            .nodeState(Node.State.active)
            .vespaVersion(vespaVersion)
            .wantedRestartGeneration(restartGeneration)
            .currentRestartGeneration(restartGeneration)
            .build();
    NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
    when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
    final InOrder inOrder = inOrder(orchestrator, dockerOperations, nodeRepository);
    // First resumeNode call throws, second succeeds
    doThrow(new RuntimeException("Failed 1st time"))
            .doNothing()
            .when(dockerOperations).resumeNode(eq(containerName));
    try {
        nodeAgent.converge();
        fail("Expected to throw an exception");
    } catch (RuntimeException ignored) { }
    // Only the failed resumeNode happened; nothing after it was reached
    inOrder.verify(dockerOperations, times(1)).resumeNode(any());
    inOrder.verifyNoMoreInteractions();
    nodeAgent.converge();
    inOrder.verify(dockerOperations).resumeNode(any());
    inOrder.verify(orchestrator).resume(hostName);
    inOrder.verify(nodeRepository).updateNodeAttributes(eq(hostName), any());
    inOrder.verifyNoMoreInteractions();
}
@Test
// setFrozen returns whether the wanted frozen state has been reached; while frozen,
// tick() does not converge. Note that a tick right after requesting unfreeze does
// converge (times(2)) even though setFrozen(false) still returned false.
public void testSetFrozen() {
    NodeAgentImpl nodeAgent = spy(makeNodeAgent(dockerImage, true));
    doNothing().when(nodeAgent).converge();
    nodeAgent.tick();
    verify(nodeAgent, times(1)).converge();
    assertFalse(nodeAgent.setFrozen(true));
    nodeAgent.tick();
    verify(nodeAgent, times(1)).converge();
    assertTrue(nodeAgent.setFrozen(true));
    // NOTE(review): the 35s advances on the manual clock presumably move past an
    // internal delay in tick() -- confirm against NodeAgentImpl.
    clock.advance(Duration.ofSeconds(35));
    nodeAgent.tick();
    verify(nodeAgent, times(1)).converge();
    assertFalse(nodeAgent.setFrozen(false));
    nodeAgent.tick();
    verify(nodeAgent, times(2)).converge();
    assertTrue(nodeAgent.setFrozen(false));
    clock.advance(Duration.ofSeconds(35));
    nodeAgent.tick();
    verify(nodeAgent, times(3)).converge();
}
@Test
@SuppressWarnings("unchecked")
// Feeds recorded docker stats (docker.stats.json) through two metric updates and
// asserts that the command pushed into the container via rpc_invoke carries exactly
// the metrics in expected.container.system.metrics.txt (timestamps normalized to 0).
public void testGetRelevantMetrics() throws Exception {
    final ObjectMapper objectMapper = new ObjectMapper();
    ClassLoader classLoader = getClass().getClassLoader();
    File statsFile = new File(classLoader.getResource("docker.stats.json").getFile());
    Map<String, Map<String, Object>> dockerStats = objectMapper.readValue(statsFile, Map.class);
    Map<String, Object> networks = dockerStats.get("networks");
    Map<String, Object> precpu_stats = dockerStats.get("precpu_stats");
    Map<String, Object> cpu_stats = dockerStats.get("cpu_stats");
    Map<String, Object> memory_stats = dockerStats.get("memory_stats");
    Map<String, Object> blkio_stats = dockerStats.get("blkio_stats");
    // Two snapshots: the first uses precpu_stats, the second the newer cpu_stats
    Docker.ContainerStats stats1 = new ContainerStatsImpl(networks, precpu_stats, memory_stats, blkio_stats);
    Docker.ContainerStats stats2 = new ContainerStatsImpl(networks, cpu_stats, memory_stats, blkio_stats);
    ContainerNodeSpec.Owner owner = new ContainerNodeSpec.Owner("tester", "testapp", "testinstance");
    ContainerNodeSpec.Membership membership = new ContainerNodeSpec.Membership("clustType", "clustId", "grp", 3, false);
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .wantedDockerImage(dockerImage)
            .currentDockerImage(dockerImage)
            .nodeState(Node.State.active)
            .vespaVersion(vespaVersion)
            .owner(owner)
            .membership(membership)
            .minMainMemoryAvailableGb(2)
            .build();
    NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
    when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
    when(storageMaintainer.updateIfNeededAndGetDiskMetricsFor(eq(containerName))).thenReturn(Optional.of(42547019776L));
    when(storageMaintainer.getHostTotalMemoryGb()).thenReturn(10d);
    when(dockerOperations.getContainerStats(eq(containerName)))
            .thenReturn(Optional.of(stats1))
            .thenReturn(Optional.of(stats2));
    nodeAgent.converge();
    nodeAgent.updateContainerNodeMetrics();
    clock.advance(Duration.ofSeconds(1234));
    Path pathToExpectedMetrics = Paths.get(classLoader.getResource("expected.container.system.metrics.txt").getPath());
    String expectedMetrics = new String(Files.readAllBytes(pathToExpectedMetrics))
            .replaceAll("\\s", "")
            .replaceAll("\\n", "");
    String[] expectedCommand = {"rpc_invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", expectedMetrics};
    doAnswer(invocation -> {
        ContainerName calledContainerName = (ContainerName) invocation.getArguments()[0];
        long calledTimeout = (long) invocation.getArguments()[1];
        String[] calledCommand = new String[invocation.getArguments().length - 2];
        System.arraycopy(invocation.getArguments(), 2, calledCommand, 0, calledCommand.length);
        // Timestamps vary per run; zero them before comparing against the fixture
        calledCommand[calledCommand.length - 1] = calledCommand[calledCommand.length - 1].replaceAll("\"timestamp\":\\d+", "\"timestamp\":0");
        assertEquals(containerName, calledContainerName);
        assertEquals(5L, calledTimeout);
        assertArrayEquals(expectedCommand, calledCommand);
        return null;
    }).when(dockerOperations).executeCommandInContainerAsRoot(any(), any(), anyVararg());
    nodeAgent.updateContainerNodeMetrics();
}
@Test
// A ready node has no container, so updating metrics must produce no metrics at all.
public void testGetRelevantMetricsForReadyNode() throws Exception {
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .nodeState(Node.State.ready)
            .build();
    NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
    when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
    when(dockerOperations.getContainerStats(eq(containerName))).thenReturn(Optional.empty());
    nodeAgent.converge();
    nodeAgent.updateContainerNodeMetrics();
    Set<Map<String, Object>> actualMetrics = metricReceiver.getAllMetricsRaw();
    assertEquals(Collections.emptySet(), actualMetrics);
}
/**
 * Creates the NodeAgentImpl under test, with DockerOperations stubbed to report
 * the given container.
 *
 * @param dockerImage image of the pre-existing container, or null for "no container"
 * @param isRunning   whether the pre-existing container is reported as RUNNING
 */
private NodeAgentImpl makeNodeAgent(DockerImage dockerImage, boolean isRunning) {
    Optional<Container> container;
    if (dockerImage == null) {
        container = Optional.empty();
    } else {
        container = Optional.of(new Container(
                hostName,
                dockerImage,
                containerName,
                isRunning ? Container.State.RUNNING : Container.State.EXITED,
                isRunning ? 1 : 0));
    }
    when(dockerOperations.getContainerStats(any())).thenReturn(Optional.of(emptyContainerStats));
    when(dockerOperations.getContainer(eq(containerName))).thenReturn(container);
    doNothing().when(storageMaintainer).writeFilebeatConfig(any(), any());
    doNothing().when(storageMaintainer).writeMetricsConfig(any(), any());
    return new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations,
            Optional.of(storageMaintainer), environment, clock, Optional.of(aclMaintainer));
}
} | class NodeAgentImplTest {
private static final double MIN_CPU_CORES = 2;
private static final double MIN_MAIN_MEMORY_AVAILABLE_GB = 16;
private static final double MIN_DISK_AVAILABLE_GB = 250;
private static final String vespaVersion = "1.2.3";
private final String hostName = "host1.test.yahoo.com";
private final ContainerName containerName = new ContainerName("host1");
private final DockerImage dockerImage = new DockerImage("dockerImage");
private final DockerOperations dockerOperations = mock(DockerOperations.class);
private final NodeRepository nodeRepository = mock(NodeRepository.class);
private final Orchestrator orchestrator = mock(Orchestrator.class);
private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
private final AclMaintainer aclMaintainer = mock(AclMaintainer.class);
private final Docker.ContainerStats emptyContainerStats = new ContainerStatsImpl(Collections.emptyMap(),
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
private final PathResolver pathResolver = mock(PathResolver.class);
private final ManualClock clock = new ManualClock();
private final Environment environment = new Environment.Builder()
.environment("dev")
.region("us-east-1")
.parentHostHostname("parent.host.name.yahoo.com")
.inetAddressResolver(new InetAddressResolver())
.pathResolver(pathResolver).build();
private final ContainerNodeSpec.Builder nodeSpecBuilder = new ContainerNodeSpec.Builder()
.hostname(hostName)
.nodeType("tenant")
.nodeFlavor("docker")
.minCpuCores(MIN_CPU_CORES)
.minMainMemoryAvailableGb(MIN_MAIN_MEMORY_AVAILABLE_GB)
.minDiskAvailableGb(MIN_DISK_AVAILABLE_GB);
/** When no container exists for an active node, converge() must create and start one. */
@Test
public void absentContainerCausesStart() throws Exception {
    final long restartGeneration = 1;
    final long rebootGeneration = 0;
    final ContainerNodeSpec nodeSpec = nodeSpecBuilder
            .wantedDockerImage(dockerImage)
            .nodeState(Node.State.active)
            .wantedVespaVersion(vespaVersion)
            .wantedRestartGeneration(restartGeneration)
            .currentRestartGeneration(restartGeneration)
            .wantedRebootGeneration(rebootGeneration)
            .build();
    // null image => mocked DockerOperations reports no existing container
    NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
    when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
    when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
    nodeAgent.converge();
    // Nothing existed, so nothing should be removed, suspended or downloaded
    verify(dockerOperations, never()).removeContainer(any());
    verify(orchestrator, never()).suspend(any(String.class));
    verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
    // ACLs are applied before start; the node is resumed and the node repo updated last
    final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository, aclMaintainer);
    inOrder.verify(aclMaintainer, times(1)).run();
    inOrder.verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
    inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
    inOrder.verify(orchestrator).resume(hostName);
    inOrder.verify(nodeRepository).updateNodeAttributes(
            hostName, new NodeAttributes()
                    .withRestartGeneration(restartGeneration)
                    .withRebootGeneration(rebootGeneration)
                    .withDockerImage(dockerImage)
                    .withVespaVersion(vespaVersion));
}
@Test
public void containerIsNotStoppedIfNewImageMustBePulled() throws Exception {
final DockerImage newDockerImage = new DockerImage("new-image");
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(newDockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.shouldScheduleDownloadOfImage(any())).thenReturn(true);
nodeAgent.converge();
verify(orchestrator, never()).suspend(any(String.class));
verify(orchestrator, never()).resume(any(String.class));
verify(dockerOperations, never()).removeContainer(any());
final InOrder inOrder = inOrder(dockerOperations);
inOrder.verify(dockerOperations, times(1)).shouldScheduleDownloadOfImage(eq(newDockerImage));
inOrder.verify(dockerOperations, times(1)).scheduleDownloadOfImage(eq(containerName), eq(newDockerImage), any());
}
@Test
public void noRestartIfOrchestratorSuspendFails() throws Exception {
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
try {
nodeAgent.converge();
fail("Expected to throw an exception");
} catch (Exception ignored) { }
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository, never()).updateNodeAttributes(any(String.class), any(NodeAttributes.class));
}
@Test
public void failedNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.failed)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void readyNodeLeadsToNoAction() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null,false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
nodeAgent.converge();
nodeAgent.converge();
verify(dockerOperations, times(1)).getContainer(eq(containerName));
verify(dockerOperations, never()).removeContainer(any());
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
@Test
public void inactiveNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.inactive)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void reservedNodeDoesNotUpdateNodeRepoWithVersion() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.reserved)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
// Shared scenario: a node in the given state with a running container must be
// torn down (cleanup -> remove container -> archive data -> mark available),
// without starting a container or talking to the orchestrator, and with the
// node repo told the node now has no image/version.
private void nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State nodeState, Optional<Long> wantedRestartGeneration) {
wantedRestartGeneration.ifPresent(restartGeneration -> nodeSpecBuilder
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration));
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(nodeState)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
// The teardown sequence is order-sensitive: clean, remove, archive, recycle.
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations, nodeRepository);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, times(1)).removeContainer(any());
inOrder.verify(storageMaintainer, times(1)).archiveNodeData(eq(containerName));
inOrder.verify(nodeRepository, times(1)).markNodeAvailableForNewAllocation(eq(hostName));
verify(dockerOperations, never()).startContainer(eq(containerName), any());
verify(orchestrator, never()).resume(any(String.class));
verify(orchestrator, never()).suspend(any(String.class));
verify(nodeRepository, times(1)).updateNodeAttributes(
any(String.class), eq(new NodeAttributes()
// null when no restart generation was set on the spec
.withRestartGeneration(wantedRestartGeneration.orElse(null))
.withRebootGeneration(0L)
.withDockerImage(new DockerImage(""))
.withVespaVersion("")));
}
// Dirty node with a restart generation is recycled.
@Test
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycled() {
nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.of(1L));
}
// Dirty node without a restart generation is recycled the same way.
@Test
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycledNoRestartGeneration() {
nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.empty());
}
// A freshly provisioned node has never run a container; converging must hand
// it straight to the node repository as dirty so it is cleaned before use.
@Test
public void provisionedNodeIsMarkedAsDirty() throws Exception {
final ContainerNodeSpec spec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.provisioned)
.build();
final NodeAgentImpl agent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(spec));
agent.converge();
verify(nodeRepository, times(1)).markAsDirty(eq(hostName));
}
// If node-admin comes back up and finds the node's container dead (EXITED),
// a tick must remove the dead container and start a fresh one.
@Test
public void testRestartDeadContainerAfterNodeAdminRestart() throws IOException {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.currentDockerImage(dockerImage)
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.build();
// isRunning=false: the container exists but has exited.
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.tick();
verify(dockerOperations, times(1)).removeContainer(any());
verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
}
// resumeNode is retried across converge() calls: the first converge fails when
// resumeNode throws (and nothing after it runs); the second converge retries
// resumeNode, then resumes the orchestrator and updates node attributes.
@Test
public void resumeProgramRunsUntilSuccess() throws Exception {
final long restartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
final InOrder inOrder = inOrder(orchestrator, dockerOperations, nodeRepository);
// First call throws, subsequent calls succeed.
doThrow(new RuntimeException("Failed 1st time"))
.doNothing()
.when(dockerOperations).resumeNode(eq(containerName));
try {
nodeAgent.converge();
fail("Expected to throw an exception");
} catch (RuntimeException ignored) { }
// Only resumeNode ran before the failure; no orchestrator/repo interaction.
inOrder.verify(dockerOperations, times(1)).resumeNode(any());
inOrder.verifyNoMoreInteractions();
nodeAgent.converge();
inOrder.verify(dockerOperations).resumeNode(any());
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(eq(hostName), any());
inOrder.verifyNoMoreInteractions();
}
// setFrozen returns false while the state change is still pending and true
// once it has taken effect; while frozen, tick() must not converge.
// NOTE(review): the 35s clock advances suggest an internal ~30s delay before
// a freeze/unfreeze transition completes — confirm against NodeAgentImpl.
@Test
public void testSetFrozen() {
NodeAgentImpl nodeAgent = spy(makeNodeAgent(dockerImage, true));
doNothing().when(nodeAgent).converge();
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertFalse(nodeAgent.setFrozen(true));
nodeAgent.tick();
// Still only the first converge: the agent is (becoming) frozen.
verify(nodeAgent, times(1)).converge();
assertTrue(nodeAgent.setFrozen(true));
clock.advance(Duration.ofSeconds(35));
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertFalse(nodeAgent.setFrozen(false));
nodeAgent.tick();
// Unfreezing pending: converge runs again.
verify(nodeAgent, times(2)).converge();
assertTrue(nodeAgent.setFrozen(false));
clock.advance(Duration.ofSeconds(35));
nodeAgent.tick();
verify(nodeAgent, times(3)).converge();
}
// Feeds two canned docker-stats snapshots (precpu then cpu, so a CPU delta can
// be computed) through updateContainerNodeMetrics and checks the exact
// rpc_invoke command the agent executes inside the container, comparing
// against a golden metrics file with timestamps normalized to 0.
@Test
@SuppressWarnings("unchecked")
public void testGetRelevantMetrics() throws Exception {
final ObjectMapper objectMapper = new ObjectMapper();
ClassLoader classLoader = getClass().getClassLoader();
File statsFile = new File(classLoader.getResource("docker.stats.json").getFile());
Map<String, Map<String, Object>> dockerStats = objectMapper.readValue(statsFile, Map.class);
Map<String, Object> networks = dockerStats.get("networks");
Map<String, Object> precpu_stats = dockerStats.get("precpu_stats");
Map<String, Object> cpu_stats = dockerStats.get("cpu_stats");
Map<String, Object> memory_stats = dockerStats.get("memory_stats");
Map<String, Object> blkio_stats = dockerStats.get("blkio_stats");
Docker.ContainerStats stats1 = new ContainerStatsImpl(networks, precpu_stats, memory_stats, blkio_stats);
Docker.ContainerStats stats2 = new ContainerStatsImpl(networks, cpu_stats, memory_stats, blkio_stats);
ContainerNodeSpec.Owner owner = new ContainerNodeSpec.Owner("tester", "testapp", "testinstance");
ContainerNodeSpec.Membership membership = new ContainerNodeSpec.Membership("clustType", "clustId", "grp", 3, false);
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.owner(owner)
.membership(membership)
.minMainMemoryAvailableGb(2)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(storageMaintainer.updateIfNeededAndGetDiskMetricsFor(eq(containerName))).thenReturn(Optional.of(42547019776L));
when(storageMaintainer.getHostTotalMemoryGb()).thenReturn(10d);
// First metrics pass sees stats1, second sees stats2.
when(dockerOperations.getContainerStats(eq(containerName)))
.thenReturn(Optional.of(stats1))
.thenReturn(Optional.of(stats2));
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics();
clock.advance(Duration.ofSeconds(1234));
// Golden file, stripped of all whitespace to make the comparison layout-independent.
Path pathToExpectedMetrics = Paths.get(classLoader.getResource("expected.container.system.metrics.txt").getPath());
String expectedMetrics = new String(Files.readAllBytes(pathToExpectedMetrics))
.replaceAll("\\s", "")
.replaceAll("\\n", "");
String[] expectedCommand = {"rpc_invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", expectedMetrics};
doAnswer(invocation -> {
ContainerName calledContainerName = (ContainerName) invocation.getArguments()[0];
long calledTimeout = (long) invocation.getArguments()[1];
String[] calledCommand = new String[invocation.getArguments().length - 2];
System.arraycopy(invocation.getArguments(), 2, calledCommand, 0, calledCommand.length);
// Timestamps vary per run; zero them before comparing to the golden output.
calledCommand[calledCommand.length - 1] = calledCommand[calledCommand.length - 1].replaceAll("\"timestamp\":\\d+", "\"timestamp\":0");
assertEquals(containerName, calledContainerName);
assertEquals(5L, calledTimeout);
assertArrayEquals(expectedCommand, calledCommand);
return null;
}).when(dockerOperations).executeCommandInContainerAsRoot(any(), any(), anyVararg());
nodeAgent.updateContainerNodeMetrics();
}
// A ready node has no container, so updating metrics must publish nothing.
@Test
public void testGetRelevantMetricsForReadyNode() throws Exception {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.getContainerStats(eq(containerName))).thenReturn(Optional.empty());
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics();
Set<Map<String, Object>> actualMetrics = metricReceiver.getAllMetricsRaw();
assertEquals(Collections.emptySet(), actualMetrics);
}
// Builds a NodeAgentImpl wired to the shared mocks. dockerImage == null means
// "no container exists"; otherwise a container in RUNNING or EXITED state
// (with pid 1 or 0 respectively) is reported by the DockerOperations mock.
private NodeAgentImpl makeNodeAgent(DockerImage dockerImage, boolean isRunning) {
final Optional<Container> existingContainer;
if (dockerImage == null) {
existingContainer = Optional.empty();
} else {
Container.State state = isRunning ? Container.State.RUNNING : Container.State.EXITED;
int pid = isRunning ? 1 : 0;
existingContainer = Optional.of(new Container(hostName, dockerImage, containerName, state, pid));
}
when(dockerOperations.getContainerStats(any())).thenReturn(Optional.of(emptyContainerStats));
when(dockerOperations.getContainer(eq(containerName))).thenReturn(existingContainer);
doNothing().when(storageMaintainer).writeFilebeatConfig(any(), any());
doNothing().when(storageMaintainer).writeMetricsConfig(any(), any());
return new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations,
Optional.of(storageMaintainer), environment, clock, Optional.of(aclMaintainer));
}
} |
This was an attempt to complete the TODOs and to verify that certain operations are not repeated on the second converge(); it was later updated to actually implement those TODOs.
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository);
inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName,
new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
inOrder.verify(orchestrator).resume(hostName);
} | nodeAgent.converge(); | public void upToDateContainerIsUntouched() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository);
inOrder.verify(dockerOperations).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName,
new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
inOrder.verify(dockerOperations, never()).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository, never()).updateNodeAttributes(
hostName,
new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
} | class NodeAgentImplTest {
private static final double MIN_CPU_CORES = 2;
private static final double MIN_MAIN_MEMORY_AVAILABLE_GB = 16;
private static final double MIN_DISK_AVAILABLE_GB = 250;
private static final String vespaVersion = "1.2.3";
private final String hostName = "host1.test.yahoo.com";
private final ContainerName containerName = new ContainerName("host1");
private final DockerImage dockerImage = new DockerImage("dockerImage");
private final DockerOperations dockerOperations = mock(DockerOperations.class);
private final NodeRepository nodeRepository = mock(NodeRepository.class);
private final Orchestrator orchestrator = mock(Orchestrator.class);
private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
private final AclMaintainer aclMaintainer = mock(AclMaintainer.class);
private final Docker.ContainerStats emptyContainerStats = new ContainerStatsImpl(Collections.emptyMap(),
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
private final PathResolver pathResolver = mock(PathResolver.class);
private final ManualClock clock = new ManualClock();
private final Environment environment = new Environment.Builder()
.environment("dev")
.region("us-east-1")
.parentHostHostname("parent.host.name.yahoo.com")
.inetAddressResolver(new InetAddressResolver())
.pathResolver(pathResolver).build();
private final ContainerNodeSpec.Builder nodeSpecBuilder = new ContainerNodeSpec.Builder()
.hostname(hostName)
.nodeType("tenant")
.nodeFlavor("docker")
.minCpuCores(MIN_CPU_CORES)
.minMainMemoryAvailableGb(MIN_MAIN_MEMORY_AVAILABLE_GB)
.minDiskAvailableGb(MIN_DISK_AVAILABLE_GB);
// An active node with no existing container must get one started: ACLs are
// applied, the container is started and resumed, the orchestrator is resumed,
// and the node attributes are reported — in that order.
// Fix: the annotation was duplicated ("@Test @Test"); JUnit 4's @Test is not
// @Repeatable, so the duplicate is a compile error and has been removed.
@Test
public void absentContainerCausesStart() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
// No container exists yet for this node.
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository, aclMaintainer);
inOrder.verify(aclMaintainer, times(1)).run();
inOrder.verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
// When a new image must be pulled first, the running container is left alone:
// only a download is scheduled; no stop/remove, no orchestrator calls.
@Test
public void containerIsNotStoppedIfNewImageMustBePulled() throws Exception {
final DockerImage newDockerImage = new DockerImage("new-image");
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(newDockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
// Force the "image not yet downloaded" path.
when(dockerOperations.shouldScheduleDownloadOfImage(any())).thenReturn(true);
nodeAgent.converge();
verify(orchestrator, never()).suspend(any(String.class));
verify(orchestrator, never()).resume(any(String.class));
verify(dockerOperations, never()).removeContainer(any());
final InOrder inOrder = inOrder(dockerOperations);
inOrder.verify(dockerOperations, times(1)).shouldScheduleDownloadOfImage(eq(newDockerImage));
inOrder.verify(dockerOperations, times(1)).scheduleDownloadOfImage(eq(containerName), eq(newDockerImage), any());
}
// If converge() fails before the restart can proceed, the container must not
// be (re)started, the orchestrator must not be resumed, and no attributes
// are written back.
// NOTE(review): nodeRepository.getContainerNodeSpec is never stubbed here and
// orchestrator.suspend is never stubbed to throw, so the converge() failure
// most likely comes from the missing node spec rather than from a suspend
// failure as the test name suggests — confirm and stub suspend to throw.
@Test
public void noRestartIfOrchestratorSuspendFails() throws Exception {
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
try {
nodeAgent.converge();
fail("Expected to throw an exception");
} catch (Exception ignored) { }
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository, never()).updateNodeAttributes(any(String.class), any(NodeAttributes.class));
}
// A 'failed' node's running container is left running (no removal, no
// orchestrator resume) while attributes are still reported to the node repo.
@Test
public void failedNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.failed)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
// A 'ready' node requires no container work; repeated converges must not
// re-query docker (container lookup happens once) and must not start or
// remove anything. Attributes are reported with empty image/version.
@Test
public void readyNodeLeadsToNoAction() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null,false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
// Converge multiple times to prove idempotence.
nodeAgent.converge();
nodeAgent.converge();
nodeAgent.converge();
// Container presence is only checked on the first converge.
verify(dockerOperations, times(1)).getContainer(eq(containerName));
verify(dockerOperations, never()).removeContainer(any());
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
// An 'inactive' node's running container is left running: cleanup happens,
// but the container is not removed and the orchestrator is not resumed;
// current attributes are still written to the node repository.
@Test
public void inactiveNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.inactive)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
// A 'reserved' node has no container, so empty image/version strings are
// reported instead of the wanted ones.
@Test
public void reservedNodeDoesNotUpdateNodeRepoWithVersion() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.reserved)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
// No image / not running: there is no container for this node.
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
// Shared scenario: a node in the given state with a running container is torn
// down in strict order (clean files -> remove container -> archive data ->
// mark available), without starting anything or touching the orchestrator.
private void nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State nodeState, Optional<Long> wantedRestartGeneration) {
wantedRestartGeneration.ifPresent(restartGeneration -> nodeSpecBuilder
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration));
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(nodeState)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations, nodeRepository);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, times(1)).removeContainer(any());
inOrder.verify(storageMaintainer, times(1)).archiveNodeData(eq(containerName));
inOrder.verify(nodeRepository, times(1)).markNodeAvailableForNewAllocation(eq(hostName));
verify(dockerOperations, never()).startContainer(eq(containerName), any());
verify(orchestrator, never()).resume(any(String.class));
verify(orchestrator, never()).suspend(any(String.class));
verify(nodeRepository, times(1)).updateNodeAttributes(
any(String.class), eq(new NodeAttributes()
// null when the spec carried no restart generation
.withRestartGeneration(wantedRestartGeneration.orElse(null))
.withRebootGeneration(0L)
.withDockerImage(new DockerImage(""))
.withVespaVersion("")));
}
// Dirty node with a restart generation is recycled.
@Test
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycled() {
nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.of(1L));
}
// Dirty node without a restart generation is recycled the same way.
@Test
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycledNoRestartGeneration() {
nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.empty());
}
// A freshly provisioned node has never run a container; converging must hand
// it straight to the node repository as dirty so it is cleaned before use.
@Test
public void provisionedNodeIsMarkedAsDirty() throws Exception {
final ContainerNodeSpec spec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.provisioned)
.build();
final NodeAgentImpl agent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(spec));
agent.converge();
verify(nodeRepository, times(1)).markAsDirty(eq(hostName));
}
// After a node-admin restart, a dead (EXITED) container for an active node is
// removed and a fresh one is started on the next tick.
@Test
public void testRestartDeadContainerAfterNodeAdminRestart() throws IOException {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.currentDockerImage(dockerImage)
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.build();
// isRunning=false: the container exists but has exited.
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.tick();
verify(dockerOperations, times(1)).removeContainer(any());
verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
}
// resumeNode is retried on the next converge(): the first converge fails when
// resumeNode throws and nothing else runs; the second retries resumeNode,
// resumes the orchestrator and updates node attributes, in that order.
@Test
public void resumeProgramRunsUntilSuccess() throws Exception {
final long restartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
final InOrder inOrder = inOrder(orchestrator, dockerOperations, nodeRepository);
// First call throws, subsequent calls succeed.
doThrow(new RuntimeException("Failed 1st time"))
.doNothing()
.when(dockerOperations).resumeNode(eq(containerName));
try {
nodeAgent.converge();
fail("Expected to throw an exception");
} catch (RuntimeException ignored) { }
inOrder.verify(dockerOperations, times(1)).resumeNode(any());
inOrder.verifyNoMoreInteractions();
nodeAgent.converge();
inOrder.verify(dockerOperations).resumeNode(any());
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(eq(hostName), any());
inOrder.verifyNoMoreInteractions();
}
// setFrozen returns false while the requested state change is still pending
// and true once it has taken effect; while frozen, tick() must not converge.
// NOTE(review): the 35s clock advances suggest an internal ~30s delay before
// a freeze/unfreeze transition completes — confirm against NodeAgentImpl.
@Test
public void testSetFrozen() {
NodeAgentImpl nodeAgent = spy(makeNodeAgent(dockerImage, true));
doNothing().when(nodeAgent).converge();
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertFalse(nodeAgent.setFrozen(true));
nodeAgent.tick();
// Still only one converge: the agent is (becoming) frozen.
verify(nodeAgent, times(1)).converge();
assertTrue(nodeAgent.setFrozen(true));
clock.advance(Duration.ofSeconds(35));
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertFalse(nodeAgent.setFrozen(false));
nodeAgent.tick();
// Unfreeze pending: converge runs again.
verify(nodeAgent, times(2)).converge();
assertTrue(nodeAgent.setFrozen(false));
clock.advance(Duration.ofSeconds(35));
nodeAgent.tick();
verify(nodeAgent, times(3)).converge();
}
// Feeds two canned docker-stats snapshots (precpu then cpu, so a CPU delta can
// be computed) through updateContainerNodeMetrics and verifies the exact
// rpc_invoke command executed inside the container against a golden metrics
// file, with timestamps normalized to 0 before comparison.
@Test
@SuppressWarnings("unchecked")
public void testGetRelevantMetrics() throws Exception {
final ObjectMapper objectMapper = new ObjectMapper();
ClassLoader classLoader = getClass().getClassLoader();
File statsFile = new File(classLoader.getResource("docker.stats.json").getFile());
Map<String, Map<String, Object>> dockerStats = objectMapper.readValue(statsFile, Map.class);
Map<String, Object> networks = dockerStats.get("networks");
Map<String, Object> precpu_stats = dockerStats.get("precpu_stats");
Map<String, Object> cpu_stats = dockerStats.get("cpu_stats");
Map<String, Object> memory_stats = dockerStats.get("memory_stats");
Map<String, Object> blkio_stats = dockerStats.get("blkio_stats");
Docker.ContainerStats stats1 = new ContainerStatsImpl(networks, precpu_stats, memory_stats, blkio_stats);
Docker.ContainerStats stats2 = new ContainerStatsImpl(networks, cpu_stats, memory_stats, blkio_stats);
ContainerNodeSpec.Owner owner = new ContainerNodeSpec.Owner("tester", "testapp", "testinstance");
ContainerNodeSpec.Membership membership = new ContainerNodeSpec.Membership("clustType", "clustId", "grp", 3, false);
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.owner(owner)
.membership(membership)
.minMainMemoryAvailableGb(2)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(storageMaintainer.updateIfNeededAndGetDiskMetricsFor(eq(containerName))).thenReturn(Optional.of(42547019776L));
when(storageMaintainer.getHostTotalMemoryGb()).thenReturn(10d);
// First metrics pass sees stats1, second sees stats2.
when(dockerOperations.getContainerStats(eq(containerName)))
.thenReturn(Optional.of(stats1))
.thenReturn(Optional.of(stats2));
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics();
clock.advance(Duration.ofSeconds(1234));
// Golden file, stripped of all whitespace for a layout-independent compare.
Path pathToExpectedMetrics = Paths.get(classLoader.getResource("expected.container.system.metrics.txt").getPath());
String expectedMetrics = new String(Files.readAllBytes(pathToExpectedMetrics))
.replaceAll("\\s", "")
.replaceAll("\\n", "");
String[] expectedCommand = {"rpc_invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", expectedMetrics};
doAnswer(invocation -> {
ContainerName calledContainerName = (ContainerName) invocation.getArguments()[0];
long calledTimeout = (long) invocation.getArguments()[1];
String[] calledCommand = new String[invocation.getArguments().length - 2];
System.arraycopy(invocation.getArguments(), 2, calledCommand, 0, calledCommand.length);
// Timestamps vary per run; zero them before comparing to the golden output.
calledCommand[calledCommand.length - 1] = calledCommand[calledCommand.length - 1].replaceAll("\"timestamp\":\\d+", "\"timestamp\":0");
assertEquals(containerName, calledContainerName);
assertEquals(5L, calledTimeout);
assertArrayEquals(expectedCommand, calledCommand);
return null;
}).when(dockerOperations).executeCommandInContainerAsRoot(any(), any(), anyVararg());
nodeAgent.updateContainerNodeMetrics();
}
// A ready node has no container, so updating metrics must publish nothing.
@Test
public void testGetRelevantMetricsForReadyNode() throws Exception {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.getContainerStats(eq(containerName))).thenReturn(Optional.empty());
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics();
Set<Map<String, Object>> actualMetrics = metricReceiver.getAllMetricsRaw();
assertEquals(Collections.emptySet(), actualMetrics);
}
// Builds a NodeAgentImpl wired to the shared mocks. dockerImage == null means
// "no container exists"; otherwise a container in RUNNING or EXITED state
// (with pid 1 or 0 respectively) is reported by the DockerOperations mock.
private NodeAgentImpl makeNodeAgent(DockerImage dockerImage, boolean isRunning) {
final Optional<Container> existingContainer;
if (dockerImage == null) {
existingContainer = Optional.empty();
} else {
Container.State state = isRunning ? Container.State.RUNNING : Container.State.EXITED;
int pid = isRunning ? 1 : 0;
existingContainer = Optional.of(new Container(hostName, dockerImage, containerName, state, pid));
}
when(dockerOperations.getContainerStats(any())).thenReturn(Optional.of(emptyContainerStats));
when(dockerOperations.getContainer(eq(containerName))).thenReturn(existingContainer);
doNothing().when(storageMaintainer).writeFilebeatConfig(any(), any());
doNothing().when(storageMaintainer).writeMetricsConfig(any(), any());
return new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations,
Optional.of(storageMaintainer), environment, clock, Optional.of(aclMaintainer));
}
} | class NodeAgentImplTest {
private static final double MIN_CPU_CORES = 2;
private static final double MIN_MAIN_MEMORY_AVAILABLE_GB = 16;
private static final double MIN_DISK_AVAILABLE_GB = 250;
private static final String vespaVersion = "1.2.3";
private final String hostName = "host1.test.yahoo.com";
private final ContainerName containerName = new ContainerName("host1");
private final DockerImage dockerImage = new DockerImage("dockerImage");
private final DockerOperations dockerOperations = mock(DockerOperations.class);
private final NodeRepository nodeRepository = mock(NodeRepository.class);
private final Orchestrator orchestrator = mock(Orchestrator.class);
private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
private final AclMaintainer aclMaintainer = mock(AclMaintainer.class);
private final Docker.ContainerStats emptyContainerStats = new ContainerStatsImpl(Collections.emptyMap(),
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
private final PathResolver pathResolver = mock(PathResolver.class);
private final ManualClock clock = new ManualClock();
private final Environment environment = new Environment.Builder()
.environment("dev")
.region("us-east-1")
.parentHostHostname("parent.host.name.yahoo.com")
.inetAddressResolver(new InetAddressResolver())
.pathResolver(pathResolver).build();
private final ContainerNodeSpec.Builder nodeSpecBuilder = new ContainerNodeSpec.Builder()
.hostname(hostName)
.nodeType("tenant")
.nodeFlavor("docker")
.minCpuCores(MIN_CPU_CORES)
.minMainMemoryAvailableGb(MIN_MAIN_MEMORY_AVAILABLE_GB)
.minDiskAvailableGb(MIN_DISK_AVAILABLE_GB);
@Test
@Test
public void absentContainerCausesStart() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository, aclMaintainer);
inOrder.verify(aclMaintainer, times(1)).run();
inOrder.verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void containerIsNotStoppedIfNewImageMustBePulled() throws Exception {
final DockerImage newDockerImage = new DockerImage("new-image");
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(newDockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.shouldScheduleDownloadOfImage(any())).thenReturn(true);
nodeAgent.converge();
verify(orchestrator, never()).suspend(any(String.class));
verify(orchestrator, never()).resume(any(String.class));
verify(dockerOperations, never()).removeContainer(any());
final InOrder inOrder = inOrder(dockerOperations);
inOrder.verify(dockerOperations, times(1)).shouldScheduleDownloadOfImage(eq(newDockerImage));
inOrder.verify(dockerOperations, times(1)).scheduleDownloadOfImage(eq(containerName), eq(newDockerImage), any());
}
@Test
public void noRestartIfOrchestratorSuspendFails() throws Exception {
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
try {
nodeAgent.converge();
fail("Expected to throw an exception");
} catch (Exception ignored) { }
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository, never()).updateNodeAttributes(any(String.class), any(NodeAttributes.class));
}
@Test
public void failedNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.failed)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void readyNodeLeadsToNoAction() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null,false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
nodeAgent.converge();
nodeAgent.converge();
verify(dockerOperations, times(1)).getContainer(eq(containerName));
verify(dockerOperations, never()).removeContainer(any());
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
@Test
public void inactiveNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.inactive)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void reservedNodeDoesNotUpdateNodeRepoWithVersion() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.reserved)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
private void nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State nodeState, Optional<Long> wantedRestartGeneration) {
wantedRestartGeneration.ifPresent(restartGeneration -> nodeSpecBuilder
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration));
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(nodeState)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations, nodeRepository);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, times(1)).removeContainer(any());
inOrder.verify(storageMaintainer, times(1)).archiveNodeData(eq(containerName));
inOrder.verify(nodeRepository, times(1)).markNodeAvailableForNewAllocation(eq(hostName));
verify(dockerOperations, never()).startContainer(eq(containerName), any());
verify(orchestrator, never()).resume(any(String.class));
verify(orchestrator, never()).suspend(any(String.class));
verify(nodeRepository, times(1)).updateNodeAttributes(
any(String.class), eq(new NodeAttributes()
.withRestartGeneration(wantedRestartGeneration.orElse(null))
.withRebootGeneration(0L)
.withDockerImage(new DockerImage(""))
.withVespaVersion("")));
}
@Test
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycled() {
nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.of(1L));
}
@Test
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycledNoRestartGeneration() {
nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.empty());
}
@Test
public void provisionedNodeIsMarkedAsDirty() throws Exception {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.provisioned)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository, times(1)).markAsDirty(eq(hostName));
}
@Test
public void testRestartDeadContainerAfterNodeAdminRestart() throws IOException {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.currentDockerImage(dockerImage)
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.tick();
verify(dockerOperations, times(1)).removeContainer(any());
verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
}
@Test
public void resumeProgramRunsUntilSuccess() throws Exception {
final long restartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
final InOrder inOrder = inOrder(orchestrator, dockerOperations, nodeRepository);
doThrow(new RuntimeException("Failed 1st time"))
.doNothing()
.when(dockerOperations).resumeNode(eq(containerName));
try {
nodeAgent.converge();
fail("Expected to throw an exception");
} catch (RuntimeException ignored) { }
inOrder.verify(dockerOperations, times(1)).resumeNode(any());
inOrder.verifyNoMoreInteractions();
nodeAgent.converge();
inOrder.verify(dockerOperations).resumeNode(any());
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(eq(hostName), any());
inOrder.verifyNoMoreInteractions();
}
@Test
public void testSetFrozen() {
NodeAgentImpl nodeAgent = spy(makeNodeAgent(dockerImage, true));
doNothing().when(nodeAgent).converge();
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertFalse(nodeAgent.setFrozen(true));
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertTrue(nodeAgent.setFrozen(true));
clock.advance(Duration.ofSeconds(35));
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertFalse(nodeAgent.setFrozen(false));
nodeAgent.tick();
verify(nodeAgent, times(2)).converge();
assertTrue(nodeAgent.setFrozen(false));
clock.advance(Duration.ofSeconds(35));
nodeAgent.tick();
verify(nodeAgent, times(3)).converge();
}
@Test
@SuppressWarnings("unchecked")
public void testGetRelevantMetrics() throws Exception {
final ObjectMapper objectMapper = new ObjectMapper();
ClassLoader classLoader = getClass().getClassLoader();
File statsFile = new File(classLoader.getResource("docker.stats.json").getFile());
Map<String, Map<String, Object>> dockerStats = objectMapper.readValue(statsFile, Map.class);
Map<String, Object> networks = dockerStats.get("networks");
Map<String, Object> precpu_stats = dockerStats.get("precpu_stats");
Map<String, Object> cpu_stats = dockerStats.get("cpu_stats");
Map<String, Object> memory_stats = dockerStats.get("memory_stats");
Map<String, Object> blkio_stats = dockerStats.get("blkio_stats");
Docker.ContainerStats stats1 = new ContainerStatsImpl(networks, precpu_stats, memory_stats, blkio_stats);
Docker.ContainerStats stats2 = new ContainerStatsImpl(networks, cpu_stats, memory_stats, blkio_stats);
ContainerNodeSpec.Owner owner = new ContainerNodeSpec.Owner("tester", "testapp", "testinstance");
ContainerNodeSpec.Membership membership = new ContainerNodeSpec.Membership("clustType", "clustId", "grp", 3, false);
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.owner(owner)
.membership(membership)
.minMainMemoryAvailableGb(2)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(storageMaintainer.updateIfNeededAndGetDiskMetricsFor(eq(containerName))).thenReturn(Optional.of(42547019776L));
when(storageMaintainer.getHostTotalMemoryGb()).thenReturn(10d);
when(dockerOperations.getContainerStats(eq(containerName)))
.thenReturn(Optional.of(stats1))
.thenReturn(Optional.of(stats2));
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics();
clock.advance(Duration.ofSeconds(1234));
Path pathToExpectedMetrics = Paths.get(classLoader.getResource("expected.container.system.metrics.txt").getPath());
String expectedMetrics = new String(Files.readAllBytes(pathToExpectedMetrics))
.replaceAll("\\s", "")
.replaceAll("\\n", "");
String[] expectedCommand = {"rpc_invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", expectedMetrics};
doAnswer(invocation -> {
ContainerName calledContainerName = (ContainerName) invocation.getArguments()[0];
long calledTimeout = (long) invocation.getArguments()[1];
String[] calledCommand = new String[invocation.getArguments().length - 2];
System.arraycopy(invocation.getArguments(), 2, calledCommand, 0, calledCommand.length);
calledCommand[calledCommand.length - 1] = calledCommand[calledCommand.length - 1].replaceAll("\"timestamp\":\\d+", "\"timestamp\":0");
assertEquals(containerName, calledContainerName);
assertEquals(5L, calledTimeout);
assertArrayEquals(expectedCommand, calledCommand);
return null;
}).when(dockerOperations).executeCommandInContainerAsRoot(any(), any(), anyVararg());
nodeAgent.updateContainerNodeMetrics();
}
@Test
public void testGetRelevantMetricsForReadyNode() throws Exception {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.getContainerStats(eq(containerName))).thenReturn(Optional.empty());
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics();
Set<Map<String, Object>> actualMetrics = metricReceiver.getAllMetricsRaw();
assertEquals(Collections.emptySet(), actualMetrics);
}
private NodeAgentImpl makeNodeAgent(DockerImage dockerImage, boolean isRunning) {
Optional<Container> container = dockerImage != null ?
Optional.of(new Container(
hostName,
dockerImage,
containerName,
isRunning ? Container.State.RUNNING : Container.State.EXITED,
isRunning ? 1 : 0)) :
Optional.empty();
when(dockerOperations.getContainerStats(any())).thenReturn(Optional.of(emptyContainerStats));
when(dockerOperations.getContainer(eq(containerName))).thenReturn(container);
doNothing().when(storageMaintainer).writeFilebeatConfig(any(), any());
doNothing().when(storageMaintainer).writeMetricsConfig(any(), any());
return new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations,
Optional.of(storageMaintainer), environment, clock, Optional.of(aclMaintainer));
}
} |
:+1: | public void upToDateContainerIsUntouched() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository);
inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName,
new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
inOrder.verify(orchestrator).resume(hostName);
} | nodeAgent.converge(); | public void upToDateContainerIsUntouched() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository);
inOrder.verify(dockerOperations).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName,
new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
inOrder.verify(dockerOperations, never()).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository, never()).updateNodeAttributes(
hostName,
new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
} | class NodeAgentImplTest {
private static final double MIN_CPU_CORES = 2;
private static final double MIN_MAIN_MEMORY_AVAILABLE_GB = 16;
private static final double MIN_DISK_AVAILABLE_GB = 250;
private static final String vespaVersion = "1.2.3";
private final String hostName = "host1.test.yahoo.com";
private final ContainerName containerName = new ContainerName("host1");
private final DockerImage dockerImage = new DockerImage("dockerImage");
private final DockerOperations dockerOperations = mock(DockerOperations.class);
private final NodeRepository nodeRepository = mock(NodeRepository.class);
private final Orchestrator orchestrator = mock(Orchestrator.class);
private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
private final AclMaintainer aclMaintainer = mock(AclMaintainer.class);
private final Docker.ContainerStats emptyContainerStats = new ContainerStatsImpl(Collections.emptyMap(),
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
private final PathResolver pathResolver = mock(PathResolver.class);
private final ManualClock clock = new ManualClock();
private final Environment environment = new Environment.Builder()
.environment("dev")
.region("us-east-1")
.parentHostHostname("parent.host.name.yahoo.com")
.inetAddressResolver(new InetAddressResolver())
.pathResolver(pathResolver).build();
private final ContainerNodeSpec.Builder nodeSpecBuilder = new ContainerNodeSpec.Builder()
.hostname(hostName)
.nodeType("tenant")
.nodeFlavor("docker")
.minCpuCores(MIN_CPU_CORES)
.minMainMemoryAvailableGb(MIN_MAIN_MEMORY_AVAILABLE_GB)
.minDiskAvailableGb(MIN_DISK_AVAILABLE_GB);
@Test
@Test
public void absentContainerCausesStart() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository, aclMaintainer);
inOrder.verify(aclMaintainer, times(1)).run();
inOrder.verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void containerIsNotStoppedIfNewImageMustBePulled() throws Exception {
final DockerImage newDockerImage = new DockerImage("new-image");
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(newDockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.shouldScheduleDownloadOfImage(any())).thenReturn(true);
nodeAgent.converge();
verify(orchestrator, never()).suspend(any(String.class));
verify(orchestrator, never()).resume(any(String.class));
verify(dockerOperations, never()).removeContainer(any());
final InOrder inOrder = inOrder(dockerOperations);
inOrder.verify(dockerOperations, times(1)).shouldScheduleDownloadOfImage(eq(newDockerImage));
inOrder.verify(dockerOperations, times(1)).scheduleDownloadOfImage(eq(containerName), eq(newDockerImage), any());
}
@Test
public void noRestartIfOrchestratorSuspendFails() throws Exception {
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
try {
nodeAgent.converge();
fail("Expected to throw an exception");
} catch (Exception ignored) { }
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository, never()).updateNodeAttributes(any(String.class), any(NodeAttributes.class));
}
@Test
public void failedNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.failed)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void readyNodeLeadsToNoAction() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null,false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
nodeAgent.converge();
nodeAgent.converge();
verify(dockerOperations, times(1)).getContainer(eq(containerName));
verify(dockerOperations, never()).removeContainer(any());
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
@Test
public void inactiveNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.inactive)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void reservedNodeDoesNotUpdateNodeRepoWithVersion() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.reserved)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
private void nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State nodeState, Optional<Long> wantedRestartGeneration) {
wantedRestartGeneration.ifPresent(restartGeneration -> nodeSpecBuilder
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration));
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(nodeState)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations, nodeRepository);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, times(1)).removeContainer(any());
inOrder.verify(storageMaintainer, times(1)).archiveNodeData(eq(containerName));
inOrder.verify(nodeRepository, times(1)).markNodeAvailableForNewAllocation(eq(hostName));
verify(dockerOperations, never()).startContainer(eq(containerName), any());
verify(orchestrator, never()).resume(any(String.class));
verify(orchestrator, never()).suspend(any(String.class));
verify(nodeRepository, times(1)).updateNodeAttributes(
any(String.class), eq(new NodeAttributes()
.withRestartGeneration(wantedRestartGeneration.orElse(null))
.withRebootGeneration(0L)
.withDockerImage(new DockerImage(""))
.withVespaVersion("")));
}
@Test
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycled() {
nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.of(1L));
}
@Test
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycledNoRestartGeneration() {
nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.empty());
}
@Test
public void provisionedNodeIsMarkedAsDirty() throws Exception {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.provisioned)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository, times(1)).markAsDirty(eq(hostName));
}
@Test
public void testRestartDeadContainerAfterNodeAdminRestart() throws IOException {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.currentDockerImage(dockerImage)
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.tick();
verify(dockerOperations, times(1)).removeContainer(any());
verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
}
@Test
public void resumeProgramRunsUntilSuccess() throws Exception {
final long restartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
final InOrder inOrder = inOrder(orchestrator, dockerOperations, nodeRepository);
doThrow(new RuntimeException("Failed 1st time"))
.doNothing()
.when(dockerOperations).resumeNode(eq(containerName));
try {
nodeAgent.converge();
fail("Expected to throw an exception");
} catch (RuntimeException ignored) { }
inOrder.verify(dockerOperations, times(1)).resumeNode(any());
inOrder.verifyNoMoreInteractions();
nodeAgent.converge();
inOrder.verify(dockerOperations).resumeNode(any());
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(eq(hostName), any());
inOrder.verifyNoMoreInteractions();
}
@Test
public void testSetFrozen() {
NodeAgentImpl nodeAgent = spy(makeNodeAgent(dockerImage, true));
doNothing().when(nodeAgent).converge();
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertFalse(nodeAgent.setFrozen(true));
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertTrue(nodeAgent.setFrozen(true));
clock.advance(Duration.ofSeconds(35));
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertFalse(nodeAgent.setFrozen(false));
nodeAgent.tick();
verify(nodeAgent, times(2)).converge();
assertTrue(nodeAgent.setFrozen(false));
clock.advance(Duration.ofSeconds(35));
nodeAgent.tick();
verify(nodeAgent, times(3)).converge();
}
@Test
@SuppressWarnings("unchecked")
public void testGetRelevantMetrics() throws Exception {
final ObjectMapper objectMapper = new ObjectMapper();
ClassLoader classLoader = getClass().getClassLoader();
File statsFile = new File(classLoader.getResource("docker.stats.json").getFile());
Map<String, Map<String, Object>> dockerStats = objectMapper.readValue(statsFile, Map.class);
Map<String, Object> networks = dockerStats.get("networks");
Map<String, Object> precpu_stats = dockerStats.get("precpu_stats");
Map<String, Object> cpu_stats = dockerStats.get("cpu_stats");
Map<String, Object> memory_stats = dockerStats.get("memory_stats");
Map<String, Object> blkio_stats = dockerStats.get("blkio_stats");
Docker.ContainerStats stats1 = new ContainerStatsImpl(networks, precpu_stats, memory_stats, blkio_stats);
Docker.ContainerStats stats2 = new ContainerStatsImpl(networks, cpu_stats, memory_stats, blkio_stats);
ContainerNodeSpec.Owner owner = new ContainerNodeSpec.Owner("tester", "testapp", "testinstance");
ContainerNodeSpec.Membership membership = new ContainerNodeSpec.Membership("clustType", "clustId", "grp", 3, false);
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.owner(owner)
.membership(membership)
.minMainMemoryAvailableGb(2)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(storageMaintainer.updateIfNeededAndGetDiskMetricsFor(eq(containerName))).thenReturn(Optional.of(42547019776L));
when(storageMaintainer.getHostTotalMemoryGb()).thenReturn(10d);
when(dockerOperations.getContainerStats(eq(containerName)))
.thenReturn(Optional.of(stats1))
.thenReturn(Optional.of(stats2));
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics();
clock.advance(Duration.ofSeconds(1234));
Path pathToExpectedMetrics = Paths.get(classLoader.getResource("expected.container.system.metrics.txt").getPath());
String expectedMetrics = new String(Files.readAllBytes(pathToExpectedMetrics))
.replaceAll("\\s", "")
.replaceAll("\\n", "");
String[] expectedCommand = {"rpc_invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", expectedMetrics};
doAnswer(invocation -> {
ContainerName calledContainerName = (ContainerName) invocation.getArguments()[0];
long calledTimeout = (long) invocation.getArguments()[1];
String[] calledCommand = new String[invocation.getArguments().length - 2];
System.arraycopy(invocation.getArguments(), 2, calledCommand, 0, calledCommand.length);
calledCommand[calledCommand.length - 1] = calledCommand[calledCommand.length - 1].replaceAll("\"timestamp\":\\d+", "\"timestamp\":0");
assertEquals(containerName, calledContainerName);
assertEquals(5L, calledTimeout);
assertArrayEquals(expectedCommand, calledCommand);
return null;
}).when(dockerOperations).executeCommandInContainerAsRoot(any(), any(), anyVararg());
nodeAgent.updateContainerNodeMetrics();
}
@Test
public void testGetRelevantMetricsForReadyNode() throws Exception {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.getContainerStats(eq(containerName))).thenReturn(Optional.empty());
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics();
Set<Map<String, Object>> actualMetrics = metricReceiver.getAllMetricsRaw();
assertEquals(Collections.emptySet(), actualMetrics);
}
private NodeAgentImpl makeNodeAgent(DockerImage dockerImage, boolean isRunning) {
Optional<Container> container = dockerImage != null ?
Optional.of(new Container(
hostName,
dockerImage,
containerName,
isRunning ? Container.State.RUNNING : Container.State.EXITED,
isRunning ? 1 : 0)) :
Optional.empty();
when(dockerOperations.getContainerStats(any())).thenReturn(Optional.of(emptyContainerStats));
when(dockerOperations.getContainer(eq(containerName))).thenReturn(container);
doNothing().when(storageMaintainer).writeFilebeatConfig(any(), any());
doNothing().when(storageMaintainer).writeMetricsConfig(any(), any());
return new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations,
Optional.of(storageMaintainer), environment, clock, Optional.of(aclMaintainer));
}
} | class NodeAgentImplTest {
private static final double MIN_CPU_CORES = 2;
private static final double MIN_MAIN_MEMORY_AVAILABLE_GB = 16;
private static final double MIN_DISK_AVAILABLE_GB = 250;
private static final String vespaVersion = "1.2.3";
private final String hostName = "host1.test.yahoo.com";
private final ContainerName containerName = new ContainerName("host1");
private final DockerImage dockerImage = new DockerImage("dockerImage");
private final DockerOperations dockerOperations = mock(DockerOperations.class);
private final NodeRepository nodeRepository = mock(NodeRepository.class);
private final Orchestrator orchestrator = mock(Orchestrator.class);
private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
private final AclMaintainer aclMaintainer = mock(AclMaintainer.class);
private final Docker.ContainerStats emptyContainerStats = new ContainerStatsImpl(Collections.emptyMap(),
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
private final PathResolver pathResolver = mock(PathResolver.class);
private final ManualClock clock = new ManualClock();
private final Environment environment = new Environment.Builder()
.environment("dev")
.region("us-east-1")
.parentHostHostname("parent.host.name.yahoo.com")
.inetAddressResolver(new InetAddressResolver())
.pathResolver(pathResolver).build();
private final ContainerNodeSpec.Builder nodeSpecBuilder = new ContainerNodeSpec.Builder()
.hostname(hostName)
.nodeType("tenant")
.nodeFlavor("docker")
.minCpuCores(MIN_CPU_CORES)
.minMainMemoryAvailableGb(MIN_MAIN_MEMORY_AVAILABLE_GB)
.minDiskAvailableGb(MIN_DISK_AVAILABLE_GB);
@Test
@Test
public void absentContainerCausesStart() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).suspend(any(String.class));
verify(dockerOperations, never()).scheduleDownloadOfImage(eq(containerName), any(), any());
final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository, aclMaintainer);
inOrder.verify(aclMaintainer, times(1)).run();
inOrder.verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
inOrder.verify(dockerOperations, times(1)).resumeNode(eq(containerName));
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void containerIsNotStoppedIfNewImageMustBePulled() throws Exception {
final DockerImage newDockerImage = new DockerImage("new-image");
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(newDockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.shouldScheduleDownloadOfImage(any())).thenReturn(true);
nodeAgent.converge();
verify(orchestrator, never()).suspend(any(String.class));
verify(orchestrator, never()).resume(any(String.class));
verify(dockerOperations, never()).removeContainer(any());
final InOrder inOrder = inOrder(dockerOperations);
inOrder.verify(dockerOperations, times(1)).shouldScheduleDownloadOfImage(eq(newDockerImage));
inOrder.verify(dockerOperations, times(1)).scheduleDownloadOfImage(eq(containerName), eq(newDockerImage), any());
}
@Test
public void noRestartIfOrchestratorSuspendFails() throws Exception {
final long wantedRestartGeneration = 2;
final long currentRestartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
try {
nodeAgent.converge();
fail("Expected to throw an exception");
} catch (Exception ignored) { }
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository, never()).updateNodeAttributes(any(String.class), any(NodeAttributes.class));
}
@Test
public void failedNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.failed)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void readyNodeLeadsToNoAction() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null,false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
nodeAgent.converge();
nodeAgent.converge();
verify(dockerOperations, times(1)).getContainer(eq(containerName));
verify(dockerOperations, never()).removeContainer(any());
verify(dockerOperations, never()).startContainer(eq(containerName), eq(nodeSpec));
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
@Test
public void inactiveNodeRunningContainerShouldStillBeRunning() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.inactive)
.wantedVespaVersion(vespaVersion)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, never()).removeContainer(any());
verify(orchestrator, never()).resume(any(String.class));
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(dockerImage)
.withVespaVersion(vespaVersion));
}
@Test
public void reservedNodeDoesNotUpdateNodeRepoWithVersion() throws Exception {
final long restartGeneration = 1;
final long rebootGeneration = 0;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.reserved)
.wantedVespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.wantedRebootGeneration(rebootGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository).updateNodeAttributes(
hostName, new NodeAttributes()
.withRestartGeneration(restartGeneration)
.withRebootGeneration(rebootGeneration)
.withDockerImage(new DockerImage(""))
.withVespaVersion(""));
}
private void nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State nodeState, Optional<Long> wantedRestartGeneration) {
wantedRestartGeneration.ifPresent(restartGeneration -> nodeSpecBuilder
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration));
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(nodeState)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
final InOrder inOrder = inOrder(storageMaintainer, dockerOperations, nodeRepository);
inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, times(1)).removeContainer(any());
inOrder.verify(storageMaintainer, times(1)).archiveNodeData(eq(containerName));
inOrder.verify(nodeRepository, times(1)).markNodeAvailableForNewAllocation(eq(hostName));
verify(dockerOperations, never()).startContainer(eq(containerName), any());
verify(orchestrator, never()).resume(any(String.class));
verify(orchestrator, never()).suspend(any(String.class));
verify(nodeRepository, times(1)).updateNodeAttributes(
any(String.class), eq(new NodeAttributes()
.withRestartGeneration(wantedRestartGeneration.orElse(null))
.withRebootGeneration(0L)
.withDockerImage(new DockerImage(""))
.withVespaVersion("")));
}
@Test
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycled() {
nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.of(1L));
}
@Test
public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycledNoRestartGeneration() {
nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.empty());
}
@Test
public void provisionedNodeIsMarkedAsDirty() throws Exception {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.nodeState(Node.State.provisioned)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec));
nodeAgent.converge();
verify(nodeRepository, times(1)).markAsDirty(eq(hostName));
}
@Test
public void testRestartDeadContainerAfterNodeAdminRestart() throws IOException {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.currentDockerImage(dockerImage)
.wantedDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(pathResolver.getApplicationStoragePathForNodeAdmin()).thenReturn(Files.createTempDirectory("foo"));
nodeAgent.tick();
verify(dockerOperations, times(1)).removeContainer(any());
verify(dockerOperations, times(1)).startContainer(eq(containerName), eq(nodeSpec));
}
@Test
public void resumeProgramRunsUntilSuccess() throws Exception {
final long restartGeneration = 1;
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.wantedRestartGeneration(restartGeneration)
.currentRestartGeneration(restartGeneration)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
final InOrder inOrder = inOrder(orchestrator, dockerOperations, nodeRepository);
doThrow(new RuntimeException("Failed 1st time"))
.doNothing()
.when(dockerOperations).resumeNode(eq(containerName));
try {
nodeAgent.converge();
fail("Expected to throw an exception");
} catch (RuntimeException ignored) { }
inOrder.verify(dockerOperations, times(1)).resumeNode(any());
inOrder.verifyNoMoreInteractions();
nodeAgent.converge();
inOrder.verify(dockerOperations).resumeNode(any());
inOrder.verify(orchestrator).resume(hostName);
inOrder.verify(nodeRepository).updateNodeAttributes(eq(hostName), any());
inOrder.verifyNoMoreInteractions();
}
@Test
public void testSetFrozen() {
NodeAgentImpl nodeAgent = spy(makeNodeAgent(dockerImage, true));
doNothing().when(nodeAgent).converge();
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertFalse(nodeAgent.setFrozen(true));
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertTrue(nodeAgent.setFrozen(true));
clock.advance(Duration.ofSeconds(35));
nodeAgent.tick();
verify(nodeAgent, times(1)).converge();
assertFalse(nodeAgent.setFrozen(false));
nodeAgent.tick();
verify(nodeAgent, times(2)).converge();
assertTrue(nodeAgent.setFrozen(false));
clock.advance(Duration.ofSeconds(35));
nodeAgent.tick();
verify(nodeAgent, times(3)).converge();
}
@Test
@SuppressWarnings("unchecked")
public void testGetRelevantMetrics() throws Exception {
final ObjectMapper objectMapper = new ObjectMapper();
ClassLoader classLoader = getClass().getClassLoader();
File statsFile = new File(classLoader.getResource("docker.stats.json").getFile());
Map<String, Map<String, Object>> dockerStats = objectMapper.readValue(statsFile, Map.class);
Map<String, Object> networks = dockerStats.get("networks");
Map<String, Object> precpu_stats = dockerStats.get("precpu_stats");
Map<String, Object> cpu_stats = dockerStats.get("cpu_stats");
Map<String, Object> memory_stats = dockerStats.get("memory_stats");
Map<String, Object> blkio_stats = dockerStats.get("blkio_stats");
Docker.ContainerStats stats1 = new ContainerStatsImpl(networks, precpu_stats, memory_stats, blkio_stats);
Docker.ContainerStats stats2 = new ContainerStatsImpl(networks, cpu_stats, memory_stats, blkio_stats);
ContainerNodeSpec.Owner owner = new ContainerNodeSpec.Owner("tester", "testapp", "testinstance");
ContainerNodeSpec.Membership membership = new ContainerNodeSpec.Membership("clustType", "clustId", "grp", 3, false);
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.nodeState(Node.State.active)
.vespaVersion(vespaVersion)
.owner(owner)
.membership(membership)
.minMainMemoryAvailableGb(2)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(storageMaintainer.updateIfNeededAndGetDiskMetricsFor(eq(containerName))).thenReturn(Optional.of(42547019776L));
when(storageMaintainer.getHostTotalMemoryGb()).thenReturn(10d);
when(dockerOperations.getContainerStats(eq(containerName)))
.thenReturn(Optional.of(stats1))
.thenReturn(Optional.of(stats2));
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics();
clock.advance(Duration.ofSeconds(1234));
Path pathToExpectedMetrics = Paths.get(classLoader.getResource("expected.container.system.metrics.txt").getPath());
String expectedMetrics = new String(Files.readAllBytes(pathToExpectedMetrics))
.replaceAll("\\s", "")
.replaceAll("\\n", "");
String[] expectedCommand = {"rpc_invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", expectedMetrics};
doAnswer(invocation -> {
ContainerName calledContainerName = (ContainerName) invocation.getArguments()[0];
long calledTimeout = (long) invocation.getArguments()[1];
String[] calledCommand = new String[invocation.getArguments().length - 2];
System.arraycopy(invocation.getArguments(), 2, calledCommand, 0, calledCommand.length);
calledCommand[calledCommand.length - 1] = calledCommand[calledCommand.length - 1].replaceAll("\"timestamp\":\\d+", "\"timestamp\":0");
assertEquals(containerName, calledContainerName);
assertEquals(5L, calledTimeout);
assertArrayEquals(expectedCommand, calledCommand);
return null;
}).when(dockerOperations).executeCommandInContainerAsRoot(any(), any(), anyVararg());
nodeAgent.updateContainerNodeMetrics();
}
@Test
public void testGetRelevantMetricsForReadyNode() throws Exception {
final ContainerNodeSpec nodeSpec = nodeSpecBuilder
.nodeState(Node.State.ready)
.build();
NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec));
when(dockerOperations.getContainerStats(eq(containerName))).thenReturn(Optional.empty());
nodeAgent.converge();
nodeAgent.updateContainerNodeMetrics();
Set<Map<String, Object>> actualMetrics = metricReceiver.getAllMetricsRaw();
assertEquals(Collections.emptySet(), actualMetrics);
}
private NodeAgentImpl makeNodeAgent(DockerImage dockerImage, boolean isRunning) {
Optional<Container> container = dockerImage != null ?
Optional.of(new Container(
hostName,
dockerImage,
containerName,
isRunning ? Container.State.RUNNING : Container.State.EXITED,
isRunning ? 1 : 0)) :
Optional.empty();
when(dockerOperations.getContainerStats(any())).thenReturn(Optional.of(emptyContainerStats));
when(dockerOperations.getContainer(eq(containerName))).thenReturn(container);
doNothing().when(storageMaintainer).writeFilebeatConfig(any(), any());
doNothing().when(storageMaintainer).writeMetricsConfig(any(), any());
return new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations,
Optional.of(storageMaintainer), environment, clock, Optional.of(aclMaintainer));
}
} |
I don't see you're testing with multiple ClusterSpec with the same cluster id? | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
void createReadyNodesByFlavor(int... nums) {
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nums.length; i++) {
Flavor flavor = flavors.get(i);
for (int j = 0; j < nums[i]; j++) {
int id = nextNodeId++;
nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
}
}
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1 ] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
assertEquals(expected, actual);
}
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes(Node.State.parked).stream()
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.filter(node -> node.state() != Node.State.parked)
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
for (int i = 0; i < nums.length; i++) {
if (nums[i] < 0) continue;
Flavor flavor = flavors.get(i);
countsByFlavor.put(flavor, nums[i]);
}
return countsByFlavor;
}
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
for (int i = 0; iterator.hasNext(); i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(flavorConfigBuilder.build());
}
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
void createReadyNodesByFlavor(int... nums) {
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nums.length; i++) {
Flavor flavor = flavors.get(i);
for (int j = 0; j < nums[i]; j++) {
int id = nextNodeId++;
nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
}
}
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1 ] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
assertEquals(expected, actual);
}
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes(Node.State.parked).stream()
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.filter(node -> node.state() != Node.State.parked)
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
for (int i = 0; i < nums.length; i++) {
if (nums[i] < 0) continue;
Flavor flavor = flavors.get(i);
countsByFlavor.put(flavor, nums[i]);
}
return countsByFlavor;
}
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
for (int i = 0; iterator.hasNext(); i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(flavorConfigBuilder.build());
}
} | |
There are multiple apps of different sizes in the test: https://github.com/yahoo/vespa/blob/e2934af04a9da9562d29ae4778f0c36bab063f8e/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirerTest.java#L38:L43 Some of them have an even and some have an uneven number of nodes. This change will make the apps with an even number of nodes in a cluster into 2 groups, while the uneven-numbered clusters are only a single group. Having this change without the change in `NodeRetirer` will fail the test, as it retires too many nodes. | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
void createReadyNodesByFlavor(int... nums) {
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nums.length; i++) {
Flavor flavor = flavors.get(i);
for (int j = 0; j < nums[i]; j++) {
int id = nextNodeId++;
nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
}
}
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1 ] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
assertEquals(expected, actual);
}
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes(Node.State.parked).stream()
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.filter(node -> node.state() != Node.State.parked)
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
for (int i = 0; i < nums.length; i++) {
if (nums[i] < 0) continue;
Flavor flavor = flavors.get(i);
countsByFlavor.put(flavor, nums[i]);
}
return countsByFlavor;
}
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
for (int i = 0; iterator.hasNext(); i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(flavorConfigBuilder.build());
}
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
void createReadyNodesByFlavor(int... nums) {
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nums.length; i++) {
Flavor flavor = flavors.get(i);
for (int j = 0; j < nums[i]; j++) {
int id = nextNodeId++;
nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
}
}
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1 ] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
assertEquals(expected, actual);
}
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes(Node.State.parked).stream()
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.filter(node -> node.state() != Node.State.parked)
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
for (int i = 0; i < nums.length; i++) {
if (nums[i] < 0) continue;
Flavor flavor = flavors.get(i);
countsByFlavor.put(flavor, nums[i]);
}
return countsByFlavor;
}
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
for (int i = 0; iterator.hasNext(); i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(flavorConfigBuilder.build());
}
} | |
It could be me just not understanding the test framework, but I'm looking for an application that has at least 2 different ClusterSpecs with the same ClusterSpec.Id. It rather looks like a single ClusterSpec is made (with a unique ClusterSpec.Id) that pretends to be a ClusterSpec for a group in a multi-group cluster? | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
void createReadyNodesByFlavor(int... nums) {
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nums.length; i++) {
Flavor flavor = flavors.get(i);
for (int j = 0; j < nums[i]; j++) {
int id = nextNodeId++;
nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
}
}
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1 ] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
assertEquals(expected, actual);
}
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes(Node.State.parked).stream()
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.filter(node -> node.state() != Node.State.parked)
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
for (int i = 0; i < nums.length; i++) {
if (nums[i] < 0) continue;
Flavor flavor = flavors.get(i);
countsByFlavor.put(flavor, nums[i]);
}
return countsByFlavor;
}
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
for (int i = 0; iterator.hasNext(); i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(flavorConfigBuilder.build());
}
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
void createReadyNodesByFlavor(int... nums) {
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nums.length; i++) {
Flavor flavor = flavors.get(i);
for (int j = 0; j < nums[i]; j++) {
int id = nextNodeId++;
nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
}
}
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1 ] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
assertEquals(expected, actual);
}
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes(Node.State.parked).stream()
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.filter(node -> node.state() != Node.State.parked)
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
for (int i = 0; i < nums.length; i++) {
if (nums[i] < 0) continue;
Flavor flavor = flavors.get(i);
countsByFlavor.put(flavor, nums[i]);
}
return countsByFlavor;
}
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
for (int i = 0; iterator.hasNext(); i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(flavorConfigBuilder.build());
}
} | |
Here's how it looks if you group by `ClusterSpec` vs `ClusterSpec.Id`: ```java tester.nodeRepository.getNodes().stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.toSet())) .forEach((application, nodes) -> { System.out.println("\n" + application.toShortString()); nodes.stream() .collect(Collectors.groupingBy(node -> node.allocation().get().membership().cluster(), Collectors.counting())) .forEach(((clusterSpec, count) -> System.out.println(clusterSpec + ": " + count))); nodes.stream() .collect(Collectors.groupingBy(node -> node.allocation().get().membership().cluster().id(), Collectors.counting())) .forEach(((clusterSpecId, count) -> System.out.println(clusterSpecId + ": " + count))); }); ``` Result: ``` sports.results container cluster 'cluster-0' group 1 6.99: 3 container cluster 'cluster-0' group 0 6.99: 3 cluster 'cluster-0': 6 search.images container cluster 'cluster-0' group 1 6.99: 2 container cluster 'cluster-0' group 0 6.99: 2 cluster 'cluster-0': 4 vespa.notes container cluster 'cluster-0' group 0 6.99: 3 cluster 'cluster-0': 3 vespa.calendar container cluster 'cluster-0' group 0 6.99: 7 cluster 'cluster-0': 7 search.videos container cluster 'cluster-0' group 1 6.99: 1 container cluster 'cluster-0' group 0 6.99: 1 cluster 'cluster-0': 2 tester.my-app container cluster 'cluster-0' group 1 6.99: 2 container cluster 'cluster-1' group 1 6.99: 3 container cluster 'cluster-0' group 0 6.99: 2 container cluster 'cluster-1' group 0 6.99: 3 cluster 'cluster-1': 6 cluster 'cluster-0': 4 ``` Notice that each app only has 1 entry when grouped by `ClusterSpec.Id` despite some of them having multiple when grouped by `ClusterSpec`. All app clusters that have an even number of nodes have 2 groups, so you get 2 `ClusterSpecs`, but only 1 `ClusterSpec.Id`. | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
// Wire up an in-memory NodeRepository on top of a mock curator and a mock
// name resolver; the FlavorSpareChecker field is a Mockito mock.
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
// JobControl persists its state in the node repository's database.
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
// Sort flavors by name so tests can address them deterministically by index.
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
// Creates a NodeRetirer under test, wired to this tester's mocked
// collaborators and the given retirement policy. The Duration.ofDays(1)
// argument is presumably the maintenance interval — confirm against
// NodeRetirer's constructor.
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
// Provisions nums[i] tenant nodes of flavor i and walks them through the
// provisioned -> dirty -> ready lifecycle so tests start from ready nodes.
void createReadyNodesByFlavor(int... nums) {
List<Node> newNodes = new ArrayList<>();
for (int flavorIndex = 0; flavorIndex < nums.length; flavorIndex++) {
Flavor flavor = flavors.get(flavorIndex);
for (int count = 0; count < nums[flavorIndex]; count++) {
int id = nextNodeId++;
Node node = nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant);
newNodes.add(node);
}
}
List<Node> added = nodeRepository.addNodes(newNodes);
List<Node> dirty = nodeRepository.setDirty(added);
nodeRepository.setReady(dirty);
}
// Runs one round of the retired and inactive expirers, lazily creating
// them on first use. Advancing the clock by 11 minutes before each
// maintain() call pushes past the 10-minute expiry durations the expirers
// were constructed with.
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
// Stubs the spare checker so that for flavor i it approves exactly
// numAllowed[i] unallocated retirements and refuses thereafter.
// (thenReturn(true, responses): first call true, then numAllowed[i]-1
// trues, then false.)
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// Zero retirements allowed: refuse from the first call on. The
// original code crashed here with ArrayIndexOutOfBoundsException,
// writing to responses[-1] of a zero-length array.
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
// Stubs the spare checker so that for flavor i it approves exactly
// numAllowed[i] allocated retirements and refuses thereafter.
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// Zero retirements allowed: refuse from the first call on. The
// original code crashed here with ArrayIndexOutOfBoundsException,
// writing to responses[-1] of a zero-length array.
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
// Asserts the per-flavor node counts for nodes in the given state.
// A negative entry in nums means "expect no entry for that flavor".
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = new HashMap<>();
for (Node node : nodeRepository.getNodes(state)) {
actual.merge(node.flavor(), 1L, Long::sum);
}
assertEquals(expected, actual);
}
// Asserts how many parked nodes each deployed application owns.
// A negative entry in nums means "expect no entry for that application".
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = new HashMap<>();
for (Node node : nodeRepository.getNodes(Node.State.parked)) {
actual.merge(node.allocation().get().owner(), 1L, Long::sum);
}
assertEquals(expected, actual);
}
// Asserts, per application, the number of allocated nodes that are flagged
// for retirement (wantToRetire set and membership retired) but not yet parked.
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = new HashMap<>();
for (Node node : nodeRepository.getNodes()) {
if (!node.status().wantToRetire()) continue;
if (!node.allocation().isPresent()) continue;
if (!node.allocation().get().membership().retired()) continue;
if (node.state() == Node.State.parked) continue;
actual.merge(node.allocation().get().owner(), 1L, Long::sum);
}
assertEquals(expected, actual);
}
// Builds the expected flavor -> count map; negative entries are sentinels
// meaning "omit this flavor from the expectation".
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
int index = 0;
for (long num : nums) {
if (num >= 0) {
countsByFlavor.put(flavors.get(index), num);
}
index++;
}
return countsByFlavor;
}
// Builds the expected application -> count map, pairing the deployed
// applications (in insertion order of apps) with entries of nums.
// Negative entries mean "omit this application from the expectation".
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
// Bound i by nums.length: the original indexed nums unconditionally and
// threw ArrayIndexOutOfBoundsException when fewer counts than deployed
// applications were supplied (expectedCountsByFlavor already bounds by
// nums.length).
for (int i = 0; iterator.hasNext() && i < nums.length; i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
// Builds numFlavors bare-metal flavors named flavor-0 .. flavor-(n-1),
// each with 1 CPU, 3 GB memory and 2 GB disk.
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder builder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
String name = "flavor-" + i;
builder.addFlavor(name, 1. /* cpu */, 3. /* mem GB */, 2. /* disk GB */, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(builder.build());
}
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
void createReadyNodesByFlavor(int... nums) {
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nums.length; i++) {
Flavor flavor = flavors.get(i);
for (int j = 0; j < nums[i]; j++) {
int id = nextNodeId++;
nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
}
}
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
// Stubs the spare checker so that for flavor i it approves exactly
// numAllowed[i] unallocated retirements and refuses thereafter.
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// Zero retirements allowed: refuse from the first call on. The
// original code crashed here with ArrayIndexOutOfBoundsException,
// writing to responses[-1] of a zero-length array.
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
// Stubs the spare checker so that for flavor i it approves exactly
// numAllowed[i] allocated retirements and refuses thereafter.
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// Zero retirements allowed: refuse from the first call on. The
// original code crashed here with ArrayIndexOutOfBoundsException,
// writing to responses[-1] of a zero-length array.
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
assertEquals(expected, actual);
}
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes(Node.State.parked).stream()
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.filter(node -> node.state() != Node.State.parked)
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
for (int i = 0; i < nums.length; i++) {
if (nums[i] < 0) continue;
Flavor flavor = flavors.get(i);
countsByFlavor.put(flavor, nums[i]);
}
return countsByFlavor;
}
// Builds the expected application -> count map, pairing the deployed
// applications (in insertion order of apps) with entries of nums.
// Negative entries mean "omit this application from the expectation".
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
// Bound i by nums.length: the original indexed nums unconditionally and
// threw ArrayIndexOutOfBoundsException when fewer counts than deployed
// applications were supplied.
for (int i = 0; iterator.hasNext() && i < nums.length; i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(flavorConfigBuilder.build());
}
} | |
Huh, so that seems to do what I would like to see. It looks like the ClusterSpec created here is a template with no group id, which will be used to create a number of ClusterSpecs later on, given by numGroups? If so, that's what confused me. | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
void createReadyNodesByFlavor(int... nums) {
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nums.length; i++) {
Flavor flavor = flavors.get(i);
for (int j = 0; j < nums[i]; j++) {
int id = nextNodeId++;
nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
}
}
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
// Stubs the spare checker so that for flavor i it approves exactly
// numAllowed[i] unallocated retirements and refuses thereafter.
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// Zero retirements allowed: refuse from the first call on. The
// original code crashed here with ArrayIndexOutOfBoundsException,
// writing to responses[-1] of a zero-length array.
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
// Stubs the spare checker so that for flavor i it approves exactly
// numAllowed[i] allocated retirements and refuses thereafter.
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// Zero retirements allowed: refuse from the first call on. The
// original code crashed here with ArrayIndexOutOfBoundsException,
// writing to responses[-1] of a zero-length array.
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
assertEquals(expected, actual);
}
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes(Node.State.parked).stream()
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.filter(node -> node.state() != Node.State.parked)
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
for (int i = 0; i < nums.length; i++) {
if (nums[i] < 0) continue;
Flavor flavor = flavors.get(i);
countsByFlavor.put(flavor, nums[i]);
}
return countsByFlavor;
}
// Builds the expected application -> count map, pairing the deployed
// applications (in insertion order of apps) with entries of nums.
// Negative entries mean "omit this application from the expectation".
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
// Bound i by nums.length: the original indexed nums unconditionally and
// threw ArrayIndexOutOfBoundsException when fewer counts than deployed
// applications were supplied.
for (int i = 0; iterator.hasNext() && i < nums.length; i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(flavorConfigBuilder.build());
}
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
void createReadyNodesByFlavor(int... nums) {
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nums.length; i++) {
Flavor flavor = flavors.get(i);
for (int j = 0; j < nums[i]; j++) {
int id = nextNodeId++;
nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
}
}
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
// Stubs the spare checker so that for flavor i it approves exactly
// numAllowed[i] unallocated retirements and refuses thereafter.
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// Zero retirements allowed: refuse from the first call on. The
// original code crashed here with ArrayIndexOutOfBoundsException,
// writing to responses[-1] of a zero-length array.
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
// Stubs the spare checker so that for flavor i it approves exactly
// numAllowed[i] allocated retirements and refuses thereafter.
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// Zero retirements allowed: refuse from the first call on. The
// original code crashed here with ArrayIndexOutOfBoundsException,
// writing to responses[-1] of a zero-length array.
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
assertEquals(expected, actual);
}
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes(Node.State.parked).stream()
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.filter(node -> node.state() != Node.State.parked)
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
for (int i = 0; i < nums.length; i++) {
if (nums[i] < 0) continue;
Flavor flavor = flavors.get(i);
countsByFlavor.put(flavor, nums[i]);
}
return countsByFlavor;
}
// Builds the expected application -> count map, pairing the deployed
// applications (in insertion order of apps) with entries of nums.
// Negative entries mean "omit this application from the expectation".
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
// Bound i by nums.length: the original indexed nums unconditionally and
// threw ArrayIndexOutOfBoundsException when fewer counts than deployed
// applications were supplied.
for (int i = 0; iterator.hasNext() && i < nums.length; i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(flavorConfigBuilder.build());
}
} | |
Confirmed - the ClusterSpec is used as a template to create one ClusterSpec for each group. | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
for (int i = 0; i < flavorIds.length; i++) {
Flavor flavor = flavors.get(flavorIds[i]);
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"));
Capacity capacity = Capacity.fromNodeCount(numNodes[i], flavor.name());
int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
}
apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
void createReadyNodesByFlavor(int... nums) {
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nums.length; i++) {
Flavor flavor = flavors.get(i);
for (int j = 0; j < nums[i]; j++) {
int id = nextNodeId++;
nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
}
}
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
// Stubs the spare checker so that for flavor i it approves exactly
// numAllowed[i] unallocated retirements and refuses thereafter.
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// Zero retirements allowed: refuse from the first call on. The
// original code crashed here with ArrayIndexOutOfBoundsException,
// writing to responses[-1] of a zero-length array.
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
// Stubs the spare checker so that for flavor i it approves exactly
// numAllowed[i] allocated retirements and refuses thereafter.
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// Zero retirements allowed: refuse from the first call on. The
// original code crashed here with ArrayIndexOutOfBoundsException,
// writing to responses[-1] of a zero-length array.
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
assertEquals(expected, actual);
}
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes(Node.State.parked).stream()
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.filter(node -> node.state() != Node.State.parked)
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
for (int i = 0; i < nums.length; i++) {
if (nums[i] < 0) continue;
Flavor flavor = flavors.get(i);
countsByFlavor.put(flavor, nums[i]);
}
return countsByFlavor;
}
// Builds the expected application -> count map, pairing the deployed
// applications (in insertion order of apps) with entries of nums.
// Negative entries mean "omit this application from the expectation".
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
// Bound i by nums.length: the original indexed nums unconditionally and
// threw ArrayIndexOutOfBoundsException when fewer counts than deployed
// applications were supplied.
for (int i = 0; iterator.hasNext() && i < nums.length; i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(flavorConfigBuilder.build());
}
} | class NodeRetirerTester {
public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
public final ManualClock clock = new ManualClock();
public final NodeRepository nodeRepository;
private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
private final MockDeployer deployer;
private final JobControl jobControl;
private final List<Flavor> flavors;
private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
private RetiredExpirer retiredExpirer;
private InactiveExpirer inactiveExpirer;
private int nextNodeId = 0;
NodeRetirerTester(NodeFlavors nodeFlavors) {
Curator curator = new MockCurator();
nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
jobControl = new JobControl(nodeRepository.database());
NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
deployer = new MockDeployer(provisioner, apps);
flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
}
NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
return new NodeRetirer(nodeRepository, zone, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
}
/**
 * Creates nums[i] nodes of flavors.get(i) and moves them through provisioned -> dirty -> ready.
 * Node/host names are numbered from a counter shared across calls ("node0"/"host0...", "node1"/...).
 * All nodes get IP "::1" — the address the retirement-policy mock in the tests keys on.
 */
void createReadyNodesByFlavor(int... nums) {
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < nums.length; i++) {
Flavor flavor = flavors.get(i);
for (int j = 0; j < nums[i]; j++) {
int id = nextNodeId++;
nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
}
}
// Nodes must pass through the dirty state before they can be made ready
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
nodeRepository.setReady(nodes);
}
/**
 * Runs one RetiredExpirer + InactiveExpirer cycle. The manual clock is advanced past the
 * 10-minute expiry before each maintainer so both actually fire. Expirers are created lazily
 * on the first call.
 */
void iterateMaintainers() {
if (retiredExpirer == null) {
retiredExpirer = new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofMinutes(10), jobControl);
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
}
clock.advance(Duration.ofMinutes(11));
retiredExpirer.maintain();
clock.advance(Duration.ofMinutes(11));
inactiveExpirer.maintain();
}
/**
 * Stubs the spare checker so that, per flavor i, the first numAllowed[i] calls to
 * canRetireUnallocatedNodeWithFlavor(flavor-i) answer true and all later calls answer false.
 * Fix: a count of 0 previously indexed responses[-1] and threw ArrayIndexOutOfBoundsException;
 * it now stubs the checker to always answer false for that flavor.
 */
void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// No retirements allowed for this flavor: always answer false
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
// Last stubbed answer is false; Mockito repeats the final value on further calls
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
/**
 * Stubs the spare checker so that, per flavor i, the first numAllowed[i] calls to
 * canRetireAllocatedNodeWithFlavor(flavor-i) answer true and all later calls answer false.
 * Fix: a count of 0 previously indexed responses[-1] and threw ArrayIndexOutOfBoundsException;
 * it now stubs the checker to always answer false for that flavor.
 */
void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
for (int i = 0; i < numAllowed.length; i++) {
if (numAllowed[i] <= 0) {
// No retirements allowed for this flavor: always answer false
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(false);
continue;
}
Boolean[] responses = new Boolean[numAllowed[i]];
Arrays.fill(responses, true);
// Last stubbed answer is false; Mockito repeats the final value on further calls
responses[responses.length - 1] = false;
when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
}
}
/**
 * Asserts the node count per flavor in the given state. nums[i] belongs to flavors.get(i);
 * a negative entry means no nodes of that flavor are expected in this state (flavor omitted
 * from the expected map, matching groupingBy which creates no entry for absent flavors).
 */
void assertCountsForStateByFlavor(Node.State state, long... nums) {
Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
.collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
assertEquals(expected, actual);
}
/**
 * Asserts the number of parked nodes per application, positionally by deployment order.
 * A negative entry means the application is expected to have no parked nodes.
 */
void assertParkedCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes(Node.State.parked).stream()
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
/**
 * Asserts, per application (positionally by deployment order), how many allocated nodes are
 * currently retiring: flagged wantToRetire, marked retired in their cluster membership,
 * but not yet parked. A negative entry means no such nodes expected for that application.
 */
void assertRetiringCountsByApplication(long... nums) {
Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
.filter(node -> node.status().wantToRetire())
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().retired())
.filter(node -> node.state() != Node.State.parked)
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
assertEquals(expected, actual);
}
/**
 * Builds the expected flavor -> count map from positional counts: nums[i] maps to flavors.get(i).
 * Negative entries are skipped (the flavor is expected to be absent from the actual map).
 * Assumes nums.length <= number of flavors — TODO confirm callers uphold this.
 */
private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
Map<Flavor, Long> countsByFlavor = new HashMap<>();
for (int i = 0; i < nums.length; i++) {
if (nums[i] < 0) continue;
Flavor flavor = flavors.get(i);
countsByFlavor.put(flavor, nums[i]);
}
return countsByFlavor;
}
/**
 * Builds the expected application -> count map from positional counts. Index i follows the
 * iteration order of the apps map (a LinkedHashMap, so deployment/insertion order).
 * Negative entries are skipped (application expected absent). Assumes nums has at least as
 * many entries as there are deployed applications — TODO confirm.
 */
private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
Iterator<ApplicationId> iterator = apps.keySet().iterator();
for (int i = 0; iterator.hasNext(); i++) {
ApplicationId applicationId = iterator.next();
if (nums[i] < 0) continue;
countsByApplicationId.put(applicationId, nums[i]);
}
return countsByApplicationId;
}
/** Creates numFlavors identical bare-metal flavors named flavor-0 ... flavor-(n-1). */
static NodeFlavors makeFlavors(int numFlavors) {
FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
for (int i = 0; i < numFlavors; i++) {
flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
}
return new NodeFlavors(flavorConfigBuilder.build());
}
} | |
invok -> invoke I presume. | public void setup() {
doAnswer(invok -> {
boolean shouldRetire = ((Node) invok.getArguments()[0]).ipAddresses().equals(Collections.singleton("::1"));
return shouldRetire ? Optional.of("Some reason") : Optional.empty();
}).when(policy).shouldRetire(any(Node.class));
when(policy.isActive()).thenReturn(true);
NodeFlavors nodeFlavors = NodeRetirerTester.makeFlavors(5);
tester = new NodeRetirerTester(nodeFlavors);
retirer = spy(tester.makeNodeRetirer(policy));
tester.createReadyNodesByFlavor(21, 42, 27, 15, 8);
tester.deployApp("vespa", "calendar", new int[]{3}, new int[]{7});
tester.deployApp("vespa", "notes", new int[]{0}, new int[]{3});
tester.deployApp("sports", "results", new int[]{0}, new int[]{6});
tester.deployApp("search", "images", new int[]{3}, new int[]{4});
tester.deployApp("search", "videos", new int[]{2}, new int[]{2});
tester.deployApp("tester", "my-app", new int[]{1, 2}, new int[]{4, 6});
} | doAnswer(invok -> { | public void setup() {
doAnswer(invoke -> {
boolean shouldRetire = ((Node) invoke.getArguments()[0]).ipAddresses().equals(Collections.singleton("::1"));
return shouldRetire ? Optional.of("Some reason") : Optional.empty();
}).when(policy).shouldRetire(any(Node.class));
when(policy.isActive()).thenReturn(true);
NodeFlavors nodeFlavors = NodeRetirerTester.makeFlavors(5);
tester = new NodeRetirerTester(nodeFlavors);
retirer = spy(tester.makeNodeRetirer(policy));
tester.createReadyNodesByFlavor(21, 42, 27, 15, 8);
tester.deployApp("vespa", "calendar", new int[]{3}, new int[]{7});
tester.deployApp("vespa", "notes", new int[]{0}, new int[]{3});
tester.deployApp("sports", "results", new int[]{0}, new int[]{6});
tester.deployApp("search", "images", new int[]{3}, new int[]{4});
tester.deployApp("search", "videos", new int[]{2}, new int[]{2});
tester.deployApp("tester", "my-app", new int[]{1, 2}, new int[]{4, 6});
} | class NodeRetirerTest {
private NodeRetirerTester tester;
private NodeRetirer retirer;
private final RetirementPolicy policy = mock(RetirementPolicy.class);
@Before
@Test
/**
 * Retiring unallocated (ready) nodes: with fewer allowed retirements than ready nodes,
 * retireUnallocated() reports incomplete (false); once the remaining allowance covers all
 * ready nodes it reports done (true), every ready node ends up parked, and exactly the parked
 * nodes carry the wantToDeprovision flag.
 */
public void testRetireUnallocated() {
// Ready counts = nodes created in setup minus those consumed by the deployments
tester.assertCountsForStateByFlavor(Node.State.ready, 12, 38, 19, 4, 8);
tester.setNumberAllowedUnallocatedRetirementsPerFlavor(6, 30, 15, 2, 4);
assertFalse(retirer.retireUnallocated());
tester.assertCountsForStateByFlavor(Node.State.parked, 6, 30, 15, 2, 4);
tester.assertCountsForStateByFlavor(Node.State.ready, 6, 8, 4, 2, 4);
// Second pass with enough allowance to finish the job
tester.setNumberAllowedUnallocatedRetirementsPerFlavor(10, 20, 5, 5, 4);
assertTrue(retirer.retireUnallocated());
tester.assertCountsForStateByFlavor(Node.State.parked, 12, 38, 19, 4, 8);
tester.nodeRepository.getNodes().forEach(node ->
assertEquals(node.status().wantToDeprovision(), node.state() == Node.State.parked));
}
@Test
/**
 * Retiring allocated nodes: ready nodes are re-addressed to "::2" so the policy mock
 * (which retires nodes with IP "::1") only targets allocated nodes. Retirement progresses
 * one wave per retireAllocated() + maintainer cycle until the per-flavor allowances are
 * exhausted, and only parked nodes end up flagged wantToDeprovision.
 */
public void testRetireAllocated() {
tester.nodeRepository.getNodes(Node.State.ready)
.forEach(node -> tester.nodeRepository.write(node.withIpAddresses(Collections.singleton("::2"))));
tester.assertCountsForStateByFlavor(Node.State.active, 9, 4, 8, 11, -1);
tester.setNumberAllowedAllocatedRetirementsPerFlavor(3, 2, 4, 2);
retirer.retireAllocated();
// Nothing is parked yet — nodes are only marked retiring until the expirers run
tester.assertParkedCountsByApplication(-1, -1, -1, -1, -1, -1);
tester.assertRetiringCountsByApplication(1, 1, 1, 1, 1, 2);
// Repeated calls without running maintainers must not start additional retirements
retirer.retireAllocated();
retirer.retireAllocated();
tester.assertRetiringCountsByApplication(1, 1, 1, 1, 1, 2);
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 1, 1, 1, 2);
// Next wave retires more nodes where the flavor allowance is not yet used up
retirer.retireAllocated();
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 2, 1, 2, 4);
// Allowances exhausted: a further wave changes nothing
retirer.retireAllocated();
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 2, 1, 2, 4);
tester.nodeRepository.getNodes().forEach(node ->
assertEquals(node.status().wantToDeprovision(), node.state() == Node.State.parked));
}
@Test
/**
 * Verifies the order in which the retirer visits active applications
 * (presumably largest allocation first, ties broken deterministically — the expected
 * list encodes that order for the setup() deployments).
 */
public void testGetActiveApplicationIds() {
List<String> expectedOrder = Arrays.asList(
"tester.my-app", "vespa.calendar", "sports.results", "search.images", "vespa.notes", "search.videos");
List<String> actualOrder = retirer.getActiveApplicationIds(tester.nodeRepository.getNodes()).stream()
.map(applicationId -> applicationId.toShortString().replace(":default", ""))
.collect(Collectors.toList());
assertEquals(expectedOrder, actualOrder);
}
@Test
/**
 * All of an application's nodes are retireable initially; a node already flagged
 * wantToRetire, a failed node, and a node whose IP no longer matches the policy ("::2")
 * must each be filtered out.
 */
public void testGetRetireableNodesForApplication() {
ApplicationId app = new ApplicationId.Builder().tenant("vespa").applicationName("calendar").build();
List<Node> nodes = tester.nodeRepository.getNodes(app);
Set<String> actual = retirer.filterRetireableNodes(nodes).stream().map(Node::hostname).collect(Collectors.toSet());
Set<String> expected = nodes.stream().map(Node::hostname).collect(Collectors.toSet());
assertEquals(expected, actual);
// Exclude one node via each of the three filter conditions
Node nodeWantToRetire = tester.nodeRepository.getNode("host3.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.write(nodeWantToRetire.with(nodeWantToRetire.status().withWantToRetire(true)));
Node nodeToFail = tester.nodeRepository.getNode("host5.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.fail(nodeToFail.hostname(), Agent.system, "Failed for unit testing");
Node nodeToUpdate = tester.nodeRepository.getNode("host8.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.write(nodeToUpdate.withIpAddresses(Collections.singleton("::2")));
nodes = tester.nodeRepository.getNodes(app);
Set<String> excluded = Stream.of(nodeWantToRetire, nodeToFail, nodeToUpdate).map(Node::hostname).collect(Collectors.toSet());
Set<String> actualAfterUpdates = retirer.filterRetireableNodes(nodes).stream().map(Node::hostname).collect(Collectors.toSet());
Set<String> expectedAfterUpdates = nodes.stream().map(Node::hostname).filter(node -> !excluded.contains(node)).collect(Collectors.toSet());
assertEquals(expectedAfterUpdates, actualAfterUpdates);
}
@Test
/**
 * The per-cluster retirement budget (here 2) is reduced by nodes already marked
 * wantToRetire but not yet parked: 3 marked -> budget 0; after 2 of them are parked,
 * only 1 still counts against the budget -> 1 slot free.
 */
public void testGetNumberNodesAllowToRetireForCluster() {
ApplicationId app = new ApplicationId.Builder().tenant("vespa").applicationName("calendar").build();
long actualAllActive = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(2, actualAllActive);
List<Node> nodesToRetire = tester.nodeRepository.getNodes(app).stream().limit(3).collect(Collectors.toList());
nodesToRetire.forEach(node -> tester.nodeRepository.write(node.with(node.status().withWantToRetire(true))));
long actualOneWantToRetire = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(0, actualOneWantToRetire);
nodesToRetire.stream().limit(2).forEach(node ->
tester.nodeRepository.park(node.hostname(), Agent.system, "Parked for unit testing"));
long actualOneRetired = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(1, actualOneRetired);
}
@Test
/** With an inactive retirement policy, maintain() must not attempt any retirements. */
public void inactivePolicyDoesNothingTest() {
when(policy.isActive()).thenReturn(false);
retirer.maintain();
verify(retirer, never()).retireUnallocated();
verify(retirer, never()).retireAllocated();
}
} | class NodeRetirerTest {
private NodeRetirerTester tester;
private NodeRetirer retirer;
private final RetirementPolicy policy = mock(RetirementPolicy.class);
@Before
@Test
public void testRetireUnallocated() {
tester.assertCountsForStateByFlavor(Node.State.ready, 12, 38, 19, 4, 8);
tester.setNumberAllowedUnallocatedRetirementsPerFlavor(6, 30, 15, 2, 4);
assertFalse(retirer.retireUnallocated());
tester.assertCountsForStateByFlavor(Node.State.parked, 6, 30, 15, 2, 4);
tester.assertCountsForStateByFlavor(Node.State.ready, 6, 8, 4, 2, 4);
tester.setNumberAllowedUnallocatedRetirementsPerFlavor(10, 20, 5, 5, 4);
assertTrue(retirer.retireUnallocated());
tester.assertCountsForStateByFlavor(Node.State.parked, 12, 38, 19, 4, 8);
tester.nodeRepository.getNodes().forEach(node ->
assertEquals(node.status().wantToDeprovision(), node.state() == Node.State.parked));
}
@Test
public void testRetireAllocated() {
tester.nodeRepository.getNodes(Node.State.ready)
.forEach(node -> tester.nodeRepository.write(node.withIpAddresses(Collections.singleton("::2"))));
tester.assertCountsForStateByFlavor(Node.State.active, 9, 4, 8, 11, -1);
tester.setNumberAllowedAllocatedRetirementsPerFlavor(3, 2, 4, 2);
retirer.retireAllocated();
tester.assertParkedCountsByApplication(-1, -1, -1, -1, -1, -1);
tester.assertRetiringCountsByApplication(1, 1, 1, 1, 1, 2);
retirer.retireAllocated();
retirer.retireAllocated();
tester.assertRetiringCountsByApplication(1, 1, 1, 1, 1, 2);
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 1, 1, 1, 2);
retirer.retireAllocated();
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 2, 1, 2, 4);
retirer.retireAllocated();
tester.iterateMaintainers();
tester.assertParkedCountsByApplication(1, 1, 2, 1, 2, 4);
tester.nodeRepository.getNodes().forEach(node ->
assertEquals(node.status().wantToDeprovision(), node.state() == Node.State.parked));
}
@Test
public void testGetActiveApplicationIds() {
List<String> expectedOrder = Arrays.asList(
"tester.my-app", "vespa.calendar", "sports.results", "search.images", "vespa.notes", "search.videos");
List<String> actualOrder = retirer.getActiveApplicationIds(tester.nodeRepository.getNodes()).stream()
.map(applicationId -> applicationId.toShortString().replace(":default", ""))
.collect(Collectors.toList());
assertEquals(expectedOrder, actualOrder);
}
@Test
public void testGetRetireableNodesForApplication() {
ApplicationId app = new ApplicationId.Builder().tenant("vespa").applicationName("calendar").build();
List<Node> nodes = tester.nodeRepository.getNodes(app);
Set<String> actual = retirer.filterRetireableNodes(nodes).stream().map(Node::hostname).collect(Collectors.toSet());
Set<String> expected = nodes.stream().map(Node::hostname).collect(Collectors.toSet());
assertEquals(expected, actual);
Node nodeWantToRetire = tester.nodeRepository.getNode("host3.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.write(nodeWantToRetire.with(nodeWantToRetire.status().withWantToRetire(true)));
Node nodeToFail = tester.nodeRepository.getNode("host5.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.fail(nodeToFail.hostname(), Agent.system, "Failed for unit testing");
Node nodeToUpdate = tester.nodeRepository.getNode("host8.test.yahoo.com").orElseThrow(RuntimeException::new);
tester.nodeRepository.write(nodeToUpdate.withIpAddresses(Collections.singleton("::2")));
nodes = tester.nodeRepository.getNodes(app);
Set<String> excluded = Stream.of(nodeWantToRetire, nodeToFail, nodeToUpdate).map(Node::hostname).collect(Collectors.toSet());
Set<String> actualAfterUpdates = retirer.filterRetireableNodes(nodes).stream().map(Node::hostname).collect(Collectors.toSet());
Set<String> expectedAfterUpdates = nodes.stream().map(Node::hostname).filter(node -> !excluded.contains(node)).collect(Collectors.toSet());
assertEquals(expectedAfterUpdates, actualAfterUpdates);
}
@Test
public void testGetNumberNodesAllowToRetireForCluster() {
ApplicationId app = new ApplicationId.Builder().tenant("vespa").applicationName("calendar").build();
long actualAllActive = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(2, actualAllActive);
List<Node> nodesToRetire = tester.nodeRepository.getNodes(app).stream().limit(3).collect(Collectors.toList());
nodesToRetire.forEach(node -> tester.nodeRepository.write(node.with(node.status().withWantToRetire(true))));
long actualOneWantToRetire = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(0, actualOneWantToRetire);
nodesToRetire.stream().limit(2).forEach(node ->
tester.nodeRepository.park(node.hostname(), Agent.system, "Parked for unit testing"));
long actualOneRetired = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
assertEquals(1, actualOneRetired);
}
@Test
public void inactivePolicyDoesNothingTest() {
when(policy.isActive()).thenReturn(false);
retirer.maintain();
verify(retirer, never()).retireUnallocated();
verify(retirer, never()).retireAllocated();
}
} |
Side note: the C++ implementation doesn't require (or even parse) the `name` field at the present. But at least it will happily ignore its presence until it's added. | private static PeerPolicy toPeerPolicy(AuthorizedPeer authorizedPeer) {
if (authorizedPeer.name == null) {
throw missingFieldException("name");
}
if (authorizedPeer.requiredCredentials.isEmpty()) {
throw missingFieldException("required-credentials");
}
return new PeerPolicy(authorizedPeer.name, toRequestPeerCredentials(authorizedPeer.requiredCredentials));
} | throw missingFieldException("name"); | private static PeerPolicy toPeerPolicy(AuthorizedPeer authorizedPeer) {
if (authorizedPeer.name == null) {
throw missingFieldException("name");
}
if (authorizedPeer.requiredCredentials.isEmpty()) {
throw missingFieldException("required-credentials");
}
return new PeerPolicy(authorizedPeer.name, toRequestPeerCredentials(authorizedPeer.requiredCredentials));
} | class TransportSecurityOptionsJsonSerializer {
private static final ObjectMapper mapper = new ObjectMapper();
/**
 * Reads the JSON transport-security configuration from the stream and maps it to the
 * domain object. IOExceptions are rethrown unchecked.
 */
public TransportSecurityOptions deserialize(InputStream in) {
try {
TransportSecurityOptionsEntity entity = mapper.readValue(in, TransportSecurityOptionsEntity.class);
return toTransportSecurityOptions(entity);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
/** Writes the given options as JSON to the stream; IOExceptions are rethrown unchecked. */
public void serialize(OutputStream out, TransportSecurityOptions options) {
try {
mapper.writeValue(out, toTransportSecurityOptionsEntity(options));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
/**
 * Maps the deserialized JSON entity onto the domain options. The 'files' section is optional;
 * within it, 'certificates' and 'private-key' must be configured together, and the CA file is
 * optional. Authorized peers are only set when at least one entry is present.
 */
private static TransportSecurityOptions toTransportSecurityOptions(TransportSecurityOptionsEntity entity) {
TransportSecurityOptions.Builder builder = new TransportSecurityOptions.Builder();
Files files = entity.files;
if (files != null) {
if (files.certificatesFile != null && files.privateKeyFile != null) {
// NOTE(review): builder method is named withCertificate here but withCertificates in a
// later revision of this class — verify against TransportSecurityOptions.Builder
builder.withCertificate(Paths.get(files.certificatesFile), Paths.get(files.privateKeyFile));
} else if (files.certificatesFile != null || files.privateKeyFile != null) {
throw new IllegalArgumentException("Both 'private-key' and 'certificates' must be configured together");
}
if (files.caCertificatesFile != null) {
builder.withCaCertificate(Paths.get(files.caCertificatesFile));
}
}
List<AuthorizedPeer> authorizedPeersEntity = entity.authorizedPeers;
if (authorizedPeersEntity.size() > 0) {
builder.withAuthorizedPeers(new AuthorizedPeers(toPeerPolicies(authorizedPeersEntity)));
}
return builder.build();
}
/** Converts every deserialized authorized-peer entry into its PeerPolicy domain object. */
private static Set<PeerPolicy> toPeerPolicies(List<AuthorizedPeer> authorizedPeersEntity) {
Set<PeerPolicy> policies = new HashSet<>();
for (AuthorizedPeer authorizedPeer : authorizedPeersEntity) {
policies.add(toPeerPolicy(authorizedPeer));
}
return policies;
}
/**
 * Converts the deserialized required-credential entries into domain objects.
 * NOTE(review): the name looks like a typo for toRequiredPeerCredentials; renaming would
 * also require updating the caller in toPeerPolicy.
 */
private static List<RequiredPeerCredential> toRequestPeerCredentials(List<RequiredCredential> requiredCredentials) {
return requiredCredentials.stream()
.map(TransportSecurityOptionsJsonSerializer::toRequiredPeerCredential)
.collect(toList());
}
/**
 * Maps a deserialized required-credential entry to its domain object.
 * Both 'field' and 'must-match' are mandatory; missing values now use the shared
 * missingFieldException helper so the error message ("'<name>' missing") matches the other
 * mandatory-field checks in this class instead of the bare "field"/"must-match" text.
 */
private static RequiredPeerCredential toRequiredPeerCredential(RequiredCredential requiredCredential) {
if (requiredCredential.field == null) {
throw missingFieldException("field");
}
if (requiredCredential.matchExpression == null) {
throw missingFieldException("must-match");
}
return new RequiredPeerCredential(toField(requiredCredential.field), new HostGlobPattern(requiredCredential.matchExpression));
}
/** Maps the serialized credential-field enum to the domain enum; unknown values are rejected. */
private static RequiredPeerCredential.Field toField(CredentialField field) {
switch (field) {
case CN: return RequiredPeerCredential.Field.CN;
case SAN_DNS: return RequiredPeerCredential.Field.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
/**
 * Maps the domain options back onto the JSON entity for serialization.
 * The 'files' section is always emitted (fields set only when present); each peer policy
 * becomes an authorized-peer entry with its required credentials.
 */
private static TransportSecurityOptionsEntity toTransportSecurityOptionsEntity(TransportSecurityOptions options) {
TransportSecurityOptionsEntity entity = new TransportSecurityOptionsEntity();
entity.files = new Files();
options.getCaCertificatesFile().ifPresent(value -> entity.files.caCertificatesFile = value.toString());
options.getCertificatesFile().ifPresent(value -> entity.files.certificatesFile = value.toString());
options.getPrivateKeyFile().ifPresent(value -> entity.files.privateKeyFile = value.toString());
options.getAuthorizedPeers().ifPresent( authorizedPeers -> {
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
AuthorizedPeer authorizedPeer = new AuthorizedPeer();
authorizedPeer.name = peerPolicy.peerName();
for (RequiredPeerCredential requiredPeerCredential : peerPolicy.requiredCredentials()) {
RequiredCredential requiredCredential = new RequiredCredential();
requiredCredential.field = toField(requiredPeerCredential.field());
requiredCredential.matchExpression = requiredPeerCredential.pattern().asString();
authorizedPeer.requiredCredentials.add(requiredCredential);
}
entity.authorizedPeers.add(authorizedPeer);
}
});
return entity;
}
/** Maps the domain credential-field enum to its serialized form; unknown values are rejected. */
private static CredentialField toField(RequiredPeerCredential.Field field) {
switch (field) {
case CN: return CredentialField.CN;
case SAN_DNS: return CredentialField.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
/** Creates the exception used for any missing mandatory field, with a uniform "'<name>' missing" message. */
private static IllegalArgumentException missingFieldException(String fieldName) {
String message = "'" + fieldName + "' missing";
return new IllegalArgumentException(message);
}
} | class TransportSecurityOptionsJsonSerializer {
private static final ObjectMapper mapper = new ObjectMapper();
public TransportSecurityOptions deserialize(InputStream in) {
try {
TransportSecurityOptionsEntity entity = mapper.readValue(in, TransportSecurityOptionsEntity.class);
return toTransportSecurityOptions(entity);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public void serialize(OutputStream out, TransportSecurityOptions options) {
try {
mapper.writeValue(out, toTransportSecurityOptionsEntity(options));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private static TransportSecurityOptions toTransportSecurityOptions(TransportSecurityOptionsEntity entity) {
TransportSecurityOptions.Builder builder = new TransportSecurityOptions.Builder();
Files files = entity.files;
if (files != null) {
if (files.certificatesFile != null && files.privateKeyFile != null) {
builder.withCertificates(Paths.get(files.certificatesFile), Paths.get(files.privateKeyFile));
} else if (files.certificatesFile != null || files.privateKeyFile != null) {
throw new IllegalArgumentException("Both 'private-key' and 'certificates' must be configured together");
}
if (files.caCertificatesFile != null) {
builder.withCaCertificates(Paths.get(files.caCertificatesFile));
}
}
List<AuthorizedPeer> authorizedPeersEntity = entity.authorizedPeers;
if (authorizedPeersEntity.size() > 0) {
builder.withAuthorizedPeers(new AuthorizedPeers(toPeerPolicies(authorizedPeersEntity)));
}
return builder.build();
}
private static Set<PeerPolicy> toPeerPolicies(List<AuthorizedPeer> authorizedPeersEntity) {
return authorizedPeersEntity.stream()
.map(TransportSecurityOptionsJsonSerializer::toPeerPolicy)
.collect(toSet());
}
private static List<RequiredPeerCredential> toRequestPeerCredentials(List<RequiredCredential> requiredCredentials) {
return requiredCredentials.stream()
.map(TransportSecurityOptionsJsonSerializer::toRequiredPeerCredential)
.collect(toList());
}
private static RequiredPeerCredential toRequiredPeerCredential(RequiredCredential requiredCredential) {
if (requiredCredential.field == null) {
throw missingFieldException("field");
}
if (requiredCredential.matchExpression == null) {
throw missingFieldException("must-match");
}
return new RequiredPeerCredential(toField(requiredCredential.field), new HostGlobPattern(requiredCredential.matchExpression));
}
private static RequiredPeerCredential.Field toField(CredentialField field) {
switch (field) {
case CN: return RequiredPeerCredential.Field.CN;
case SAN_DNS: return RequiredPeerCredential.Field.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
private static TransportSecurityOptionsEntity toTransportSecurityOptionsEntity(TransportSecurityOptions options) {
TransportSecurityOptionsEntity entity = new TransportSecurityOptionsEntity();
entity.files = new Files();
options.getCaCertificatesFile().ifPresent(value -> entity.files.caCertificatesFile = value.toString());
options.getCertificatesFile().ifPresent(value -> entity.files.certificatesFile = value.toString());
options.getPrivateKeyFile().ifPresent(value -> entity.files.privateKeyFile = value.toString());
options.getAuthorizedPeers().ifPresent( authorizedPeers -> {
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
AuthorizedPeer authorizedPeer = new AuthorizedPeer();
authorizedPeer.name = peerPolicy.peerName();
for (RequiredPeerCredential requiredPeerCredential : peerPolicy.requiredCredentials()) {
RequiredCredential requiredCredential = new RequiredCredential();
requiredCredential.field = toField(requiredPeerCredential.field());
requiredCredential.matchExpression = requiredPeerCredential.pattern().asString();
authorizedPeer.requiredCredentials.add(requiredCredential);
}
entity.authorizedPeers.add(authorizedPeer);
}
});
return entity;
}
private static CredentialField toField(RequiredPeerCredential.Field field) {
switch (field) {
case CN: return CredentialField.CN;
case SAN_DNS: return CredentialField.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
private static IllegalArgumentException missingFieldException(String fieldName) {
return new IllegalArgumentException(String.format("'%s' missing", fieldName));
}
} |
Not all combinators commute so I think you need to keep track of whether you switched order of a and b here. | private Tensor mappedHashJoin(Tensor a, Tensor b, TensorType joinedType) {
TensorType commonDimensionType = commonDimensions(a, b);
if (commonDimensionType.dimensions().isEmpty()) {
return mappedGeneralJoin(a, b, joinedType);
}
Tensor smallerTensor = a.size() > b.size() ? b : a;
Tensor largerTensor = a.size() > b.size() ? a : b;
a = smallerTensor;
b = largerTensor;
Map<TensorAddress, List<Tensor.Cell>> aCellsByCommonAddress = new HashMap<>();
for (Iterator<Tensor.Cell> cellIterator = a.cellIterator(); cellIterator.hasNext(); ) {
Tensor.Cell cell = cellIterator.next();
TensorAddress partialCommonAddress = partialCommonAddress(cell, a.type(), commonDimensionType);
aCellsByCommonAddress.putIfAbsent(partialCommonAddress, new ArrayList<>());
aCellsByCommonAddress.get(partialCommonAddress).add(cell);
}
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> cellIterator = b.cellIterator(); cellIterator.hasNext(); ) {
Tensor.Cell cell = cellIterator.next();
TensorAddress partialCommonAddress = partialCommonAddress(cell, b.type(), commonDimensionType);
for (Tensor.Cell aCell : aCellsByCommonAddress.getOrDefault(partialCommonAddress, Collections.emptyList())) {
TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aToIndexes,
cell.getKey(), bToIndexes, joinedType);
if (combinedAddress == null) continue;
builder.cell(combinedAddress, combinator.applyAsDouble(aCell.getValue(), cell.getValue()));
}
}
return builder.build();
} | builder.cell(combinedAddress, combinator.applyAsDouble(aCell.getValue(), cell.getValue())); | private Tensor mappedHashJoin(Tensor a, Tensor b, TensorType joinedType) {
TensorType commonDimensionType = commonDimensions(a, b);
if (commonDimensionType.dimensions().isEmpty()) {
return mappedGeneralJoin(a, b, joinedType);
}
boolean swapTensors = a.size() > b.size();
if (swapTensors) {
Tensor temp = a;
a = b;
b = temp;
}
int[] aIndexesInCommon = mapIndexes(commonDimensionType, a.type());
int[] bIndexesInCommon = mapIndexes(commonDimensionType, b.type());
int[] aIndexesInJoined = mapIndexes(a.type(), joinedType);
int[] bIndexesInJoined = mapIndexes(b.type(), joinedType);
Map<TensorAddress, List<Tensor.Cell>> aCellsByCommonAddress = new HashMap<>();
for (Iterator<Tensor.Cell> cellIterator = a.cellIterator(); cellIterator.hasNext(); ) {
Tensor.Cell aCell = cellIterator.next();
TensorAddress partialCommonAddress = partialCommonAddress(aCell, aIndexesInCommon);
aCellsByCommonAddress.putIfAbsent(partialCommonAddress, new ArrayList<>());
aCellsByCommonAddress.get(partialCommonAddress).add(aCell);
}
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> cellIterator = b.cellIterator(); cellIterator.hasNext(); ) {
Tensor.Cell bCell = cellIterator.next();
TensorAddress partialCommonAddress = partialCommonAddress(bCell, bIndexesInCommon);
for (Tensor.Cell aCell : aCellsByCommonAddress.getOrDefault(partialCommonAddress, Collections.emptyList())) {
TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aIndexesInJoined,
bCell.getKey(), bIndexesInJoined, joinedType);
if (combinedAddress == null) continue;
double combinedValue = swapTensors ?
combinator.applyAsDouble(bCell.getValue(), aCell.getValue()) :
combinator.applyAsDouble(aCell.getValue(), bCell.getValue());
builder.cell(combinedAddress, combinedValue);
}
}
return builder.build();
} | class Join extends PrimitiveTensorFunction {
private final TensorFunction argumentA, argumentB;
private final DoubleBinaryOperator combinator;
/**
 * Creates a join of two tensor functions whose matching cells are combined with the given
 * operator. All arguments are mandatory; the operator need not be commutative, so argument
 * order matters at evaluation time.
 */
public Join(TensorFunction argumentA, TensorFunction argumentB, DoubleBinaryOperator combinator) {
Objects.requireNonNull(argumentA, "The first argument tensor cannot be null");
Objects.requireNonNull(argumentB, "The second argument tensor cannot be null");
Objects.requireNonNull(combinator, "The combinator function cannot be null");
this.argumentA = argumentA;
this.argumentB = argumentB;
this.combinator = combinator;
}
public TensorFunction argumentA() { return argumentA; }
public TensorFunction argumentB() { return argumentB; }
public DoubleBinaryOperator combinator() { return combinator; }
@Override
public List<TensorFunction> functionArguments() { return ImmutableList.of(argumentA, argumentB); }
@Override
public TensorFunction replaceArguments(List<TensorFunction> arguments) {
if ( arguments.size() != 2)
throw new IllegalArgumentException("Join must have 2 arguments, got " + arguments.size());
return new Join(arguments.get(0), arguments.get(1), combinator);
}
@Override
public PrimitiveTensorFunction toPrimitive() {
return new Join(argumentA.toPrimitive(), argumentB.toPrimitive(), combinator);
}
@Override
public String toString(ToStringContext context) {
return "join(" + argumentA.toString(context) + ", " + argumentB.toString(context) + ", " + combinator + ")";
}
/**
 * Evaluates both arguments and dispatches to the most specific join strategy:
 * same single indexed dimension -> vector join; identical dimension counts -> single-space
 * join; one type's dimensions a superset of the other's -> subspace join (the boolean flags
 * whether the operands were swapped, so non-commutative combinators can be applied in the
 * original order); otherwise the general join.
 */
@Override
public Tensor evaluate(EvaluationContext context) {
Tensor a = argumentA.evaluate(context);
Tensor b = argumentB.evaluate(context);
TensorType joinedType = new TensorType.Builder(a.type(), b.type()).build();
if (hasSingleIndexedDimension(a) && hasSingleIndexedDimension(b) && a.type().dimensions().get(0).name().equals(b.type().dimensions().get(0).name()))
return indexedVectorJoin((IndexedTensor)a, (IndexedTensor)b, joinedType);
else if (joinedType.dimensions().size() == a.type().dimensions().size() && joinedType.dimensions().size() == b.type().dimensions().size())
return singleSpaceJoin(a, b, joinedType);
else if (a.type().dimensions().containsAll(b.type().dimensions()))
return subspaceJoin(b, a, joinedType, true);
else if (b.type().dimensions().containsAll(a.type().dimensions()))
return subspaceJoin(a, b, joinedType, false);
else
return generalJoin(a, b, joinedType);
}
/** Returns whether this tensor has exactly one dimension, and that dimension is indexed. */
private boolean hasSingleIndexedDimension(Tensor tensor) {
return tensor.type().dimensions().size() == 1 && tensor.type().dimensions().get(0).isIndexed();
}
/**
 * Fast path for joining two single-dimension indexed tensors (vectors) sharing the same
 * dimension name. Only the overlapping prefix is joined: the result length is the shorter
 * of the two vectors.
 */
private Tensor indexedVectorJoin(IndexedTensor a, IndexedTensor b, TensorType type) {
int joinedLength = Math.min(a.dimensionSizes().size(0), b.dimensionSizes().size(0));
Iterator<Double> aIterator = a.valueIterator();
Iterator<Double> bIterator = b.valueIterator();
IndexedTensor.Builder builder = IndexedTensor.Builder.of(type, new DimensionSizes.Builder(1).set(0, joinedLength).build());
for (int i = 0; i < joinedLength; i++)
builder.cell(combinator.applyAsDouble(aIterator.next(), bIterator.next()), i);
return builder.build();
}
/**
 * When both tensors have the same dimensions, at most one cell matches a cell in the other
 * tensor, so each cell of a is combined with b's cell at the identical address.
 * A NaN from b.get(...) is treated as "no cell at this address" and skipped — presumably
 * Tensor.get returns NaN for missing cells; confirm.
 */
private Tensor singleSpaceJoin(Tensor a, Tensor b, TensorType joinedType) {
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> i = a.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = i.next();
double bCellValue = b.get(aCell.getKey());
if (Double.isNaN(bCellValue)) continue;
builder.cell(aCell.getKey(), combinator.applyAsDouble(aCell.getValue(), bCellValue));
}
return builder.build();
}
/** Joins a tensor into a superspace which contains all of its dimensions. */
private Tensor subspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
    boolean bothIndexed = (subspace instanceof IndexedTensor) && (superspace instanceof IndexedTensor);
    if (bothIndexed)
        return indexedSubspaceJoin((IndexedTensor) subspace, (IndexedTensor) superspace, joinedType, reversedArgumentOrder);
    return generalSubspaceJoin(subspace, superspace, joinedType, reversedArgumentOrder);
}
// Subspace join specialized for two indexed tensors: iterates each subspace of the
// superspace spanned by the dimensions it does not share with the subspace tensor,
// and joins the subspace tensor's values into each of them in order.
private Tensor indexedSubspaceJoin(IndexedTensor subspace, IndexedTensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
// An empty operand produces an empty (zero-sized) result.
if (subspace.size() == 0 || superspace.size() == 0)
return Tensor.Builder.of(joinedType, new DimensionSizes.Builder(joinedType.dimensions().size()).build()).build();
DimensionSizes joinedSizes = joinedSize(joinedType, subspace, superspace);
IndexedTensor.Builder builder = (IndexedTensor.Builder)Tensor.Builder.of(joinedType, joinedSizes);
// The dimensions present only in the superspace select which subspaces to iterate.
Set<String> superDimensionNames = new HashSet<>(superspace.type().dimensionNames());
superDimensionNames.removeAll(subspace.type().dimensionNames());
for (Iterator<IndexedTensor.SubspaceIterator> i = superspace.subspaceIterator(superDimensionNames, joinedSizes); i.hasNext(); ) {
IndexedTensor.SubspaceIterator subspaceInSuper = i.next();
joinSubspaces(subspace.valueIterator(), subspace.size(),
subspaceInSuper, subspaceInSuper.size(),
reversedArgumentOrder, builder);
}
return builder.build();
}
/**
 * Combines one subspace value with one superspace cell pairwise, up to the shorter of
 * the two sequences. reversedArgumentOrder means the superspace was the caller's first
 * operand, so the combinator arguments are swapped to preserve the caller's order.
 */
private void joinSubspaces(Iterator<Double> subspace, int subspaceSize,
                           Iterator<Tensor.Cell> superspace, int superspaceSize,
                           boolean reversedArgumentOrder, IndexedTensor.Builder builder) {
    int joinedLength = Math.min(subspaceSize, superspaceSize);
    for (int i = 0; i < joinedLength; i++) {
        Tensor.Cell supercell = superspace.next();
        double subspaceValue = subspace.next();
        double joinedValue = reversedArgumentOrder
                ? combinator.applyAsDouble(supercell.getValue(), subspaceValue)
                : combinator.applyAsDouble(subspaceValue, supercell.getValue());
        builder.cell(supercell, joinedValue);
    }
}
// Computes the size of each dimension of the joined type: the minimum of the two operand
// sizes when both operands have the dimension, otherwise the size in whichever has it.
private DimensionSizes joinedSize(TensorType joinedType, IndexedTensor a, IndexedTensor b) {
DimensionSizes.Builder builder = new DimensionSizes.Builder(joinedType.dimensions().size());
for (int i = 0; i < builder.dimensions(); i++) {
String dimensionName = joinedType.dimensions().get(i).name();
Optional<Integer> aIndex = a.type().indexOfDimension(dimensionName);
Optional<Integer> bIndex = b.type().indexOfDimension(dimensionName);
if (aIndex.isPresent() && bIndex.isPresent())
builder.set(i, Math.min(b.dimensionSizes().size(bIndex.get()), a.dimensionSizes().size(aIndex.get())));
else if (aIndex.isPresent())
builder.set(i, a.dimensionSizes().size(aIndex.get()));
else if (bIndex.isPresent())
builder.set(i, b.dimensionSizes().size(bIndex.get()));
}
return builder.build();
}
// Subspace join for the general (non-indexed) case: for each superspace cell, projects
// its address onto the subspace's dimensions and looks up the matching subspace value.
private Tensor generalSubspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
int[] subspaceIndexes = subspaceIndexes(superspace.type(), subspace.type());
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> i = superspace.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> supercell = i.next();
TensorAddress subaddress = mapAddressToSubspace(supercell.getKey(), subspaceIndexes);
double subspaceValue = subspace.get(subaddress);
// NaN presumably means no matching subspace cell - only emit when a value exists.
if ( ! Double.isNaN(subspaceValue))
builder.cell(supercell.getKey(),
reversedArgumentOrder ? combinator.applyAsDouble(supercell.getValue(), subspaceValue)
: combinator.applyAsDouble(subspaceValue, supercell.getValue()));
}
return builder.build();
}
/** Returns the indexes in the superspace type which should be retained to create the subspace type */
private int[] subspaceIndexes(TensorType supertype, TensorType subtype) {
    int dimensionCount = subtype.dimensions().size();
    int[] indexes = new int[dimensionCount];
    for (int i = 0; i < dimensionCount; i++) {
        String dimensionName = subtype.dimensions().get(i).name();
        indexes[i] = supertype.indexOfDimension(dimensionName).get();
    }
    return indexes;
}
/** Projects a superspace address onto a subspace by keeping only the labels at the given indexes. */
private TensorAddress mapAddressToSubspace(TensorAddress superAddress, int[] subspaceIndexes) {
    int labelCount = subspaceIndexes.length;
    String[] subspaceLabels = new String[labelCount];
    for (int i = 0; i < labelCount; i++) {
        subspaceLabels[i] = superAddress.label(subspaceIndexes[i]);
    }
    return TensorAddress.of(subspaceLabels);
}
/** Slow join which works for any two tensors. */
private Tensor generalJoin(Tensor a, Tensor b, TensorType joinedType) {
    boolean bothIndexed = (a instanceof IndexedTensor) && (b instanceof IndexedTensor);
    return bothIndexed
            ? indexedGeneralJoin((IndexedTensor) a, (IndexedTensor) b, joinedType)
            : mappedHashJoin(a, b, joinedType);
}
// General join of two indexed tensors with partially overlapping dimensions.
// The matching is run from both sides (a against b, then b against a); the second pass
// passes reversedOrder=true so the combinator still sees arguments in (a, b) order.
private Tensor indexedGeneralJoin(IndexedTensor a, IndexedTensor b, TensorType joinedType) {
DimensionSizes joinedSize = joinedSize(joinedType, a, b);
Tensor.Builder builder = Tensor.Builder.of(joinedType, joinedSize);
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
joinTo(a, b, joinedType, joinedSize, aToIndexes, bToIndexes, false, builder);
joinTo(b, a, joinedType, joinedSize, bToIndexes, aToIndexes, true, builder);
return builder.build();
}
// For each cell of a, finds every cell of b which agrees with it on the dimensions the
// two tensors share, and writes the combined cell to the builder. reversedOrder means
// 'a' is really the caller's second operand, so the combinator arguments are swapped.
private void joinTo(IndexedTensor a, IndexedTensor b, TensorType joinedType, DimensionSizes joinedSize,
int[] aToIndexes, int[] bToIndexes, boolean reversedOrder, Tensor.Builder builder) {
Set<String> sharedDimensions = Sets.intersection(a.type().dimensionNames(), b.type().dimensionNames());
Set<String> dimensionsOnlyInA = Sets.difference(a.type().dimensionNames(), b.type().dimensionNames());
// Iterate only up to the joined (truncated) sizes on both sides.
DimensionSizes aIterateSize = joinedSizeOf(a.type(), joinedType, joinedSize);
DimensionSizes bIterateSize = joinedSizeOf(b.type(), joinedType, joinedSize);
for (Iterator<IndexedTensor.SubspaceIterator> ia = a.subspaceIterator(dimensionsOnlyInA, aIterateSize); ia.hasNext(); ) {
IndexedTensor.SubspaceIterator aSubspace = ia.next();
while (aSubspace.hasNext()) {
Tensor.Cell aCell = aSubspace.next();
// The shared-dimension labels of this a cell select the matching b cells.
PartialAddress matchingBCells = partialAddress(a.type(), aSubspace.address(), sharedDimensions);
for (IndexedTensor.SubspaceIterator bSubspace = b.cellIterator(matchingBCells, bIterateSize); bSubspace.hasNext(); ) {
Tensor.Cell bCell = bSubspace.next();
TensorAddress joinedAddress = joinAddresses(aCell.getKey(), aToIndexes, bCell.getKey(), bToIndexes, joinedType);
double joinedValue = reversedOrder ? combinator.applyAsDouble(bCell.getValue(), aCell.getValue())
: combinator.applyAsDouble(aCell.getValue(), bCell.getValue());
builder.cell(joinedAddress, joinedValue);
}
}
}
}
// Builds a partial address containing only the labels of the retained dimensions.
// Uses intLabel, so the retained labels are numeric here (indexed-tensor addresses).
private PartialAddress partialAddress(TensorType addressType, TensorAddress address, Set<String> retainDimensions) {
PartialAddress.Builder builder = new PartialAddress.Builder(retainDimensions.size());
for (int i = 0; i < addressType.dimensions().size(); i++)
if (retainDimensions.contains(addressType.dimensions().get(i).name()))
builder.add(addressType.dimensions().get(i).name(), address.intLabel(i));
return builder.build();
}
/** Returns the sizes from the joined sizes which are present in the type argument */
private DimensionSizes joinedSizeOf(TensorType type, TensorType joinedType, DimensionSizes joinedSizes) {
DimensionSizes.Builder builder = new DimensionSizes.Builder(type.dimensions().size());
// dimensionIndex compacts the selected sizes into this type's own dimension order.
int dimensionIndex = 0;
for (int i = 0; i < joinedType.dimensions().size(); i++) {
if (type.dimensionNames().contains(joinedType.dimensions().get(i).name()))
builder.set(dimensionIndex++, joinedSizes.size(i));
}
return builder.build();
}
// Fallback O(|a| * |b|) join: tries every pair of cells and keeps the pairs whose
// addresses agree on all shared dimensions (joinAddresses returns null otherwise).
private Tensor mappedGeneralJoin(Tensor a, Tensor b, TensorType joinedType) {
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> aIterator = a.cellIterator(); aIterator.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = aIterator.next();
for (Iterator<Tensor.Cell> bIterator = b.cellIterator(); bIterator.hasNext(); ) {
Map.Entry<TensorAddress, Double> bCell = bIterator.next();
TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aToIndexes,
bCell.getKey(), bToIndexes, joinedType);
// null means the two addresses disagree on a shared dimension - not a join match.
if (combinedAddress == null) continue;
builder.cell(combinedAddress, combinator.applyAsDouble(aCell.getValue(), bCell.getValue()));
}
}
return builder.build();
}
/**
 * Returns an array with one entry, in order, for each dimension of fromType:
 * the index at which toType has the dimension with the same name, or -1 if
 * toType does not contain that dimension.
 */
private int[] mapIndexes(TensorType fromType, TensorType toType) {
    int dimensionCount = fromType.dimensions().size();
    int[] toIndexes = new int[dimensionCount];
    for (int i = 0; i < dimensionCount; i++) {
        String dimensionName = fromType.dimensions().get(i).name();
        toIndexes[i] = toType.indexOfDimension(dimensionName).orElse(-1);
    }
    return toIndexes;
}
/**
 * Merges two addresses into one address of the joined type, or returns null when the
 * two addresses carry different labels for the same joined dimension.
 */
private TensorAddress joinAddresses(TensorAddress a, int[] aToIndexes, TensorAddress b, int[] bToIndexes,
                                    TensorType joinedType) {
    String[] joinedLabels = new String[joinedType.dimensions().size()];
    mapContent(a, joinedLabels, aToIndexes);
    if (mapContent(b, joinedLabels, bToIndexes))
        return TensorAddress.of(joinedLabels);
    return null; // incompatible labels on a shared dimension
}
/**
 * Copies the labels of the given address into the target array at the positions given
 * by the index map.
 *
 * @return true if the mapping was successful, false if one of the destination positions
 *         was already occupied by a different label
 */
private boolean mapContent(TensorAddress from, String[] to, int[] indexMap) {
    for (int i = 0; i < from.size(); i++) {
        String label = from.label(i);
        int toIndex = indexMap[i];
        String existing = to[toIndex];
        if (existing != null && !existing.equals(label)) return false;
        to[toIndex] = label;
    }
    return true;
}
/** Returns the dimensions common to both a and b, as a new tensor type. */
private TensorType commonDimensions(Tensor a, Tensor b) {
    TensorType.Builder typeBuilder = new TensorType.Builder();
    // Dimension.equals compares the full dimension (not just the name), which is what we want.
    for (TensorType.Dimension aDim : a.type().dimensions()) {
        for (TensorType.Dimension bDim : b.type().dimensions()) {
            if (aDim.equals(bDim))
                typeBuilder.set(bDim);
        }
    }
    return typeBuilder.build();
}
/**
 * Returns this cell's labels for the given common dimensions, in the dimension order
 * of commonDimensions.
 *
 * @throws IllegalStateException if a common dimension is not present in the given type,
 *         which indicates commonDimensions was not derived from this tensor's type
 */
private TensorAddress partialCommonAddress(Tensor.Cell cell, TensorType type, TensorType commonDimensions) {
    TensorAddress address = cell.getKey();
    String[] labels = new String[commonDimensions.dimensions().size()];
    for (int i = 0; i < labels.length; ++i) {
        String name = commonDimensions.dimensions().get(i).name();
        // Fail with context instead of a bare, message-less RuntimeException:
        // IllegalStateException is a RuntimeException subtype, so callers are unaffected.
        int index = type.indexOfDimension(name)
                        .orElseThrow(() -> new IllegalStateException(
                                "Common dimension '" + name + "' is not present in tensor type " + type));
        labels[i] = address.label(index);
    }
    return TensorAddress.of(labels);
}
} | class Join extends PrimitiveTensorFunction {
// NOTE(review): this span is a byte-duplicate of the Join class body seen earlier in this
// file chunk (a flattened dataset artifact); comments below document the code as-is.
// The two function arguments whose evaluated tensors are joined cell-by-cell.
private final TensorFunction argumentA, argumentB;
// Combines one value from each argument tensor into one output value.
private final DoubleBinaryOperator combinator;
// Creates a join of two tensor functions; all three arguments are mandatory.
public Join(TensorFunction argumentA, TensorFunction argumentB, DoubleBinaryOperator combinator) {
Objects.requireNonNull(argumentA, "The first argument tensor cannot be null");
Objects.requireNonNull(argumentB, "The second argument tensor cannot be null");
Objects.requireNonNull(combinator, "The combinator function cannot be null");
this.argumentA = argumentA;
this.argumentB = argumentB;
this.combinator = combinator;
}
// Accessors for the constructor arguments.
public TensorFunction argumentA() { return argumentA; }
public TensorFunction argumentB() { return argumentB; }
public DoubleBinaryOperator combinator() { return combinator; }
@Override
public List<TensorFunction> functionArguments() { return ImmutableList.of(argumentA, argumentB); }
// Rebuilds this join around exactly two replacement arguments, keeping the combinator.
@Override
public TensorFunction replaceArguments(List<TensorFunction> arguments) {
if ( arguments.size() != 2)
throw new IllegalArgumentException("Join must have 2 arguments, got " + arguments.size());
return new Join(arguments.get(0), arguments.get(1), combinator);
}
@Override
public PrimitiveTensorFunction toPrimitive() {
return new Join(argumentA.toPrimitive(), argumentB.toPrimitive(), combinator);
}
@Override
public String toString(ToStringContext context) {
return "join(" + argumentA.toString(context) + ", " + argumentB.toString(context) + ", " + combinator + ")";
}
// Evaluates both arguments, then dispatches to the most specialized join strategy the
// two tensor types allow; the checks are ordered from cheapest to most general.
@Override
public Tensor evaluate(EvaluationContext context) {
Tensor a = argumentA.evaluate(context);
Tensor b = argumentB.evaluate(context);
TensorType joinedType = new TensorType.Builder(a.type(), b.type()).build();
if (hasSingleIndexedDimension(a) && hasSingleIndexedDimension(b) && a.type().dimensions().get(0).name().equals(b.type().dimensions().get(0).name()))
return indexedVectorJoin((IndexedTensor)a, (IndexedTensor)b, joinedType);
else if (joinedType.dimensions().size() == a.type().dimensions().size() && joinedType.dimensions().size() == b.type().dimensions().size())
return singleSpaceJoin(a, b, joinedType);
else if (a.type().dimensions().containsAll(b.type().dimensions()))
return subspaceJoin(b, a, joinedType, true);
else if (b.type().dimensions().containsAll(a.type().dimensions()))
return subspaceJoin(a, b, joinedType, false);
else
return generalJoin(a, b, joinedType);
}
private boolean hasSingleIndexedDimension(Tensor tensor) {
return tensor.type().dimensions().size() == 1 && tensor.type().dimensions().get(0).isIndexed();
}
// Fast path: pairwise combination of two indexed vectors over the same dimension.
private Tensor indexedVectorJoin(IndexedTensor a, IndexedTensor b, TensorType type) {
int joinedLength = Math.min(a.dimensionSizes().size(0), b.dimensionSizes().size(0));
Iterator<Double> aIterator = a.valueIterator();
Iterator<Double> bIterator = b.valueIterator();
IndexedTensor.Builder builder = IndexedTensor.Builder.of(type, new DimensionSizes.Builder(1).set(0, joinedLength).build());
for (int i = 0; i < joinedLength; i++)
builder.cell(combinator.applyAsDouble(aIterator.next(), bIterator.next()), i);
return builder.build();
}
/** When both tensors have the same dimensions, at most one cell matches a cell in the other tensor */
private Tensor singleSpaceJoin(Tensor a, Tensor b, TensorType joinedType) {
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> i = a.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = i.next();
double bCellValue = b.get(aCell.getKey());
// NaN presumably signals that b has no cell at this address - skip such cells.
if (Double.isNaN(bCellValue)) continue;
builder.cell(aCell.getKey(), combinator.applyAsDouble(aCell.getValue(), bCellValue));
}
return builder.build();
}
/** Join a tensor into a superspace */
private Tensor subspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
if (subspace instanceof IndexedTensor && superspace instanceof IndexedTensor)
return indexedSubspaceJoin((IndexedTensor) subspace, (IndexedTensor) superspace, joinedType, reversedArgumentOrder);
else
return generalSubspaceJoin(subspace, superspace, joinedType, reversedArgumentOrder);
}
// Subspace join specialized for indexed tensors: join the subspace values into each
// subspace of the superspace spanned by the non-shared dimensions.
private Tensor indexedSubspaceJoin(IndexedTensor subspace, IndexedTensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
if (subspace.size() == 0 || superspace.size() == 0)
return Tensor.Builder.of(joinedType, new DimensionSizes.Builder(joinedType.dimensions().size()).build()).build();
DimensionSizes joinedSizes = joinedSize(joinedType, subspace, superspace);
IndexedTensor.Builder builder = (IndexedTensor.Builder)Tensor.Builder.of(joinedType, joinedSizes);
Set<String> superDimensionNames = new HashSet<>(superspace.type().dimensionNames());
superDimensionNames.removeAll(subspace.type().dimensionNames());
for (Iterator<IndexedTensor.SubspaceIterator> i = superspace.subspaceIterator(superDimensionNames, joinedSizes); i.hasNext(); ) {
IndexedTensor.SubspaceIterator subspaceInSuper = i.next();
joinSubspaces(subspace.valueIterator(), subspace.size(),
subspaceInSuper, subspaceInSuper.size(),
reversedArgumentOrder, builder);
}
return builder.build();
}
// Pairwise combination of one subspace value sequence with one superspace cell sequence;
// reversedArgumentOrder swaps the combinator arguments to preserve the caller's order.
private void joinSubspaces(Iterator<Double> subspace, int subspaceSize,
Iterator<Tensor.Cell> superspace, int superspaceSize,
boolean reversedArgumentOrder, IndexedTensor.Builder builder) {
int joinedLength = Math.min(subspaceSize, superspaceSize);
if (reversedArgumentOrder) {
for (int i = 0; i < joinedLength; i++) {
Tensor.Cell supercell = superspace.next();
builder.cell(supercell, combinator.applyAsDouble(supercell.getValue(), subspace.next()));
}
} else {
for (int i = 0; i < joinedLength; i++) {
Tensor.Cell supercell = superspace.next();
builder.cell(supercell, combinator.applyAsDouble(subspace.next(), supercell.getValue()));
}
}
}
// Size of each joined dimension: the minimum of the two operand sizes when shared.
private DimensionSizes joinedSize(TensorType joinedType, IndexedTensor a, IndexedTensor b) {
DimensionSizes.Builder builder = new DimensionSizes.Builder(joinedType.dimensions().size());
for (int i = 0; i < builder.dimensions(); i++) {
String dimensionName = joinedType.dimensions().get(i).name();
Optional<Integer> aIndex = a.type().indexOfDimension(dimensionName);
Optional<Integer> bIndex = b.type().indexOfDimension(dimensionName);
if (aIndex.isPresent() && bIndex.isPresent())
builder.set(i, Math.min(b.dimensionSizes().size(bIndex.get()), a.dimensionSizes().size(aIndex.get())));
else if (aIndex.isPresent())
builder.set(i, a.dimensionSizes().size(aIndex.get()));
else if (bIndex.isPresent())
builder.set(i, b.dimensionSizes().size(bIndex.get()));
}
return builder.build();
}
// General subspace join: project each superspace address onto the subspace and look up.
private Tensor generalSubspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
int[] subspaceIndexes = subspaceIndexes(superspace.type(), subspace.type());
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> i = superspace.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> supercell = i.next();
TensorAddress subaddress = mapAddressToSubspace(supercell.getKey(), subspaceIndexes);
double subspaceValue = subspace.get(subaddress);
if ( ! Double.isNaN(subspaceValue))
builder.cell(supercell.getKey(),
reversedArgumentOrder ? combinator.applyAsDouble(supercell.getValue(), subspaceValue)
: combinator.applyAsDouble(subspaceValue, supercell.getValue()));
}
return builder.build();
}
/** Returns the indexes in the superspace type which should be retained to create the subspace type */
private int[] subspaceIndexes(TensorType supertype, TensorType subtype) {
int[] subspaceIndexes = new int[subtype.dimensions().size()];
for (int i = 0; i < subtype.dimensions().size(); i++)
subspaceIndexes[i] = supertype.indexOfDimension(subtype.dimensions().get(i).name()).get();
return subspaceIndexes;
}
private TensorAddress mapAddressToSubspace(TensorAddress superAddress, int[] subspaceIndexes) {
String[] subspaceLabels = new String[subspaceIndexes.length];
for (int i = 0; i < subspaceIndexes.length; i++)
subspaceLabels[i] = superAddress.label(subspaceIndexes[i]);
return TensorAddress.of(subspaceLabels);
}
/** Slow join which works for any two tensors */
private Tensor generalJoin(Tensor a, Tensor b, TensorType joinedType) {
if (a instanceof IndexedTensor && b instanceof IndexedTensor)
return indexedGeneralJoin((IndexedTensor) a, (IndexedTensor) b, joinedType);
else
return mappedHashJoin(a, b, joinedType);
}
// General join of two indexed tensors; matching runs in both directions, with the
// second pass using reversedOrder to keep the combinator arguments in (a, b) order.
private Tensor indexedGeneralJoin(IndexedTensor a, IndexedTensor b, TensorType joinedType) {
DimensionSizes joinedSize = joinedSize(joinedType, a, b);
Tensor.Builder builder = Tensor.Builder.of(joinedType, joinedSize);
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
joinTo(a, b, joinedType, joinedSize, aToIndexes, bToIndexes, false, builder);
joinTo(b, a, joinedType, joinedSize, bToIndexes, aToIndexes, true, builder);
return builder.build();
}
// For each cell of a, finds the b cells agreeing with it on the shared dimensions and
// emits the combined cell; reversedOrder swaps the combinator arguments.
private void joinTo(IndexedTensor a, IndexedTensor b, TensorType joinedType, DimensionSizes joinedSize,
int[] aToIndexes, int[] bToIndexes, boolean reversedOrder, Tensor.Builder builder) {
Set<String> sharedDimensions = Sets.intersection(a.type().dimensionNames(), b.type().dimensionNames());
Set<String> dimensionsOnlyInA = Sets.difference(a.type().dimensionNames(), b.type().dimensionNames());
DimensionSizes aIterateSize = joinedSizeOf(a.type(), joinedType, joinedSize);
DimensionSizes bIterateSize = joinedSizeOf(b.type(), joinedType, joinedSize);
for (Iterator<IndexedTensor.SubspaceIterator> ia = a.subspaceIterator(dimensionsOnlyInA, aIterateSize); ia.hasNext(); ) {
IndexedTensor.SubspaceIterator aSubspace = ia.next();
while (aSubspace.hasNext()) {
Tensor.Cell aCell = aSubspace.next();
PartialAddress matchingBCells = partialAddress(a.type(), aSubspace.address(), sharedDimensions);
for (IndexedTensor.SubspaceIterator bSubspace = b.cellIterator(matchingBCells, bIterateSize); bSubspace.hasNext(); ) {
Tensor.Cell bCell = bSubspace.next();
TensorAddress joinedAddress = joinAddresses(aCell.getKey(), aToIndexes, bCell.getKey(), bToIndexes, joinedType);
double joinedValue = reversedOrder ? combinator.applyAsDouble(bCell.getValue(), aCell.getValue())
: combinator.applyAsDouble(aCell.getValue(), bCell.getValue());
builder.cell(joinedAddress, joinedValue);
}
}
}
}
// Partial address over the retained dimensions; intLabel implies numeric (indexed) labels.
private PartialAddress partialAddress(TensorType addressType, TensorAddress address, Set<String> retainDimensions) {
PartialAddress.Builder builder = new PartialAddress.Builder(retainDimensions.size());
for (int i = 0; i < addressType.dimensions().size(); i++)
if (retainDimensions.contains(addressType.dimensions().get(i).name()))
builder.add(addressType.dimensions().get(i).name(), address.intLabel(i));
return builder.build();
}
/** Returns the sizes from the joined sizes which are present in the type argument */
private DimensionSizes joinedSizeOf(TensorType type, TensorType joinedType, DimensionSizes joinedSizes) {
DimensionSizes.Builder builder = new DimensionSizes.Builder(type.dimensions().size());
int dimensionIndex = 0;
for (int i = 0; i < joinedType.dimensions().size(); i++) {
if (type.dimensionNames().contains(joinedType.dimensions().get(i).name()))
builder.set(dimensionIndex++, joinedSizes.size(i));
}
return builder.build();
}
// Fallback O(|a| * |b|) join over every pair of cells with compatible addresses.
private Tensor mappedGeneralJoin(Tensor a, Tensor b, TensorType joinedType) {
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> aIterator = a.cellIterator(); aIterator.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = aIterator.next();
for (Iterator<Tensor.Cell> bIterator = b.cellIterator(); bIterator.hasNext(); ) {
Map.Entry<TensorAddress, Double> bCell = bIterator.next();
TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aToIndexes,
bCell.getKey(), bToIndexes, joinedType);
if (combinedAddress == null) continue;
builder.cell(combinedAddress, combinator.applyAsDouble(aCell.getValue(), bCell.getValue()));
}
}
return builder.build();
}
/**
* Returns the an array having one entry in order for each dimension of fromType
* containing the index at which toType contains the same dimension name.
* That is, if the returned array contains n at index i then
* fromType.dimensions().get(i).name.equals(toType.dimensions().get(n).name())
* If some dimension in fromType is not present in toType, the corresponding index will be -1
*/
private int[] mapIndexes(TensorType fromType, TensorType toType) {
int[] toIndexes = new int[fromType.dimensions().size()];
for (int i = 0; i < fromType.dimensions().size(); i++)
toIndexes[i] = toType.indexOfDimension(fromType.dimensions().get(i).name()).orElse(-1);
return toIndexes;
}
// Merges two addresses into a joined-type address, or returns null on a label conflict.
private TensorAddress joinAddresses(TensorAddress a, int[] aToIndexes, TensorAddress b, int[] bToIndexes,
TensorType joinedType) {
String[] joinedLabels = new String[joinedType.dimensions().size()];
mapContent(a, joinedLabels, aToIndexes);
boolean compatible = mapContent(b, joinedLabels, bToIndexes);
if ( ! compatible) return null;
return TensorAddress.of(joinedLabels);
}
/**
* Maps the content in the given list to the given array, using the given index map.
*
* @return true if the mapping was successful, false if one of the destination positions was
* occupied by a different value
*/
private boolean mapContent(TensorAddress from, String[] to, int[] indexMap) {
for (int i = 0; i < from.size(); i++) {
int toIndex = indexMap[i];
if (to[toIndex] != null && ! to[toIndex].equals(from.label(i))) return false;
to[toIndex] = from.label(i);
}
return true;
}
/**
* Returns common dimension of a and b as a new tensor type
*/
private TensorType commonDimensions(Tensor a, Tensor b) {
TensorType.Builder typeBuilder = new TensorType.Builder();
TensorType aType = a.type();
TensorType bType = b.type();
for (int i = 0; i < aType.dimensions().size(); ++i) {
TensorType.Dimension aDim = aType.dimensions().get(i);
for (int j = 0; j < bType.dimensions().size(); ++j) {
TensorType.Dimension bDim = bType.dimensions().get(j);
if (aDim.equals(bDim)) {
typeBuilder.set(bDim);
}
}
}
return typeBuilder.build();
}
// Extracts the labels at the given address positions (one per common dimension).
private TensorAddress partialCommonAddress(Tensor.Cell cell, int[] indexMap) {
TensorAddress address = cell.getKey();
String[] labels = new String[indexMap.length];
for (int i = 0; i < labels.length; ++i) {
labels[i] = address.label(indexMap[i]);
}
return TensorAddress.of(labels);
}
} |
I really like these 4 lines. | private Tensor mappedHashJoin(Tensor a, Tensor b, TensorType joinedType) {
// Hash join over the dimensions the two tensors have in common: bucket the smaller
// tensor's cells by their common-dimension labels, then probe the buckets once per
// cell of the larger tensor.
TensorType commonDimensionType = commonDimensions(a, b);
if (commonDimensionType.dimensions().isEmpty()) {
    return mappedGeneralJoin(a, b, joinedType); // no shared dimensions: fall back to nested-loop join
}
// Hash the smaller tensor, but remember the swap: the combinator is not necessarily
// commutative (e.g. subtract, divide), so the caller's (a, b) argument order must be
// restored when values are combined. The original code lost this order after swapping.
boolean swapTensors = a.size() > b.size();
if (swapTensors) {
    Tensor temp = a;
    a = b;
    b = temp;
}
// Bucket each cell of (the now smaller) a by its labels on the common dimensions.
Map<TensorAddress, List<Tensor.Cell>> aCellsByCommonAddress = new HashMap<>();
for (Iterator<Tensor.Cell> cellIterator = a.cellIterator(); cellIterator.hasNext(); ) {
    Tensor.Cell cell = cellIterator.next();
    TensorAddress partialCommonAddress = partialCommonAddress(cell, a.type(), commonDimensionType);
    aCellsByCommonAddress.computeIfAbsent(partialCommonAddress, ignored -> new ArrayList<>()).add(cell);
}
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> cellIterator = b.cellIterator(); cellIterator.hasNext(); ) {
    Tensor.Cell cell = cellIterator.next();
    TensorAddress partialCommonAddress = partialCommonAddress(cell, b.type(), commonDimensionType);
    for (Tensor.Cell aCell : aCellsByCommonAddress.getOrDefault(partialCommonAddress, Collections.emptyList())) {
        TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aToIndexes,
                                                      cell.getKey(), bToIndexes, joinedType);
        if (combinedAddress == null) continue;
        // Apply the combinator in the caller's original argument order.
        double combinedValue = swapTensors ?
                combinator.applyAsDouble(cell.getValue(), aCell.getValue()) :
                combinator.applyAsDouble(aCell.getValue(), cell.getValue());
        builder.cell(combinedAddress, combinedValue);
    }
}
return builder.build();
} | b = largerTensor; | private Tensor mappedHashJoin(Tensor a, Tensor b, TensorType joinedType) {
// Hash join over the dimensions shared by the two tensors; falls back to the
// O(|a| * |b|) nested-loop join when the tensors share no dimensions.
TensorType commonDimensionType = commonDimensions(a, b);
if (commonDimensionType.dimensions().isEmpty()) {
return mappedGeneralJoin(a, b, joinedType);
}
// Hash the smaller tensor; swapTensors remembers that the operands were exchanged so
// the combinator's argument order can be restored (it may be non-commutative).
boolean swapTensors = a.size() > b.size();
if (swapTensors) {
Tensor temp = a;
a = b;
b = temp;
}
// Precomputed index maps: positions of the common dimensions within each operand's
// address, and positions of each operand's dimensions within the joined type.
int[] aIndexesInCommon = mapIndexes(commonDimensionType, a.type());
int[] bIndexesInCommon = mapIndexes(commonDimensionType, b.type());
int[] aIndexesInJoined = mapIndexes(a.type(), joinedType);
int[] bIndexesInJoined = mapIndexes(b.type(), joinedType);
// Build phase: bucket a's cells by their labels on the common dimensions.
Map<TensorAddress, List<Tensor.Cell>> aCellsByCommonAddress = new HashMap<>();
for (Iterator<Tensor.Cell> cellIterator = a.cellIterator(); cellIterator.hasNext(); ) {
Tensor.Cell aCell = cellIterator.next();
TensorAddress partialCommonAddress = partialCommonAddress(aCell, aIndexesInCommon);
aCellsByCommonAddress.putIfAbsent(partialCommonAddress, new ArrayList<>());
aCellsByCommonAddress.get(partialCommonAddress).add(aCell);
}
// Probe phase: for each b cell, combine with every bucketed a cell that matches.
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> cellIterator = b.cellIterator(); cellIterator.hasNext(); ) {
Tensor.Cell bCell = cellIterator.next();
TensorAddress partialCommonAddress = partialCommonAddress(bCell, bIndexesInCommon);
for (Tensor.Cell aCell : aCellsByCommonAddress.getOrDefault(partialCommonAddress, Collections.emptyList())) {
TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aIndexesInJoined,
bCell.getKey(), bIndexesInJoined, joinedType);
// null means the addresses conflict on a shared dimension - not a match.
if (combinedAddress == null) continue;
// Restore the caller's argument order when the operands were swapped.
double combinedValue = swapTensors ?
combinator.applyAsDouble(bCell.getValue(), aCell.getValue()) :
combinator.applyAsDouble(aCell.getValue(), bCell.getValue());
builder.cell(combinedAddress, combinedValue);
}
}
return builder.build();
} | class Join extends PrimitiveTensorFunction {
private final TensorFunction argumentA, argumentB;
private final DoubleBinaryOperator combinator;
public Join(TensorFunction argumentA, TensorFunction argumentB, DoubleBinaryOperator combinator) {
Objects.requireNonNull(argumentA, "The first argument tensor cannot be null");
Objects.requireNonNull(argumentB, "The second argument tensor cannot be null");
Objects.requireNonNull(combinator, "The combinator function cannot be null");
this.argumentA = argumentA;
this.argumentB = argumentB;
this.combinator = combinator;
}
public TensorFunction argumentA() { return argumentA; }
public TensorFunction argumentB() { return argumentB; }
public DoubleBinaryOperator combinator() { return combinator; }
@Override
public List<TensorFunction> functionArguments() { return ImmutableList.of(argumentA, argumentB); }
@Override
public TensorFunction replaceArguments(List<TensorFunction> arguments) {
if ( arguments.size() != 2)
throw new IllegalArgumentException("Join must have 2 arguments, got " + arguments.size());
return new Join(arguments.get(0), arguments.get(1), combinator);
}
@Override
public PrimitiveTensorFunction toPrimitive() {
return new Join(argumentA.toPrimitive(), argumentB.toPrimitive(), combinator);
}
@Override
public String toString(ToStringContext context) {
return "join(" + argumentA.toString(context) + ", " + argumentB.toString(context) + ", " + combinator + ")";
}
@Override
public Tensor evaluate(EvaluationContext context) {
Tensor a = argumentA.evaluate(context);
Tensor b = argumentB.evaluate(context);
TensorType joinedType = new TensorType.Builder(a.type(), b.type()).build();
if (hasSingleIndexedDimension(a) && hasSingleIndexedDimension(b) && a.type().dimensions().get(0).name().equals(b.type().dimensions().get(0).name()))
return indexedVectorJoin((IndexedTensor)a, (IndexedTensor)b, joinedType);
else if (joinedType.dimensions().size() == a.type().dimensions().size() && joinedType.dimensions().size() == b.type().dimensions().size())
return singleSpaceJoin(a, b, joinedType);
else if (a.type().dimensions().containsAll(b.type().dimensions()))
return subspaceJoin(b, a, joinedType, true);
else if (b.type().dimensions().containsAll(a.type().dimensions()))
return subspaceJoin(a, b, joinedType, false);
else
return generalJoin(a, b, joinedType);
}
private boolean hasSingleIndexedDimension(Tensor tensor) {
return tensor.type().dimensions().size() == 1 && tensor.type().dimensions().get(0).isIndexed();
}
private Tensor indexedVectorJoin(IndexedTensor a, IndexedTensor b, TensorType type) {
int joinedLength = Math.min(a.dimensionSizes().size(0), b.dimensionSizes().size(0));
Iterator<Double> aIterator = a.valueIterator();
Iterator<Double> bIterator = b.valueIterator();
IndexedTensor.Builder builder = IndexedTensor.Builder.of(type, new DimensionSizes.Builder(1).set(0, joinedLength).build());
for (int i = 0; i < joinedLength; i++)
builder.cell(combinator.applyAsDouble(aIterator.next(), bIterator.next()), i);
return builder.build();
}
/** When both tensors have the same dimensions, at most one cell matches a cell in the other tensor */
private Tensor singleSpaceJoin(Tensor a, Tensor b, TensorType joinedType) {
    Tensor.Builder builder = Tensor.Builder.of(joinedType);
    Iterator<Tensor.Cell> aCells = a.cellIterator();
    while (aCells.hasNext()) {
        Map.Entry<TensorAddress, Double> aCell = aCells.next();
        TensorAddress address = aCell.getKey();
        double bValue = b.get(address);
        // NaN signals that b has no cell at this address
        if ( ! Double.isNaN(bValue))
            builder.cell(address, combinator.applyAsDouble(aCell.getValue(), bValue));
    }
    return builder.build();
}
/** Joins a tensor into a superspace of it, dispatching on the tensor representations. */
private Tensor subspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
    boolean bothIndexed = subspace instanceof IndexedTensor && superspace instanceof IndexedTensor;
    if (bothIndexed)
        return indexedSubspaceJoin((IndexedTensor) subspace, (IndexedTensor) superspace, joinedType, reversedArgumentOrder);
    return generalSubspaceJoin(subspace, superspace, joinedType, reversedArgumentOrder);
}
/** Joins a dense subspace tensor into a dense superspace tensor. */
private Tensor indexedSubspaceJoin(IndexedTensor subspace, IndexedTensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
    // An empty operand yields an empty result
    if (subspace.size() == 0 || superspace.size() == 0)
        return Tensor.Builder.of(joinedType, new DimensionSizes.Builder(joinedType.dimensions().size()).build()).build();
    DimensionSizes joinedSizes = joinedSize(joinedType, subspace, superspace);
    IndexedTensor.Builder builder = (IndexedTensor.Builder)Tensor.Builder.of(joinedType, joinedSizes);
    // The dimensions present in the superspace but not in the subspace
    Set<String> superDimensionNames = new HashSet<>(superspace.type().dimensionNames());
    superDimensionNames.removeAll(subspace.type().dimensionNames());
    // Combine the subspace values with each matching subspace found inside the superspace
    for (Iterator<IndexedTensor.SubspaceIterator> i = superspace.subspaceIterator(superDimensionNames, joinedSizes); i.hasNext(); ) {
        IndexedTensor.SubspaceIterator subspaceInSuper = i.next();
        joinSubspaces(subspace.valueIterator(), subspace.size(),
                      subspaceInSuper, subspaceInSuper.size(),
                      reversedArgumentOrder, builder);
    }
    return builder.build();
}
/**
 * Combines a dense subspace with one equally shaped subspace of the superspace, cell by cell.
 * reversedArgumentOrder decides which value becomes the combinator's first argument.
 */
private void joinSubspaces(Iterator<Double> subspace, int subspaceSize,
                           Iterator<Tensor.Cell> superspace, int superspaceSize,
                           boolean reversedArgumentOrder, IndexedTensor.Builder builder) {
    int joinedLength = Math.min(subspaceSize, superspaceSize);
    for (int i = 0; i < joinedLength; i++) {
        Tensor.Cell supercell = superspace.next();
        double subspaceValue = subspace.next();
        double joinedValue = reversedArgumentOrder
                ? combinator.applyAsDouble(supercell.getValue(), subspaceValue)
                : combinator.applyAsDouble(subspaceValue, supercell.getValue());
        builder.cell(supercell, joinedValue);
    }
}
/**
 * Computes the size of each dimension of the joined type: the minimum of the two sizes
 * when the dimension exists in both tensors, otherwise the size in whichever tensor has it.
 */
private DimensionSizes joinedSize(TensorType joinedType, IndexedTensor a, IndexedTensor b) {
    DimensionSizes.Builder builder = new DimensionSizes.Builder(joinedType.dimensions().size());
    for (int i = 0; i < builder.dimensions(); i++) {
        String dimensionName = joinedType.dimensions().get(i).name();
        Optional<Integer> aIndex = a.type().indexOfDimension(dimensionName);
        Optional<Integer> bIndex = b.type().indexOfDimension(dimensionName);
        if (aIndex.isPresent() && bIndex.isPresent())
            builder.set(i, Math.min(b.dimensionSizes().size(bIndex.get()), a.dimensionSizes().size(aIndex.get())));
        else if (aIndex.isPresent())
            builder.set(i, a.dimensionSizes().size(aIndex.get()));
        else if (bIndex.isPresent())
            builder.set(i, b.dimensionSizes().size(bIndex.get()));
    }
    return builder.build();
}
/** Joins a (possibly mapped) subspace tensor into a superspace by probing the subspace for each supercell. */
private Tensor generalSubspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
    int[] subspaceIndexes = subspaceIndexes(superspace.type(), subspace.type());
    Tensor.Builder builder = Tensor.Builder.of(joinedType);
    for (Iterator<Tensor.Cell> i = superspace.cellIterator(); i.hasNext(); ) {
        Map.Entry<TensorAddress, Double> supercell = i.next();
        TensorAddress subaddress = mapAddressToSubspace(supercell.getKey(), subspaceIndexes);
        double subspaceValue = subspace.get(subaddress);
        // NaN signals that the subspace has no cell at this address; such supercells are skipped
        if ( ! Double.isNaN(subspaceValue))
            builder.cell(supercell.getKey(),
                         reversedArgumentOrder ? combinator.applyAsDouble(supercell.getValue(), subspaceValue)
                                               : combinator.applyAsDouble(subspaceValue, supercell.getValue()));
    }
    return builder.build();
}
/** Returns the indexes in the superspace type which should be retained to create the subspace type */
private int[] subspaceIndexes(TensorType supertype, TensorType subtype) {
    int dimensionCount = subtype.dimensions().size();
    int[] indexes = new int[dimensionCount];
    for (int i = 0; i < dimensionCount; i++) {
        String dimensionName = subtype.dimensions().get(i).name();
        indexes[i] = supertype.indexOfDimension(dimensionName).get();
    }
    return indexes;
}
/** Projects a superspace address down to the subspace dimensions selected by subspaceIndexes. */
private TensorAddress mapAddressToSubspace(TensorAddress superAddress, int[] subspaceIndexes) {
    String[] labels = new String[subspaceIndexes.length];
    int next = 0;
    for (int superIndex : subspaceIndexes)
        labels[next++] = superAddress.label(superIndex);
    return TensorAddress.of(labels);
}
/** Slow join which works for any two tensors */
private Tensor generalJoin(Tensor a, Tensor b, TensorType joinedType) {
    // Two dense tensors can still be joined with index arithmetic;
    // anything else falls back to a hash join over the common dimensions
    if (a instanceof IndexedTensor && b instanceof IndexedTensor)
        return indexedGeneralJoin((IndexedTensor) a, (IndexedTensor) b, joinedType);
    else
        return mappedHashJoin(a, b, joinedType);
}
/** Joins two dense tensors with partially overlapping dimension sets. */
private Tensor indexedGeneralJoin(IndexedTensor a, IndexedTensor b, TensorType joinedType) {
    DimensionSizes joinedSize = joinedSize(joinedType, a, b);
    Tensor.Builder builder = Tensor.Builder.of(joinedType, joinedSize);
    int[] aToIndexes = mapIndexes(a.type(), joinedType);
    int[] bToIndexes = mapIndexes(b.type(), joinedType);
    // Join in both directions; the reversedOrder flag keeps the combinator's argument order stable
    joinTo(a, b, joinedType, joinedSize, aToIndexes, bToIndexes, false, builder);
    joinTo(b, a, joinedType, joinedSize, bToIndexes, aToIndexes, true, builder);
    return builder.build();
}
/**
 * Joins each cell of a with every cell of b agreeing with it on the shared dimensions.
 * When reversedOrder is true, b's value is passed as the combinator's first argument
 * (the caller passes a and b swapped in that case, so the original order is preserved).
 */
private void joinTo(IndexedTensor a, IndexedTensor b, TensorType joinedType, DimensionSizes joinedSize,
                    int[] aToIndexes, int[] bToIndexes, boolean reversedOrder, Tensor.Builder builder) {
    Set<String> sharedDimensions = Sets.intersection(a.type().dimensionNames(), b.type().dimensionNames());
    Set<String> dimensionsOnlyInA = Sets.difference(a.type().dimensionNames(), b.type().dimensionNames());
    // Iterate only over the index ranges which survive into the joined type
    DimensionSizes aIterateSize = joinedSizeOf(a.type(), joinedType, joinedSize);
    DimensionSizes bIterateSize = joinedSizeOf(b.type(), joinedType, joinedSize);
    for (Iterator<IndexedTensor.SubspaceIterator> ia = a.subspaceIterator(dimensionsOnlyInA, aIterateSize); ia.hasNext(); ) {
        IndexedTensor.SubspaceIterator aSubspace = ia.next();
        while (aSubspace.hasNext()) {
            Tensor.Cell aCell = aSubspace.next();
            // The b cells matching this a cell are those sharing its labels in the shared dimensions
            PartialAddress matchingBCells = partialAddress(a.type(), aSubspace.address(), sharedDimensions);
            for (IndexedTensor.SubspaceIterator bSubspace = b.cellIterator(matchingBCells, bIterateSize); bSubspace.hasNext(); ) {
                Tensor.Cell bCell = bSubspace.next();
                TensorAddress joinedAddress = joinAddresses(aCell.getKey(), aToIndexes, bCell.getKey(), bToIndexes, joinedType);
                double joinedValue = reversedOrder ? combinator.applyAsDouble(bCell.getValue(), aCell.getValue())
                                                   : combinator.applyAsDouble(aCell.getValue(), bCell.getValue());
                builder.cell(joinedAddress, joinedValue);
            }
        }
    }
}
/** Builds a partial address containing only the retained dimensions of the given address. */
private PartialAddress partialAddress(TensorType addressType, TensorAddress address, Set<String> retainDimensions) {
    PartialAddress.Builder partial = new PartialAddress.Builder(retainDimensions.size());
    int dimensionCount = addressType.dimensions().size();
    for (int i = 0; i < dimensionCount; i++) {
        String dimensionName = addressType.dimensions().get(i).name();
        if ( ! retainDimensions.contains(dimensionName)) continue;
        partial.add(dimensionName, address.intLabel(i));
    }
    return partial.build();
}
/** Returns the sizes from the joined sizes which are present in the type argument */
private DimensionSizes joinedSizeOf(TensorType type, TensorType joinedType, DimensionSizes joinedSizes) {
    DimensionSizes.Builder builder = new DimensionSizes.Builder(type.dimensions().size());
    int dimensionIndex = 0;
    // The joined type drives the iteration; the selected sizes are emitted compacted, in that order
    for (int i = 0; i < joinedType.dimensions().size(); i++) {
        if (type.dimensionNames().contains(joinedType.dimensions().get(i).name()))
            builder.set(dimensionIndex++, joinedSizes.size(i));
    }
    return builder.build();
}
/** Joins two mapped tensors by combining every cell of a with every cell of b: O(|a| * |b|). */
private Tensor mappedGeneralJoin(Tensor a, Tensor b, TensorType joinedType) {
    int[] aToIndexes = mapIndexes(a.type(), joinedType);
    int[] bToIndexes = mapIndexes(b.type(), joinedType);
    Tensor.Builder builder = Tensor.Builder.of(joinedType);
    for (Iterator<Tensor.Cell> aIterator = a.cellIterator(); aIterator.hasNext(); ) {
        Map.Entry<TensorAddress, Double> aCell = aIterator.next();
        for (Iterator<Tensor.Cell> bIterator = b.cellIterator(); bIterator.hasNext(); ) {
            Map.Entry<TensorAddress, Double> bCell = bIterator.next();
            TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aToIndexes,
                                                          bCell.getKey(), bToIndexes, joinedType);
            // null means the two cells disagree on a shared dimension and are not combinable
            if (combinedAddress == null) continue;
            builder.cell(combinedAddress, combinator.applyAsDouble(aCell.getValue(), bCell.getValue()));
        }
    }
    return builder.build();
}
/**
 * Returns an array having one entry, in order, for each dimension of fromType,
 * containing the index at which toType contains the same dimension name.
 * That is, if the returned array contains n at index i then
 * fromType.dimensions().get(i).name().equals(toType.dimensions().get(n).name()).
 * If some dimension in fromType is not present in toType, the corresponding index will be -1.
 */
private int[] mapIndexes(TensorType fromType, TensorType toType) {
    int[] toIndexes = new int[fromType.dimensions().size()];
    for (int i = 0; i < fromType.dimensions().size(); i++)
        toIndexes[i] = toType.indexOfDimension(fromType.dimensions().get(i).name()).orElse(-1);
    return toIndexes;
}
/**
 * Merges two addresses into an address of the joined type, or returns null if they
 * disagree on the label of a dimension they share (i.e. the cells are not combinable).
 */
private TensorAddress joinAddresses(TensorAddress a, int[] aToIndexes, TensorAddress b, int[] bToIndexes,
                                    TensorType joinedType) {
    String[] joinedLabels = new String[joinedType.dimensions().size()];
    mapContent(a, joinedLabels, aToIndexes);
    boolean compatible = mapContent(b, joinedLabels, bToIndexes);
    if ( ! compatible) return null;
    return TensorAddress.of(joinedLabels);
}
/**
 * Maps the labels of the given address into the given array, using the given index map.
 *
 * @return true if the mapping was successful, false if one of the destination positions was
 *         occupied by a different value
 */
private boolean mapContent(TensorAddress from, String[] to, int[] indexMap) {
    for (int i = 0; i < from.size(); i++) {
        int toIndex = indexMap[i];
        if (to[toIndex] != null && ! to[toIndex].equals(from.label(i))) return false;
        to[toIndex] = from.label(i);
    }
    return true;
}
/**
 * Returns the dimensions present in both a and b as a new tensor type
 */
private TensorType commonDimensions(Tensor a, Tensor b) {
    TensorType.Builder typeBuilder = new TensorType.Builder();
    for (TensorType.Dimension aDimension : a.type().dimensions())
        for (TensorType.Dimension bDimension : b.type().dimensions())
            if (aDimension.equals(bDimension))
                typeBuilder.set(bDimension);
    return typeBuilder.build();
}
/** Returns the projection of this cell's address onto the given common dimensions. */
private TensorAddress partialCommonAddress(Tensor.Cell cell, TensorType type, TensorType commonDimensions) {
    TensorAddress address = cell.getKey();
    String[] labels = new String[commonDimensions.dimensions().size()];
    for (int i = 0; i < labels.length; ++i) {
        String name = commonDimensions.dimensions().get(i).name();
        // Every common dimension exists in this cell's type by construction of commonDimensions
        int index = type.indexOfDimension(name).orElseThrow(RuntimeException::new);
        labels[i] = address.label(index);
    }
    return TensorAddress.of(labels);
}
} | class Join extends PrimitiveTensorFunction {
private final TensorFunction argumentA, argumentB;
private final DoubleBinaryOperator combinator;
public Join(TensorFunction argumentA, TensorFunction argumentB, DoubleBinaryOperator combinator) {
Objects.requireNonNull(argumentA, "The first argument tensor cannot be null");
Objects.requireNonNull(argumentB, "The second argument tensor cannot be null");
Objects.requireNonNull(combinator, "The combinator function cannot be null");
this.argumentA = argumentA;
this.argumentB = argumentB;
this.combinator = combinator;
}
public TensorFunction argumentA() { return argumentA; }
public TensorFunction argumentB() { return argumentB; }
public DoubleBinaryOperator combinator() { return combinator; }
@Override
public List<TensorFunction> functionArguments() { return ImmutableList.of(argumentA, argumentB); }
@Override
public TensorFunction replaceArguments(List<TensorFunction> arguments) {
if ( arguments.size() != 2)
throw new IllegalArgumentException("Join must have 2 arguments, got " + arguments.size());
return new Join(arguments.get(0), arguments.get(1), combinator);
}
@Override
public PrimitiveTensorFunction toPrimitive() {
return new Join(argumentA.toPrimitive(), argumentB.toPrimitive(), combinator);
}
@Override
public String toString(ToStringContext context) {
return "join(" + argumentA.toString(context) + ", " + argumentB.toString(context) + ", " + combinator + ")";
}
@Override
public Tensor evaluate(EvaluationContext context) {
Tensor a = argumentA.evaluate(context);
Tensor b = argumentB.evaluate(context);
TensorType joinedType = new TensorType.Builder(a.type(), b.type()).build();
if (hasSingleIndexedDimension(a) && hasSingleIndexedDimension(b) && a.type().dimensions().get(0).name().equals(b.type().dimensions().get(0).name()))
return indexedVectorJoin((IndexedTensor)a, (IndexedTensor)b, joinedType);
else if (joinedType.dimensions().size() == a.type().dimensions().size() && joinedType.dimensions().size() == b.type().dimensions().size())
return singleSpaceJoin(a, b, joinedType);
else if (a.type().dimensions().containsAll(b.type().dimensions()))
return subspaceJoin(b, a, joinedType, true);
else if (b.type().dimensions().containsAll(a.type().dimensions()))
return subspaceJoin(a, b, joinedType, false);
else
return generalJoin(a, b, joinedType);
}
private boolean hasSingleIndexedDimension(Tensor tensor) {
return tensor.type().dimensions().size() == 1 && tensor.type().dimensions().get(0).isIndexed();
}
private Tensor indexedVectorJoin(IndexedTensor a, IndexedTensor b, TensorType type) {
int joinedLength = Math.min(a.dimensionSizes().size(0), b.dimensionSizes().size(0));
Iterator<Double> aIterator = a.valueIterator();
Iterator<Double> bIterator = b.valueIterator();
IndexedTensor.Builder builder = IndexedTensor.Builder.of(type, new DimensionSizes.Builder(1).set(0, joinedLength).build());
for (int i = 0; i < joinedLength; i++)
builder.cell(combinator.applyAsDouble(aIterator.next(), bIterator.next()), i);
return builder.build();
}
/** When both tensors have the same dimensions, at most one cell matches a cell in the other tensor */
private Tensor singleSpaceJoin(Tensor a, Tensor b, TensorType joinedType) {
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> i = a.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = i.next();
double bCellValue = b.get(aCell.getKey());
if (Double.isNaN(bCellValue)) continue;
builder.cell(aCell.getKey(), combinator.applyAsDouble(aCell.getValue(), bCellValue));
}
return builder.build();
}
/** Join a tensor into a superspace */
private Tensor subspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
if (subspace instanceof IndexedTensor && superspace instanceof IndexedTensor)
return indexedSubspaceJoin((IndexedTensor) subspace, (IndexedTensor) superspace, joinedType, reversedArgumentOrder);
else
return generalSubspaceJoin(subspace, superspace, joinedType, reversedArgumentOrder);
}
private Tensor indexedSubspaceJoin(IndexedTensor subspace, IndexedTensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
if (subspace.size() == 0 || superspace.size() == 0)
return Tensor.Builder.of(joinedType, new DimensionSizes.Builder(joinedType.dimensions().size()).build()).build();
DimensionSizes joinedSizes = joinedSize(joinedType, subspace, superspace);
IndexedTensor.Builder builder = (IndexedTensor.Builder)Tensor.Builder.of(joinedType, joinedSizes);
Set<String> superDimensionNames = new HashSet<>(superspace.type().dimensionNames());
superDimensionNames.removeAll(subspace.type().dimensionNames());
for (Iterator<IndexedTensor.SubspaceIterator> i = superspace.subspaceIterator(superDimensionNames, joinedSizes); i.hasNext(); ) {
IndexedTensor.SubspaceIterator subspaceInSuper = i.next();
joinSubspaces(subspace.valueIterator(), subspace.size(),
subspaceInSuper, subspaceInSuper.size(),
reversedArgumentOrder, builder);
}
return builder.build();
}
private void joinSubspaces(Iterator<Double> subspace, int subspaceSize,
Iterator<Tensor.Cell> superspace, int superspaceSize,
boolean reversedArgumentOrder, IndexedTensor.Builder builder) {
int joinedLength = Math.min(subspaceSize, superspaceSize);
if (reversedArgumentOrder) {
for (int i = 0; i < joinedLength; i++) {
Tensor.Cell supercell = superspace.next();
builder.cell(supercell, combinator.applyAsDouble(supercell.getValue(), subspace.next()));
}
} else {
for (int i = 0; i < joinedLength; i++) {
Tensor.Cell supercell = superspace.next();
builder.cell(supercell, combinator.applyAsDouble(subspace.next(), supercell.getValue()));
}
}
}
private DimensionSizes joinedSize(TensorType joinedType, IndexedTensor a, IndexedTensor b) {
DimensionSizes.Builder builder = new DimensionSizes.Builder(joinedType.dimensions().size());
for (int i = 0; i < builder.dimensions(); i++) {
String dimensionName = joinedType.dimensions().get(i).name();
Optional<Integer> aIndex = a.type().indexOfDimension(dimensionName);
Optional<Integer> bIndex = b.type().indexOfDimension(dimensionName);
if (aIndex.isPresent() && bIndex.isPresent())
builder.set(i, Math.min(b.dimensionSizes().size(bIndex.get()), a.dimensionSizes().size(aIndex.get())));
else if (aIndex.isPresent())
builder.set(i, a.dimensionSizes().size(aIndex.get()));
else if (bIndex.isPresent())
builder.set(i, b.dimensionSizes().size(bIndex.get()));
}
return builder.build();
}
private Tensor generalSubspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
int[] subspaceIndexes = subspaceIndexes(superspace.type(), subspace.type());
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> i = superspace.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> supercell = i.next();
TensorAddress subaddress = mapAddressToSubspace(supercell.getKey(), subspaceIndexes);
double subspaceValue = subspace.get(subaddress);
if ( ! Double.isNaN(subspaceValue))
builder.cell(supercell.getKey(),
reversedArgumentOrder ? combinator.applyAsDouble(supercell.getValue(), subspaceValue)
: combinator.applyAsDouble(subspaceValue, supercell.getValue()));
}
return builder.build();
}
/** Returns the indexes in the superspace type which should be retained to create the subspace type */
private int[] subspaceIndexes(TensorType supertype, TensorType subtype) {
int[] subspaceIndexes = new int[subtype.dimensions().size()];
for (int i = 0; i < subtype.dimensions().size(); i++)
subspaceIndexes[i] = supertype.indexOfDimension(subtype.dimensions().get(i).name()).get();
return subspaceIndexes;
}
private TensorAddress mapAddressToSubspace(TensorAddress superAddress, int[] subspaceIndexes) {
String[] subspaceLabels = new String[subspaceIndexes.length];
for (int i = 0; i < subspaceIndexes.length; i++)
subspaceLabels[i] = superAddress.label(subspaceIndexes[i]);
return TensorAddress.of(subspaceLabels);
}
/** Slow join which works for any two tensors */
private Tensor generalJoin(Tensor a, Tensor b, TensorType joinedType) {
if (a instanceof IndexedTensor && b instanceof IndexedTensor)
return indexedGeneralJoin((IndexedTensor) a, (IndexedTensor) b, joinedType);
else
return mappedHashJoin(a, b, joinedType);
}
private Tensor indexedGeneralJoin(IndexedTensor a, IndexedTensor b, TensorType joinedType) {
DimensionSizes joinedSize = joinedSize(joinedType, a, b);
Tensor.Builder builder = Tensor.Builder.of(joinedType, joinedSize);
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
joinTo(a, b, joinedType, joinedSize, aToIndexes, bToIndexes, false, builder);
joinTo(b, a, joinedType, joinedSize, bToIndexes, aToIndexes, true, builder);
return builder.build();
}
private void joinTo(IndexedTensor a, IndexedTensor b, TensorType joinedType, DimensionSizes joinedSize,
int[] aToIndexes, int[] bToIndexes, boolean reversedOrder, Tensor.Builder builder) {
Set<String> sharedDimensions = Sets.intersection(a.type().dimensionNames(), b.type().dimensionNames());
Set<String> dimensionsOnlyInA = Sets.difference(a.type().dimensionNames(), b.type().dimensionNames());
DimensionSizes aIterateSize = joinedSizeOf(a.type(), joinedType, joinedSize);
DimensionSizes bIterateSize = joinedSizeOf(b.type(), joinedType, joinedSize);
for (Iterator<IndexedTensor.SubspaceIterator> ia = a.subspaceIterator(dimensionsOnlyInA, aIterateSize); ia.hasNext(); ) {
IndexedTensor.SubspaceIterator aSubspace = ia.next();
while (aSubspace.hasNext()) {
Tensor.Cell aCell = aSubspace.next();
PartialAddress matchingBCells = partialAddress(a.type(), aSubspace.address(), sharedDimensions);
for (IndexedTensor.SubspaceIterator bSubspace = b.cellIterator(matchingBCells, bIterateSize); bSubspace.hasNext(); ) {
Tensor.Cell bCell = bSubspace.next();
TensorAddress joinedAddress = joinAddresses(aCell.getKey(), aToIndexes, bCell.getKey(), bToIndexes, joinedType);
double joinedValue = reversedOrder ? combinator.applyAsDouble(bCell.getValue(), aCell.getValue())
: combinator.applyAsDouble(aCell.getValue(), bCell.getValue());
builder.cell(joinedAddress, joinedValue);
}
}
}
}
private PartialAddress partialAddress(TensorType addressType, TensorAddress address, Set<String> retainDimensions) {
PartialAddress.Builder builder = new PartialAddress.Builder(retainDimensions.size());
for (int i = 0; i < addressType.dimensions().size(); i++)
if (retainDimensions.contains(addressType.dimensions().get(i).name()))
builder.add(addressType.dimensions().get(i).name(), address.intLabel(i));
return builder.build();
}
/** Returns the sizes from the joined sizes which are present in the type argument */
private DimensionSizes joinedSizeOf(TensorType type, TensorType joinedType, DimensionSizes joinedSizes) {
DimensionSizes.Builder builder = new DimensionSizes.Builder(type.dimensions().size());
int dimensionIndex = 0;
for (int i = 0; i < joinedType.dimensions().size(); i++) {
if (type.dimensionNames().contains(joinedType.dimensions().get(i).name()))
builder.set(dimensionIndex++, joinedSizes.size(i));
}
return builder.build();
}
private Tensor mappedGeneralJoin(Tensor a, Tensor b, TensorType joinedType) {
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> aIterator = a.cellIterator(); aIterator.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = aIterator.next();
for (Iterator<Tensor.Cell> bIterator = b.cellIterator(); bIterator.hasNext(); ) {
Map.Entry<TensorAddress, Double> bCell = bIterator.next();
TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aToIndexes,
bCell.getKey(), bToIndexes, joinedType);
if (combinedAddress == null) continue;
builder.cell(combinedAddress, combinator.applyAsDouble(aCell.getValue(), bCell.getValue()));
}
}
return builder.build();
}
/**
 * Returns an array having one entry, in order, for each dimension of fromType,
 * containing the index at which toType contains the same dimension name.
 * That is, if the returned array contains n at index i then
 * fromType.dimensions().get(i).name().equals(toType.dimensions().get(n).name()).
 * If some dimension in fromType is not present in toType, the corresponding index will be -1.
 */
private int[] mapIndexes(TensorType fromType, TensorType toType) {
    int[] toIndexes = new int[fromType.dimensions().size()];
    for (int i = 0; i < fromType.dimensions().size(); i++)
        toIndexes[i] = toType.indexOfDimension(fromType.dimensions().get(i).name()).orElse(-1);
    return toIndexes;
}
private TensorAddress joinAddresses(TensorAddress a, int[] aToIndexes, TensorAddress b, int[] bToIndexes,
TensorType joinedType) {
String[] joinedLabels = new String[joinedType.dimensions().size()];
mapContent(a, joinedLabels, aToIndexes);
boolean compatible = mapContent(b, joinedLabels, bToIndexes);
if ( ! compatible) return null;
return TensorAddress.of(joinedLabels);
}
/**
 * Maps the labels of the given address into the given array, using the given index map.
 *
 * @return true if the mapping was successful, false if one of the destination positions was
 *         occupied by a different value
 */
private boolean mapContent(TensorAddress from, String[] to, int[] indexMap) {
    for (int i = 0; i < from.size(); i++) {
        int toIndex = indexMap[i];
        if (to[toIndex] != null && ! to[toIndex].equals(from.label(i))) return false;
        to[toIndex] = from.label(i);
    }
    return true;
}
/**
* Returns common dimension of a and b as a new tensor type
*/
private TensorType commonDimensions(Tensor a, Tensor b) {
TensorType.Builder typeBuilder = new TensorType.Builder();
TensorType aType = a.type();
TensorType bType = b.type();
for (int i = 0; i < aType.dimensions().size(); ++i) {
TensorType.Dimension aDim = aType.dimensions().get(i);
for (int j = 0; j < bType.dimensions().size(); ++j) {
TensorType.Dimension bDim = bType.dimensions().get(j);
if (aDim.equals(bDim)) {
typeBuilder.set(bDim);
}
}
}
return typeBuilder.build();
}
/** Returns the projection of this cell's address onto the dimensions selected by indexMap. */
private TensorAddress partialCommonAddress(Tensor.Cell cell, int[] indexMap) {
    TensorAddress address = cell.getKey();
    String[] labels = new String[indexMap.length];
    for (int i = 0; i < labels.length; ++i) {
        labels[i] = address.label(indexMap[i]);
    }
    return TensorAddress.of(labels);
}
} |
nitpick: This comment should just say "fallback", as the if part and what we fall back to is perfectly clear from your code. | private Tensor mappedHashJoin(Tensor a, Tensor b, TensorType joinedType) {
    // Hash join of two mapped tensors over their common dimensions.
    TensorType commonDimensionType = commonDimensions(a, b);
    if (commonDimensionType.dimensions().isEmpty()) {
        return mappedGeneralJoin(a, b, joinedType); // fallback
    }
    // Build the lookup table from the smaller tensor, but remember whether we swapped:
    // the combinator need not be commutative, so the caller's argument order must be preserved.
    boolean swapTensors = a.size() > b.size();
    if (swapTensors) {
        Tensor temp = a;
        a = b;
        b = temp;
    }
    // Index a's cells by their address projected onto the common dimensions
    Map<TensorAddress, List<Tensor.Cell>> aCellsByCommonAddress = new HashMap<>();
    for (Iterator<Tensor.Cell> cellIterator = a.cellIterator(); cellIterator.hasNext(); ) {
        Tensor.Cell aCell = cellIterator.next();
        TensorAddress partialCommonAddress = partialCommonAddress(aCell, a.type(), commonDimensionType);
        aCellsByCommonAddress.computeIfAbsent(partialCommonAddress, ignored -> new ArrayList<>()).add(aCell);
    }
    int[] aToIndexes = mapIndexes(a.type(), joinedType);
    int[] bToIndexes = mapIndexes(b.type(), joinedType);
    Tensor.Builder builder = Tensor.Builder.of(joinedType);
    // Probe with each b cell and combine it with every a cell sharing its common address
    for (Iterator<Tensor.Cell> cellIterator = b.cellIterator(); cellIterator.hasNext(); ) {
        Tensor.Cell bCell = cellIterator.next();
        TensorAddress partialCommonAddress = partialCommonAddress(bCell, b.type(), commonDimensionType);
        for (Tensor.Cell aCell : aCellsByCommonAddress.getOrDefault(partialCommonAddress, Collections.emptyList())) {
            TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aToIndexes,
                                                          bCell.getKey(), bToIndexes, joinedType);
            if (combinedAddress == null) continue;
            // Apply the combinator in the caller's original argument order
            double combinedValue = swapTensors
                    ? combinator.applyAsDouble(bCell.getValue(), aCell.getValue())
                    : combinator.applyAsDouble(aCell.getValue(), bCell.getValue());
            builder.cell(combinedAddress, combinedValue);
        }
    }
    return builder.build();
} | return mappedGeneralJoin(a, b, joinedType); | private Tensor mappedHashJoin(Tensor a, Tensor b, TensorType joinedType) {
// Body of mappedHashJoin: hash join of two mapped tensors over their common dimensions.
TensorType commonDimensionType = commonDimensions(a, b);
if (commonDimensionType.dimensions().isEmpty()) {
    return mappedGeneralJoin(a, b, joinedType); // fallback: nothing to hash on
}
// Build the lookup table from the smaller tensor, but remember the swap:
// the combinator need not be commutative, so argument order must be restored below.
boolean swapTensors = a.size() > b.size();
if (swapTensors) {
    Tensor temp = a;
    a = b;
    b = temp;
}
// Precompute dimension index maps once instead of per cell
int[] aIndexesInCommon = mapIndexes(commonDimensionType, a.type());
int[] bIndexesInCommon = mapIndexes(commonDimensionType, b.type());
int[] aIndexesInJoined = mapIndexes(a.type(), joinedType);
int[] bIndexesInJoined = mapIndexes(b.type(), joinedType);
// Index a's cells by their address projected onto the common dimensions
Map<TensorAddress, List<Tensor.Cell>> aCellsByCommonAddress = new HashMap<>();
for (Iterator<Tensor.Cell> cellIterator = a.cellIterator(); cellIterator.hasNext(); ) {
    Tensor.Cell aCell = cellIterator.next();
    TensorAddress partialCommonAddress = partialCommonAddress(aCell, aIndexesInCommon);
    aCellsByCommonAddress.putIfAbsent(partialCommonAddress, new ArrayList<>());
    aCellsByCommonAddress.get(partialCommonAddress).add(aCell);
}
Tensor.Builder builder = Tensor.Builder.of(joinedType);
// Probe with each b cell and combine it with every a cell sharing its common address
for (Iterator<Tensor.Cell> cellIterator = b.cellIterator(); cellIterator.hasNext(); ) {
    Tensor.Cell bCell = cellIterator.next();
    TensorAddress partialCommonAddress = partialCommonAddress(bCell, bIndexesInCommon);
    for (Tensor.Cell aCell : aCellsByCommonAddress.getOrDefault(partialCommonAddress, Collections.emptyList())) {
        TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aIndexesInJoined,
                                                      bCell.getKey(), bIndexesInJoined, joinedType);
        if (combinedAddress == null) continue;
        // Restore the caller's original combinator argument order if the tensors were swapped
        double combinedValue = swapTensors ?
                combinator.applyAsDouble(bCell.getValue(), aCell.getValue()) :
                combinator.applyAsDouble(aCell.getValue(), bCell.getValue());
        builder.cell(combinedAddress, combinedValue);
    }
}
return builder.build();
} | class Join extends PrimitiveTensorFunction {
private final TensorFunction argumentA, argumentB;
private final DoubleBinaryOperator combinator;
/**
 * Creates a join of the two given tensor functions.
 *
 * @param argumentA  the first argument tensor function
 * @param argumentB  the second argument tensor function
 * @param combinator the function combining one cell value from each argument
 * @throws NullPointerException if any argument is null
 */
public Join(TensorFunction argumentA, TensorFunction argumentB, DoubleBinaryOperator combinator) {
    Objects.requireNonNull(argumentA, "The first argument tensor cannot be null");
    Objects.requireNonNull(argumentB, "The second argument tensor cannot be null");
    Objects.requireNonNull(combinator, "The combinator function cannot be null");
    this.argumentA = argumentA;
    this.argumentB = argumentB;
    this.combinator = combinator;
}
/** Returns the first argument function of this join. */
public TensorFunction argumentA() { return argumentA; }
/** Returns the second argument function of this join. */
public TensorFunction argumentB() { return argumentB; }
/** Returns the function combining one value from each argument cell. */
public DoubleBinaryOperator combinator() { return combinator; }
/** Returns the two argument functions, in order. */
@Override
public List<TensorFunction> functionArguments() { return ImmutableList.of(argumentA, argumentB); }
@Override
public TensorFunction replaceArguments(List<TensorFunction> arguments) {
    // A join always takes exactly two argument functions
    int argumentCount = arguments.size();
    if (argumentCount != 2)
        throw new IllegalArgumentException("Join must have 2 arguments, got " + argumentCount);
    return new Join(arguments.get(0), arguments.get(1), combinator);
}
@Override
public PrimitiveTensorFunction toPrimitive() {
return new Join(argumentA.toPrimitive(), argumentB.toPrimitive(), combinator);
}
@Override
public String toString(ToStringContext context) {
return "join(" + argumentA.toString(context) + ", " + argumentB.toString(context) + ", " + combinator + ")";
}
@Override
public Tensor evaluate(EvaluationContext context) {
Tensor a = argumentA.evaluate(context);
Tensor b = argumentB.evaluate(context);
TensorType joinedType = new TensorType.Builder(a.type(), b.type()).build();
if (hasSingleIndexedDimension(a) && hasSingleIndexedDimension(b) && a.type().dimensions().get(0).name().equals(b.type().dimensions().get(0).name()))
return indexedVectorJoin((IndexedTensor)a, (IndexedTensor)b, joinedType);
else if (joinedType.dimensions().size() == a.type().dimensions().size() && joinedType.dimensions().size() == b.type().dimensions().size())
return singleSpaceJoin(a, b, joinedType);
else if (a.type().dimensions().containsAll(b.type().dimensions()))
return subspaceJoin(b, a, joinedType, true);
else if (b.type().dimensions().containsAll(a.type().dimensions()))
return subspaceJoin(a, b, joinedType, false);
else
return generalJoin(a, b, joinedType);
}
private boolean hasSingleIndexedDimension(Tensor tensor) {
return tensor.type().dimensions().size() == 1 && tensor.type().dimensions().get(0).isIndexed();
}
private Tensor indexedVectorJoin(IndexedTensor a, IndexedTensor b, TensorType type) {
int joinedLength = Math.min(a.dimensionSizes().size(0), b.dimensionSizes().size(0));
Iterator<Double> aIterator = a.valueIterator();
Iterator<Double> bIterator = b.valueIterator();
IndexedTensor.Builder builder = IndexedTensor.Builder.of(type, new DimensionSizes.Builder(1).set(0, joinedLength).build());
for (int i = 0; i < joinedLength; i++)
builder.cell(combinator.applyAsDouble(aIterator.next(), bIterator.next()), i);
return builder.build();
}
/** When both tensors have the same dimensions, at most one cell matches a cell in the other tensor */
private Tensor singleSpaceJoin(Tensor a, Tensor b, TensorType joinedType) {
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> i = a.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = i.next();
double bCellValue = b.get(aCell.getKey());
if (Double.isNaN(bCellValue)) continue;
builder.cell(aCell.getKey(), combinator.applyAsDouble(aCell.getValue(), bCellValue));
}
return builder.build();
}
/** Join a tensor into a superspace */
private Tensor subspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
if (subspace instanceof IndexedTensor && superspace instanceof IndexedTensor)
return indexedSubspaceJoin((IndexedTensor) subspace, (IndexedTensor) superspace, joinedType, reversedArgumentOrder);
else
return generalSubspaceJoin(subspace, superspace, joinedType, reversedArgumentOrder);
}
private Tensor indexedSubspaceJoin(IndexedTensor subspace, IndexedTensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
if (subspace.size() == 0 || superspace.size() == 0)
return Tensor.Builder.of(joinedType, new DimensionSizes.Builder(joinedType.dimensions().size()).build()).build();
DimensionSizes joinedSizes = joinedSize(joinedType, subspace, superspace);
IndexedTensor.Builder builder = (IndexedTensor.Builder)Tensor.Builder.of(joinedType, joinedSizes);
Set<String> superDimensionNames = new HashSet<>(superspace.type().dimensionNames());
superDimensionNames.removeAll(subspace.type().dimensionNames());
for (Iterator<IndexedTensor.SubspaceIterator> i = superspace.subspaceIterator(superDimensionNames, joinedSizes); i.hasNext(); ) {
IndexedTensor.SubspaceIterator subspaceInSuper = i.next();
joinSubspaces(subspace.valueIterator(), subspace.size(),
subspaceInSuper, subspaceInSuper.size(),
reversedArgumentOrder, builder);
}
return builder.build();
}
private void joinSubspaces(Iterator<Double> subspace, int subspaceSize,
Iterator<Tensor.Cell> superspace, int superspaceSize,
boolean reversedArgumentOrder, IndexedTensor.Builder builder) {
int joinedLength = Math.min(subspaceSize, superspaceSize);
if (reversedArgumentOrder) {
for (int i = 0; i < joinedLength; i++) {
Tensor.Cell supercell = superspace.next();
builder.cell(supercell, combinator.applyAsDouble(supercell.getValue(), subspace.next()));
}
} else {
for (int i = 0; i < joinedLength; i++) {
Tensor.Cell supercell = superspace.next();
builder.cell(supercell, combinator.applyAsDouble(subspace.next(), supercell.getValue()));
}
}
}
private DimensionSizes joinedSize(TensorType joinedType, IndexedTensor a, IndexedTensor b) {
DimensionSizes.Builder builder = new DimensionSizes.Builder(joinedType.dimensions().size());
for (int i = 0; i < builder.dimensions(); i++) {
String dimensionName = joinedType.dimensions().get(i).name();
Optional<Integer> aIndex = a.type().indexOfDimension(dimensionName);
Optional<Integer> bIndex = b.type().indexOfDimension(dimensionName);
if (aIndex.isPresent() && bIndex.isPresent())
builder.set(i, Math.min(b.dimensionSizes().size(bIndex.get()), a.dimensionSizes().size(aIndex.get())));
else if (aIndex.isPresent())
builder.set(i, a.dimensionSizes().size(aIndex.get()));
else if (bIndex.isPresent())
builder.set(i, b.dimensionSizes().size(bIndex.get()));
}
return builder.build();
}
private Tensor generalSubspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
int[] subspaceIndexes = subspaceIndexes(superspace.type(), subspace.type());
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> i = superspace.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> supercell = i.next();
TensorAddress subaddress = mapAddressToSubspace(supercell.getKey(), subspaceIndexes);
double subspaceValue = subspace.get(subaddress);
if ( ! Double.isNaN(subspaceValue))
builder.cell(supercell.getKey(),
reversedArgumentOrder ? combinator.applyAsDouble(supercell.getValue(), subspaceValue)
: combinator.applyAsDouble(subspaceValue, supercell.getValue()));
}
return builder.build();
}
/** Returns the indexes in the superspace type which should be retained to create the subspace type */
private int[] subspaceIndexes(TensorType supertype, TensorType subtype) {
int[] subspaceIndexes = new int[subtype.dimensions().size()];
for (int i = 0; i < subtype.dimensions().size(); i++)
subspaceIndexes[i] = supertype.indexOfDimension(subtype.dimensions().get(i).name()).get();
return subspaceIndexes;
}
private TensorAddress mapAddressToSubspace(TensorAddress superAddress, int[] subspaceIndexes) {
String[] subspaceLabels = new String[subspaceIndexes.length];
for (int i = 0; i < subspaceIndexes.length; i++)
subspaceLabels[i] = superAddress.label(subspaceIndexes[i]);
return TensorAddress.of(subspaceLabels);
}
/** Slow join which works for any two tensors */
private Tensor generalJoin(Tensor a, Tensor b, TensorType joinedType) {
if (a instanceof IndexedTensor && b instanceof IndexedTensor)
return indexedGeneralJoin((IndexedTensor) a, (IndexedTensor) b, joinedType);
else
return mappedHashJoin(a, b, joinedType);
}
private Tensor indexedGeneralJoin(IndexedTensor a, IndexedTensor b, TensorType joinedType) {
DimensionSizes joinedSize = joinedSize(joinedType, a, b);
Tensor.Builder builder = Tensor.Builder.of(joinedType, joinedSize);
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
joinTo(a, b, joinedType, joinedSize, aToIndexes, bToIndexes, false, builder);
joinTo(b, a, joinedType, joinedSize, bToIndexes, aToIndexes, true, builder);
return builder.build();
}
private void joinTo(IndexedTensor a, IndexedTensor b, TensorType joinedType, DimensionSizes joinedSize,
int[] aToIndexes, int[] bToIndexes, boolean reversedOrder, Tensor.Builder builder) {
Set<String> sharedDimensions = Sets.intersection(a.type().dimensionNames(), b.type().dimensionNames());
Set<String> dimensionsOnlyInA = Sets.difference(a.type().dimensionNames(), b.type().dimensionNames());
DimensionSizes aIterateSize = joinedSizeOf(a.type(), joinedType, joinedSize);
DimensionSizes bIterateSize = joinedSizeOf(b.type(), joinedType, joinedSize);
for (Iterator<IndexedTensor.SubspaceIterator> ia = a.subspaceIterator(dimensionsOnlyInA, aIterateSize); ia.hasNext(); ) {
IndexedTensor.SubspaceIterator aSubspace = ia.next();
while (aSubspace.hasNext()) {
Tensor.Cell aCell = aSubspace.next();
PartialAddress matchingBCells = partialAddress(a.type(), aSubspace.address(), sharedDimensions);
for (IndexedTensor.SubspaceIterator bSubspace = b.cellIterator(matchingBCells, bIterateSize); bSubspace.hasNext(); ) {
Tensor.Cell bCell = bSubspace.next();
TensorAddress joinedAddress = joinAddresses(aCell.getKey(), aToIndexes, bCell.getKey(), bToIndexes, joinedType);
double joinedValue = reversedOrder ? combinator.applyAsDouble(bCell.getValue(), aCell.getValue())
: combinator.applyAsDouble(aCell.getValue(), bCell.getValue());
builder.cell(joinedAddress, joinedValue);
}
}
}
}
private PartialAddress partialAddress(TensorType addressType, TensorAddress address, Set<String> retainDimensions) {
PartialAddress.Builder builder = new PartialAddress.Builder(retainDimensions.size());
for (int i = 0; i < addressType.dimensions().size(); i++)
if (retainDimensions.contains(addressType.dimensions().get(i).name()))
builder.add(addressType.dimensions().get(i).name(), address.intLabel(i));
return builder.build();
}
/** Returns the sizes from the joined sizes which are present in the type argument */
private DimensionSizes joinedSizeOf(TensorType type, TensorType joinedType, DimensionSizes joinedSizes) {
DimensionSizes.Builder builder = new DimensionSizes.Builder(type.dimensions().size());
int dimensionIndex = 0;
for (int i = 0; i < joinedType.dimensions().size(); i++) {
if (type.dimensionNames().contains(joinedType.dimensions().get(i).name()))
builder.set(dimensionIndex++, joinedSizes.size(i));
}
return builder.build();
}
private Tensor mappedGeneralJoin(Tensor a, Tensor b, TensorType joinedType) {
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> aIterator = a.cellIterator(); aIterator.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = aIterator.next();
for (Iterator<Tensor.Cell> bIterator = b.cellIterator(); bIterator.hasNext(); ) {
Map.Entry<TensorAddress, Double> bCell = bIterator.next();
TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aToIndexes,
bCell.getKey(), bToIndexes, joinedType);
if (combinedAddress == null) continue;
builder.cell(combinedAddress, combinator.applyAsDouble(aCell.getValue(), bCell.getValue()));
}
}
return builder.build();
}
/**
* Returns the an array having one entry in order for each dimension of fromType
* containing the index at which toType contains the same dimension name.
* That is, if the returned array contains n at index i then
* fromType.dimensions().get(i).name.equals(toType.dimensions().get(n).name())
* If some dimension in fromType is not present in toType, the corresponding index will be -1
*/
private int[] mapIndexes(TensorType fromType, TensorType toType) {
int[] toIndexes = new int[fromType.dimensions().size()];
for (int i = 0; i < fromType.dimensions().size(); i++)
toIndexes[i] = toType.indexOfDimension(fromType.dimensions().get(i).name()).orElse(-1);
return toIndexes;
}
private TensorAddress joinAddresses(TensorAddress a, int[] aToIndexes, TensorAddress b, int[] bToIndexes,
TensorType joinedType) {
String[] joinedLabels = new String[joinedType.dimensions().size()];
mapContent(a, joinedLabels, aToIndexes);
boolean compatible = mapContent(b, joinedLabels, bToIndexes);
if ( ! compatible) return null;
return TensorAddress.of(joinedLabels);
}
/**
* Maps the content in the given list to the given array, using the given index map.
*
* @return true if the mapping was successful, false if one of the destination positions was
* occupied by a different value
*/
private boolean mapContent(TensorAddress from, String[] to, int[] indexMap) {
for (int i = 0; i < from.size(); i++) {
int toIndex = indexMap[i];
if (to[toIndex] != null && ! to[toIndex].equals(from.label(i))) return false;
to[toIndex] = from.label(i);
}
return true;
}
/**
* Returns common dimension of a and b as a new tensor type
*/
private TensorType commonDimensions(Tensor a, Tensor b) {
TensorType.Builder typeBuilder = new TensorType.Builder();
TensorType aType = a.type();
TensorType bType = b.type();
for (int i = 0; i < aType.dimensions().size(); ++i) {
TensorType.Dimension aDim = aType.dimensions().get(i);
for (int j = 0; j < bType.dimensions().size(); ++j) {
TensorType.Dimension bDim = bType.dimensions().get(j);
if (aDim.equals(bDim)) {
typeBuilder.set(bDim);
}
}
}
return typeBuilder.build();
}
private TensorAddress partialCommonAddress(Tensor.Cell cell, TensorType type, TensorType commonDimensions) {
TensorAddress address = cell.getKey();
String[] labels = new String[commonDimensions.dimensions().size()];
for (int i = 0; i < labels.length; ++i) {
String name = commonDimensions.dimensions().get(i).name();
int index = type.indexOfDimension(name).orElseThrow(RuntimeException::new);
labels[i] = address.label(index);
}
return TensorAddress.of(labels);
}
} | class Join extends PrimitiveTensorFunction {
private final TensorFunction argumentA, argumentB;
private final DoubleBinaryOperator combinator;
public Join(TensorFunction argumentA, TensorFunction argumentB, DoubleBinaryOperator combinator) {
Objects.requireNonNull(argumentA, "The first argument tensor cannot be null");
Objects.requireNonNull(argumentB, "The second argument tensor cannot be null");
Objects.requireNonNull(combinator, "The combinator function cannot be null");
this.argumentA = argumentA;
this.argumentB = argumentB;
this.combinator = combinator;
}
public TensorFunction argumentA() { return argumentA; }
public TensorFunction argumentB() { return argumentB; }
public DoubleBinaryOperator combinator() { return combinator; }
@Override
public List<TensorFunction> functionArguments() { return ImmutableList.of(argumentA, argumentB); }
@Override
public TensorFunction replaceArguments(List<TensorFunction> arguments) {
if ( arguments.size() != 2)
throw new IllegalArgumentException("Join must have 2 arguments, got " + arguments.size());
return new Join(arguments.get(0), arguments.get(1), combinator);
}
@Override
public PrimitiveTensorFunction toPrimitive() {
return new Join(argumentA.toPrimitive(), argumentB.toPrimitive(), combinator);
}
@Override
public String toString(ToStringContext context) {
return "join(" + argumentA.toString(context) + ", " + argumentB.toString(context) + ", " + combinator + ")";
}
@Override
public Tensor evaluate(EvaluationContext context) {
Tensor a = argumentA.evaluate(context);
Tensor b = argumentB.evaluate(context);
TensorType joinedType = new TensorType.Builder(a.type(), b.type()).build();
if (hasSingleIndexedDimension(a) && hasSingleIndexedDimension(b) && a.type().dimensions().get(0).name().equals(b.type().dimensions().get(0).name()))
return indexedVectorJoin((IndexedTensor)a, (IndexedTensor)b, joinedType);
else if (joinedType.dimensions().size() == a.type().dimensions().size() && joinedType.dimensions().size() == b.type().dimensions().size())
return singleSpaceJoin(a, b, joinedType);
else if (a.type().dimensions().containsAll(b.type().dimensions()))
return subspaceJoin(b, a, joinedType, true);
else if (b.type().dimensions().containsAll(a.type().dimensions()))
return subspaceJoin(a, b, joinedType, false);
else
return generalJoin(a, b, joinedType);
}
private boolean hasSingleIndexedDimension(Tensor tensor) {
return tensor.type().dimensions().size() == 1 && tensor.type().dimensions().get(0).isIndexed();
}
private Tensor indexedVectorJoin(IndexedTensor a, IndexedTensor b, TensorType type) {
int joinedLength = Math.min(a.dimensionSizes().size(0), b.dimensionSizes().size(0));
Iterator<Double> aIterator = a.valueIterator();
Iterator<Double> bIterator = b.valueIterator();
IndexedTensor.Builder builder = IndexedTensor.Builder.of(type, new DimensionSizes.Builder(1).set(0, joinedLength).build());
for (int i = 0; i < joinedLength; i++)
builder.cell(combinator.applyAsDouble(aIterator.next(), bIterator.next()), i);
return builder.build();
}
/** When both tensors have the same dimensions, at most one cell matches a cell in the other tensor */
private Tensor singleSpaceJoin(Tensor a, Tensor b, TensorType joinedType) {
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> i = a.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = i.next();
double bCellValue = b.get(aCell.getKey());
if (Double.isNaN(bCellValue)) continue;
builder.cell(aCell.getKey(), combinator.applyAsDouble(aCell.getValue(), bCellValue));
}
return builder.build();
}
/** Join a tensor into a superspace */
private Tensor subspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
if (subspace instanceof IndexedTensor && superspace instanceof IndexedTensor)
return indexedSubspaceJoin((IndexedTensor) subspace, (IndexedTensor) superspace, joinedType, reversedArgumentOrder);
else
return generalSubspaceJoin(subspace, superspace, joinedType, reversedArgumentOrder);
}
private Tensor indexedSubspaceJoin(IndexedTensor subspace, IndexedTensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
if (subspace.size() == 0 || superspace.size() == 0)
return Tensor.Builder.of(joinedType, new DimensionSizes.Builder(joinedType.dimensions().size()).build()).build();
DimensionSizes joinedSizes = joinedSize(joinedType, subspace, superspace);
IndexedTensor.Builder builder = (IndexedTensor.Builder)Tensor.Builder.of(joinedType, joinedSizes);
Set<String> superDimensionNames = new HashSet<>(superspace.type().dimensionNames());
superDimensionNames.removeAll(subspace.type().dimensionNames());
for (Iterator<IndexedTensor.SubspaceIterator> i = superspace.subspaceIterator(superDimensionNames, joinedSizes); i.hasNext(); ) {
IndexedTensor.SubspaceIterator subspaceInSuper = i.next();
joinSubspaces(subspace.valueIterator(), subspace.size(),
subspaceInSuper, subspaceInSuper.size(),
reversedArgumentOrder, builder);
}
return builder.build();
}
private void joinSubspaces(Iterator<Double> subspace, int subspaceSize,
Iterator<Tensor.Cell> superspace, int superspaceSize,
boolean reversedArgumentOrder, IndexedTensor.Builder builder) {
int joinedLength = Math.min(subspaceSize, superspaceSize);
if (reversedArgumentOrder) {
for (int i = 0; i < joinedLength; i++) {
Tensor.Cell supercell = superspace.next();
builder.cell(supercell, combinator.applyAsDouble(supercell.getValue(), subspace.next()));
}
} else {
for (int i = 0; i < joinedLength; i++) {
Tensor.Cell supercell = superspace.next();
builder.cell(supercell, combinator.applyAsDouble(subspace.next(), supercell.getValue()));
}
}
}
private DimensionSizes joinedSize(TensorType joinedType, IndexedTensor a, IndexedTensor b) {
DimensionSizes.Builder builder = new DimensionSizes.Builder(joinedType.dimensions().size());
for (int i = 0; i < builder.dimensions(); i++) {
String dimensionName = joinedType.dimensions().get(i).name();
Optional<Integer> aIndex = a.type().indexOfDimension(dimensionName);
Optional<Integer> bIndex = b.type().indexOfDimension(dimensionName);
if (aIndex.isPresent() && bIndex.isPresent())
builder.set(i, Math.min(b.dimensionSizes().size(bIndex.get()), a.dimensionSizes().size(aIndex.get())));
else if (aIndex.isPresent())
builder.set(i, a.dimensionSizes().size(aIndex.get()));
else if (bIndex.isPresent())
builder.set(i, b.dimensionSizes().size(bIndex.get()));
}
return builder.build();
}
private Tensor generalSubspaceJoin(Tensor subspace, Tensor superspace, TensorType joinedType, boolean reversedArgumentOrder) {
int[] subspaceIndexes = subspaceIndexes(superspace.type(), subspace.type());
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> i = superspace.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> supercell = i.next();
TensorAddress subaddress = mapAddressToSubspace(supercell.getKey(), subspaceIndexes);
double subspaceValue = subspace.get(subaddress);
if ( ! Double.isNaN(subspaceValue))
builder.cell(supercell.getKey(),
reversedArgumentOrder ? combinator.applyAsDouble(supercell.getValue(), subspaceValue)
: combinator.applyAsDouble(subspaceValue, supercell.getValue()));
}
return builder.build();
}
/** Returns the indexes in the superspace type which should be retained to create the subspace type */
private int[] subspaceIndexes(TensorType supertype, TensorType subtype) {
int[] subspaceIndexes = new int[subtype.dimensions().size()];
for (int i = 0; i < subtype.dimensions().size(); i++)
subspaceIndexes[i] = supertype.indexOfDimension(subtype.dimensions().get(i).name()).get();
return subspaceIndexes;
}
private TensorAddress mapAddressToSubspace(TensorAddress superAddress, int[] subspaceIndexes) {
String[] subspaceLabels = new String[subspaceIndexes.length];
for (int i = 0; i < subspaceIndexes.length; i++)
subspaceLabels[i] = superAddress.label(subspaceIndexes[i]);
return TensorAddress.of(subspaceLabels);
}
/** Slow join which works for any two tensors */
private Tensor generalJoin(Tensor a, Tensor b, TensorType joinedType) {
if (a instanceof IndexedTensor && b instanceof IndexedTensor)
return indexedGeneralJoin((IndexedTensor) a, (IndexedTensor) b, joinedType);
else
return mappedHashJoin(a, b, joinedType);
}
private Tensor indexedGeneralJoin(IndexedTensor a, IndexedTensor b, TensorType joinedType) {
DimensionSizes joinedSize = joinedSize(joinedType, a, b);
Tensor.Builder builder = Tensor.Builder.of(joinedType, joinedSize);
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
joinTo(a, b, joinedType, joinedSize, aToIndexes, bToIndexes, false, builder);
joinTo(b, a, joinedType, joinedSize, bToIndexes, aToIndexes, true, builder);
return builder.build();
}
private void joinTo(IndexedTensor a, IndexedTensor b, TensorType joinedType, DimensionSizes joinedSize,
int[] aToIndexes, int[] bToIndexes, boolean reversedOrder, Tensor.Builder builder) {
Set<String> sharedDimensions = Sets.intersection(a.type().dimensionNames(), b.type().dimensionNames());
Set<String> dimensionsOnlyInA = Sets.difference(a.type().dimensionNames(), b.type().dimensionNames());
DimensionSizes aIterateSize = joinedSizeOf(a.type(), joinedType, joinedSize);
DimensionSizes bIterateSize = joinedSizeOf(b.type(), joinedType, joinedSize);
for (Iterator<IndexedTensor.SubspaceIterator> ia = a.subspaceIterator(dimensionsOnlyInA, aIterateSize); ia.hasNext(); ) {
IndexedTensor.SubspaceIterator aSubspace = ia.next();
while (aSubspace.hasNext()) {
Tensor.Cell aCell = aSubspace.next();
PartialAddress matchingBCells = partialAddress(a.type(), aSubspace.address(), sharedDimensions);
for (IndexedTensor.SubspaceIterator bSubspace = b.cellIterator(matchingBCells, bIterateSize); bSubspace.hasNext(); ) {
Tensor.Cell bCell = bSubspace.next();
TensorAddress joinedAddress = joinAddresses(aCell.getKey(), aToIndexes, bCell.getKey(), bToIndexes, joinedType);
double joinedValue = reversedOrder ? combinator.applyAsDouble(bCell.getValue(), aCell.getValue())
: combinator.applyAsDouble(aCell.getValue(), bCell.getValue());
builder.cell(joinedAddress, joinedValue);
}
}
}
}
private PartialAddress partialAddress(TensorType addressType, TensorAddress address, Set<String> retainDimensions) {
PartialAddress.Builder builder = new PartialAddress.Builder(retainDimensions.size());
for (int i = 0; i < addressType.dimensions().size(); i++)
if (retainDimensions.contains(addressType.dimensions().get(i).name()))
builder.add(addressType.dimensions().get(i).name(), address.intLabel(i));
return builder.build();
}
/** Returns the sizes from the joined sizes which are present in the type argument */
private DimensionSizes joinedSizeOf(TensorType type, TensorType joinedType, DimensionSizes joinedSizes) {
DimensionSizes.Builder builder = new DimensionSizes.Builder(type.dimensions().size());
int dimensionIndex = 0;
for (int i = 0; i < joinedType.dimensions().size(); i++) {
if (type.dimensionNames().contains(joinedType.dimensions().get(i).name()))
builder.set(dimensionIndex++, joinedSizes.size(i));
}
return builder.build();
}
private Tensor mappedGeneralJoin(Tensor a, Tensor b, TensorType joinedType) {
int[] aToIndexes = mapIndexes(a.type(), joinedType);
int[] bToIndexes = mapIndexes(b.type(), joinedType);
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> aIterator = a.cellIterator(); aIterator.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = aIterator.next();
for (Iterator<Tensor.Cell> bIterator = b.cellIterator(); bIterator.hasNext(); ) {
Map.Entry<TensorAddress, Double> bCell = bIterator.next();
TensorAddress combinedAddress = joinAddresses(aCell.getKey(), aToIndexes,
bCell.getKey(), bToIndexes, joinedType);
if (combinedAddress == null) continue;
builder.cell(combinedAddress, combinator.applyAsDouble(aCell.getValue(), bCell.getValue()));
}
}
return builder.build();
}
/**
* Returns the an array having one entry in order for each dimension of fromType
* containing the index at which toType contains the same dimension name.
* That is, if the returned array contains n at index i then
* fromType.dimensions().get(i).name.equals(toType.dimensions().get(n).name())
* If some dimension in fromType is not present in toType, the corresponding index will be -1
*/
private int[] mapIndexes(TensorType fromType, TensorType toType) {
int[] toIndexes = new int[fromType.dimensions().size()];
for (int i = 0; i < fromType.dimensions().size(); i++)
toIndexes[i] = toType.indexOfDimension(fromType.dimensions().get(i).name()).orElse(-1);
return toIndexes;
}
private TensorAddress joinAddresses(TensorAddress a, int[] aToIndexes, TensorAddress b, int[] bToIndexes,
TensorType joinedType) {
String[] joinedLabels = new String[joinedType.dimensions().size()];
mapContent(a, joinedLabels, aToIndexes);
boolean compatible = mapContent(b, joinedLabels, bToIndexes);
if ( ! compatible) return null;
return TensorAddress.of(joinedLabels);
}
/**
* Maps the content in the given list to the given array, using the given index map.
*
* @return true if the mapping was successful, false if one of the destination positions was
* occupied by a different value
*/
private boolean mapContent(TensorAddress from, String[] to, int[] indexMap) {
for (int i = 0; i < from.size(); i++) {
int toIndex = indexMap[i];
if (to[toIndex] != null && ! to[toIndex].equals(from.label(i))) return false;
to[toIndex] = from.label(i);
}
return true;
}
/**
* Returns common dimension of a and b as a new tensor type
*/
private TensorType commonDimensions(Tensor a, Tensor b) {
TensorType.Builder typeBuilder = new TensorType.Builder();
TensorType aType = a.type();
TensorType bType = b.type();
for (int i = 0; i < aType.dimensions().size(); ++i) {
TensorType.Dimension aDim = aType.dimensions().get(i);
for (int j = 0; j < bType.dimensions().size(); ++j) {
TensorType.Dimension bDim = bType.dimensions().get(j);
if (aDim.equals(bDim)) {
typeBuilder.set(bDim);
}
}
}
return typeBuilder.build();
}
private TensorAddress partialCommonAddress(Tensor.Cell cell, int[] indexMap) {
TensorAddress address = cell.getKey();
String[] labels = new String[indexMap.length];
for (int i = 0; i < labels.length; ++i) {
labels[i] = address.label(indexMap[i]);
}
return TensorAddress.of(labels);
}
} |
What happens when SearchWord is expanded with a third option here (in the future that is) | protected double getCyclesPerSecond(ArrayList<ParseResult> parseResults){
double cycles = 0;
double seconds = 0;
for (ParseResult parseResult : parseResults){
switch (parseResult.getSearchWord()){
case CYCLES_SEARCH_WORD:
cycles = makeCyclesDouble(parseResult.getValue());
break;
case SECONDS_SEARCH_WORD:
seconds = makeSecondsDouble(parseResult.getValue());
break;
}
}
if (cycles != 0 && seconds != 0){
return convertToGHz(cycles, seconds);
}
return 0;
} | break; | protected double getCyclesPerSecond(ArrayList<ParseResult> parseResults) {
double cycles = -1;
double seconds = -1;
for (ParseResult parseResult : parseResults) {
switch (parseResult.getSearchWord()) {
case CYCLES_SEARCH_WORD:
cycles = makeCyclesDouble(parseResult.getValue());
break;
case SECONDS_SEARCH_WORD:
seconds = makeSecondsDouble(parseResult.getValue());
break;
default:
throw new RuntimeException("Invalid ParseResult searchWord");
}
}
if (cycles > 0 && seconds > 0) {
return convertToGHz(cycles, seconds);
}
return -1;
} | class CPUBenchmark implements Benchmark {
private final String CPU_BENCHMARK_COMMAND = "perf stat -e cycles dd if=/dev/zero of=/dev/null count=100000 2>&1 | grep 'cycles\\|seconds'";
private final String CYCLES_SEARCH_WORD = "cycles";
private final String SECONDS_SEARCH_WORD = "seconds";
private static final Logger logger = Logger.getLogger(CPUBenchmark.class.getName());
private final HardwareResults hardwareResults;
private final CommandExecutor commandExecutor;
public CPUBenchmark(HardwareResults hardwareResults, CommandExecutor commandExecutor){
this.hardwareResults = hardwareResults;
this.commandExecutor = commandExecutor;
}
public void doBenchmark(){
try {
ArrayList<String> commandOutput = commandExecutor.executeCommand(CPU_BENCHMARK_COMMAND);
ArrayList<ParseResult> parseResults = parseCpuCyclesPerSec(commandOutput);
setCpuCyclesPerSec(parseResults);
}
catch (IOException e){
logger.log(Level.WARNING, "Failed to perform CPU benchmark", e);
}
}
protected ArrayList<ParseResult> parseCpuCyclesPerSec(ArrayList<String> commandOutput) {
ArrayList<String> searchWords = new ArrayList<>(Arrays.asList(CYCLES_SEARCH_WORD, SECONDS_SEARCH_WORD));
String splitRegexString = "\\s+";
int searchElementIndex = 1;
int returnElementIndex = 0;
ParseInstructions parseInstructions = new ParseInstructions(searchElementIndex,returnElementIndex,splitRegexString, searchWords);
return OutputParser.parseOutput(parseInstructions,commandOutput);
}
protected void setCpuCyclesPerSec(ArrayList<ParseResult> parseResults){
double cpuCyclesPerSec = getCyclesPerSecond(parseResults);
if (cpuCyclesPerSec > 0) {
hardwareResults.setCpuCyclesPerSec(cpuCyclesPerSec);
}
}
protected double makeCyclesDouble(String cycles){
cycles = cycles.replaceAll("[^\\d]","");
if (checkIfNumber(cycles)){
return Double.parseDouble(cycles);
}
return 0;
}
protected double makeSecondsDouble(String seconds){
seconds = seconds.replaceAll(",",".");
if (checkIfNumber(seconds)){
return Double.parseDouble(seconds);
}
return 0;
}
protected boolean checkIfNumber(String numberCandidate){
if (numberCandidate == null || numberCandidate.equals("")){
return false;
}
try{
Double.parseDouble(numberCandidate);
}
catch (NumberFormatException e){
return false;
}
return true;
}
protected double convertToGHz(double cycles, double seconds){
double giga = 1000000000.0;
return (cycles/seconds) / giga;
}
} | class CPUBenchmark implements Benchmark {
private static final String CPU_BENCHMARK_COMMAND = "perf stat -e cycles dd if=/dev/zero of=/dev/null count=100000 2>&1 | grep 'cycles\\|seconds'";
private static final String CYCLES_SEARCH_WORD = "cycles";
private static final String SECONDS_SEARCH_WORD = "seconds";
private static final String SPLIT_REGEX_STRING = "\\s+";
private static final int SEARCH_ELEMENT_INDEX = 1;
private static final int RETURN_ELEMENT_INDEX = 0;
private static final Logger logger = Logger.getLogger(CPUBenchmark.class.getName());
private final HardwareResults hardwareResults;
private final CommandExecutor commandExecutor;
public CPUBenchmark(HardwareResults hardwareResults, CommandExecutor commandExecutor) {
this.hardwareResults = hardwareResults;
this.commandExecutor = commandExecutor;
}
public void doBenchmark() {
try {
ArrayList<String> commandOutput = commandExecutor.executeCommand(CPU_BENCHMARK_COMMAND);
ArrayList<ParseResult> parseResults = parseCpuCyclesPerSec(commandOutput);
setCpuCyclesPerSec(parseResults);
} catch (IOException e) {
logger.log(Level.WARNING, "Failed to perform CPU benchmark", e);
}
}
protected ArrayList<ParseResult> parseCpuCyclesPerSec(ArrayList<String> commandOutput) {
ArrayList<String> searchWords = new ArrayList<>(Arrays.asList(CYCLES_SEARCH_WORD, SECONDS_SEARCH_WORD));
ParseInstructions parseInstructions = new ParseInstructions(SEARCH_ELEMENT_INDEX, RETURN_ELEMENT_INDEX, SPLIT_REGEX_STRING, searchWords);
return OutputParser.parseOutput(parseInstructions, commandOutput);
}
protected void setCpuCyclesPerSec(ArrayList<ParseResult> parseResults) {
double cpuCyclesPerSec = getCyclesPerSecond(parseResults);
if (cpuCyclesPerSec > 0) {
hardwareResults.setCpuCyclesPerSec(cpuCyclesPerSec);
}
}
protected double makeCyclesDouble(String cycles) {
cycles = cycles.replaceAll("[^\\d]", "");
if (checkIfNumber(cycles)) {
return Double.parseDouble(cycles);
}
return -1;
}
protected double makeSecondsDouble(String seconds) {
seconds = seconds.replaceAll(",", ".");
if (checkIfNumber(seconds)) {
return Double.parseDouble(seconds);
}
return -1;
}
protected boolean checkIfNumber(String numberCandidate) {
if (numberCandidate == null || numberCandidate.equals("")) {
return false;
}
try {
Double.parseDouble(numberCandidate);
} catch (NumberFormatException e) {
return false;
}
return true;
}
protected double convertToGHz(double cycles, double seconds) {
double giga = 1000000000.0;
return (cycles / seconds) / giga;
}
} |
`throw missingFieldException` here as well? | private static RequiredPeerCredential toRequiredPeerCredential(RequiredCredential requiredCredential) {
if (requiredCredential.field == null) {
throw new IllegalArgumentException("field");
}
if (requiredCredential.matchExpression == null) {
throw new IllegalArgumentException("must-match");
}
return new RequiredPeerCredential(toField(requiredCredential.field), new HostGlobPattern(requiredCredential.matchExpression));
} | throw new IllegalArgumentException("field"); | private static RequiredPeerCredential toRequiredPeerCredential(RequiredCredential requiredCredential) {
if (requiredCredential.field == null) {
throw missingFieldException("field");
}
if (requiredCredential.matchExpression == null) {
throw missingFieldException("must-match");
}
return new RequiredPeerCredential(toField(requiredCredential.field), new HostGlobPattern(requiredCredential.matchExpression));
} | class TransportSecurityOptionsJsonSerializer {
private static final ObjectMapper mapper = new ObjectMapper();
/**
 * Reads a transport-security JSON configuration from {@code in}.
 *
 * @throws UncheckedIOException if the stream cannot be read or parsed as JSON
 * @throws IllegalArgumentException if required fields are missing or inconsistent
 */
public TransportSecurityOptions deserialize(InputStream in) {
    try {
        // Jackson binds the raw JSON to the entity DTO; semantic validation
        // happens in the mapping step below.
        TransportSecurityOptionsEntity entity = mapper.readValue(in, TransportSecurityOptionsEntity.class);
        return toTransportSecurityOptions(entity);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Writes {@code options} to {@code out} as JSON.
 *
 * @throws UncheckedIOException if writing to the stream fails
 */
public void serialize(OutputStream out, TransportSecurityOptions options) {
    try {
        // Convert the domain object back to the JSON-shaped entity first.
        mapper.writeValue(out, toTransportSecurityOptionsEntity(options));
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Converts the deserialized JSON entity into the domain object, validating
 * field combinations on the way.
 *
 * @throws IllegalArgumentException if only one of 'private-key'/'certificates' is set
 */
private static TransportSecurityOptions toTransportSecurityOptions(TransportSecurityOptionsEntity entity) {
    TransportSecurityOptions.Builder builder = new TransportSecurityOptions.Builder();
    Files files = entity.files;
    if (files != null) {
        // The certificate and its private key are only meaningful as a pair.
        if (files.certificatesFile != null && files.privateKeyFile != null) {
            builder.withCertificate(Paths.get(files.certificatesFile), Paths.get(files.privateKeyFile));
        } else if (files.certificatesFile != null || files.privateKeyFile != null) {
            throw new IllegalArgumentException("Both 'private-key' and 'certificates' must be configured together");
        }
        if (files.caCertificatesFile != null) {
            builder.withCaCertificate(Paths.get(files.caCertificatesFile));
        }
    }
    List<AuthorizedPeer> authorizedPeersEntity = entity.authorizedPeers;
    // An empty peer list leaves authorized peers unset rather than set-but-empty.
    if (authorizedPeersEntity.size() > 0) {
        builder.withAuthorizedPeers(new AuthorizedPeers(toPeerPolicies(authorizedPeersEntity)));
    }
    return builder.build();
}
/** Maps each serialized peer entry to its policy; duplicates collapse in the set. */
private static Set<PeerPolicy> toPeerPolicies(List<AuthorizedPeer> authorizedPeersEntity) {
    return authorizedPeersEntity.stream()
            .map(TransportSecurityOptionsJsonSerializer::toPeerPolicy)
            .collect(toSet());
}
/**
 * Validates and converts one serialized authorized peer.
 *
 * @throws IllegalArgumentException if 'name' or 'required-credentials' is missing
 */
private static PeerPolicy toPeerPolicy(AuthorizedPeer authorizedPeer) {
    if (authorizedPeer.name == null) {
        throw missingFieldException("name");
    }
    if (authorizedPeer.requiredCredentials.isEmpty()) {
        throw missingFieldException("required-credentials");
    }
    return new PeerPolicy(authorizedPeer.name, toRequestPeerCredentials(authorizedPeer.requiredCredentials));
}
/** Converts each serialized required credential, validating element-by-element. */
private static List<RequiredPeerCredential> toRequestPeerCredentials(List<RequiredCredential> requiredCredentials) {
    return requiredCredentials.stream()
            .map(TransportSecurityOptionsJsonSerializer::toRequiredPeerCredential)
            .collect(toList());
}
/**
 * Maps the JSON credential-field enum to the domain enum.
 *
 * @throws IllegalArgumentException for unmapped values
 */
private static RequiredPeerCredential.Field toField(CredentialField field) {
    switch (field) {
        case CN: return RequiredPeerCredential.Field.CN;
        case SAN_DNS: return RequiredPeerCredential.Field.SAN_DNS;
        default: throw new IllegalArgumentException("Invalid field type: " + field);
    }
}
/**
 * Converts the domain object back into the JSON-shaped entity
 * (inverse of {@code toTransportSecurityOptions}); absent optionals
 * simply leave the corresponding entity fields null/empty.
 */
private static TransportSecurityOptionsEntity toTransportSecurityOptionsEntity(TransportSecurityOptions options) {
    TransportSecurityOptionsEntity entity = new TransportSecurityOptionsEntity();
    entity.files = new Files();
    options.getCaCertificatesFile().ifPresent(value -> entity.files.caCertificatesFile = value.toString());
    options.getCertificatesFile().ifPresent(value -> entity.files.certificatesFile = value.toString());
    options.getPrivateKeyFile().ifPresent(value -> entity.files.privateKeyFile = value.toString());
    options.getAuthorizedPeers().ifPresent( authorizedPeers -> {
        // Flatten each policy and its credentials into the serializable lists.
        for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
            AuthorizedPeer authorizedPeer = new AuthorizedPeer();
            authorizedPeer.name = peerPolicy.peerName();
            for (RequiredPeerCredential requiredPeerCredential : peerPolicy.requiredCredentials()) {
                RequiredCredential requiredCredential = new RequiredCredential();
                requiredCredential.field = toField(requiredPeerCredential.field());
                requiredCredential.matchExpression = requiredPeerCredential.pattern().asString();
                authorizedPeer.requiredCredentials.add(requiredCredential);
            }
            entity.authorizedPeers.add(authorizedPeer);
        }
    });
    return entity;
}
/**
 * Maps the domain credential-field enum to the JSON enum
 * (inverse of the {@code CredentialField} overload).
 *
 * @throws IllegalArgumentException for unmapped values
 */
private static CredentialField toField(RequiredPeerCredential.Field field) {
    switch (field) {
        case CN: return CredentialField.CN;
        case SAN_DNS: return CredentialField.SAN_DNS;
        default: throw new IllegalArgumentException("Invalid field type: " + field);
    }
}
/** Builds the uniform validation error for an absent JSON field, e.g. {@code 'name' missing}. */
private static IllegalArgumentException missingFieldException(String fieldName) {
    return new IllegalArgumentException("'" + fieldName + "' missing");
}
} | class TransportSecurityOptionsJsonSerializer {
private static final ObjectMapper mapper = new ObjectMapper();
public TransportSecurityOptions deserialize(InputStream in) {
try {
TransportSecurityOptionsEntity entity = mapper.readValue(in, TransportSecurityOptionsEntity.class);
return toTransportSecurityOptions(entity);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public void serialize(OutputStream out, TransportSecurityOptions options) {
try {
mapper.writeValue(out, toTransportSecurityOptionsEntity(options));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private static TransportSecurityOptions toTransportSecurityOptions(TransportSecurityOptionsEntity entity) {
TransportSecurityOptions.Builder builder = new TransportSecurityOptions.Builder();
Files files = entity.files;
if (files != null) {
if (files.certificatesFile != null && files.privateKeyFile != null) {
builder.withCertificates(Paths.get(files.certificatesFile), Paths.get(files.privateKeyFile));
} else if (files.certificatesFile != null || files.privateKeyFile != null) {
throw new IllegalArgumentException("Both 'private-key' and 'certificates' must be configured together");
}
if (files.caCertificatesFile != null) {
builder.withCaCertificates(Paths.get(files.caCertificatesFile));
}
}
List<AuthorizedPeer> authorizedPeersEntity = entity.authorizedPeers;
if (authorizedPeersEntity.size() > 0) {
builder.withAuthorizedPeers(new AuthorizedPeers(toPeerPolicies(authorizedPeersEntity)));
}
return builder.build();
}
private static Set<PeerPolicy> toPeerPolicies(List<AuthorizedPeer> authorizedPeersEntity) {
return authorizedPeersEntity.stream()
.map(TransportSecurityOptionsJsonSerializer::toPeerPolicy)
.collect(toSet());
}
private static PeerPolicy toPeerPolicy(AuthorizedPeer authorizedPeer) {
if (authorizedPeer.name == null) {
throw missingFieldException("name");
}
if (authorizedPeer.requiredCredentials.isEmpty()) {
throw missingFieldException("required-credentials");
}
return new PeerPolicy(authorizedPeer.name, toRequestPeerCredentials(authorizedPeer.requiredCredentials));
}
private static List<RequiredPeerCredential> toRequestPeerCredentials(List<RequiredCredential> requiredCredentials) {
return requiredCredentials.stream()
.map(TransportSecurityOptionsJsonSerializer::toRequiredPeerCredential)
.collect(toList());
}
private static RequiredPeerCredential.Field toField(CredentialField field) {
switch (field) {
case CN: return RequiredPeerCredential.Field.CN;
case SAN_DNS: return RequiredPeerCredential.Field.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
private static TransportSecurityOptionsEntity toTransportSecurityOptionsEntity(TransportSecurityOptions options) {
TransportSecurityOptionsEntity entity = new TransportSecurityOptionsEntity();
entity.files = new Files();
options.getCaCertificatesFile().ifPresent(value -> entity.files.caCertificatesFile = value.toString());
options.getCertificatesFile().ifPresent(value -> entity.files.certificatesFile = value.toString());
options.getPrivateKeyFile().ifPresent(value -> entity.files.privateKeyFile = value.toString());
options.getAuthorizedPeers().ifPresent( authorizedPeers -> {
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
AuthorizedPeer authorizedPeer = new AuthorizedPeer();
authorizedPeer.name = peerPolicy.peerName();
for (RequiredPeerCredential requiredPeerCredential : peerPolicy.requiredCredentials()) {
RequiredCredential requiredCredential = new RequiredCredential();
requiredCredential.field = toField(requiredPeerCredential.field());
requiredCredential.matchExpression = requiredPeerCredential.pattern().asString();
authorizedPeer.requiredCredentials.add(requiredCredential);
}
entity.authorizedPeers.add(authorizedPeer);
}
});
return entity;
}
private static CredentialField toField(RequiredPeerCredential.Field field) {
switch (field) {
case CN: return CredentialField.CN;
case SAN_DNS: return CredentialField.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
private static IllegalArgumentException missingFieldException(String fieldName) {
return new IllegalArgumentException(String.format("'%s' missing", fieldName));
}
} |
Same as above. | private void applyAcl(ContainerName containerName, Acl acl) {
if (isAclActive(containerName, acl)) {
return;
}
final Command flush = new FlushCommand(Chain.INPUT);
final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
log.info("Start modifying ACL rules");
try {
log.info("Running ACL command '" + flush.asString());
dockerOperations.executeCommandInNetworkNamespace(containerName, flush.asArray(IPTABLES_COMMAND));
acl.toCommands().forEach(command -> {
log.info("Running ACL command '" + command.asString());
dockerOperations.executeCommandInNetworkNamespace(containerName,
command.asArray(IPTABLES_COMMAND));
});
containerAcls.put(containerName, acl);
} catch (Exception e) {
log.error("Exception occurred while configuring ACLs, attempting rollback", e);
try {
dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
} catch (Exception ne) {
log.error("Rollback failed, giving up", ne);
}
}
log.info("Finished modifying ACL rules");
} | log.info("Running ACL command '" + command.asString()); | private void applyAcl(ContainerName containerName, Acl acl) {
if (isAclActive(containerName, acl)) {
return;
}
final Command flush = new FlushCommand(Chain.INPUT);
final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
log.info("Start modifying ACL rules");
try {
log.info("Running ACL command '" + flush.asString() + "'");
dockerOperations.executeCommandInNetworkNamespace(containerName, flush.asArray(IPTABLES_COMMAND));
acl.toCommands().forEach(command -> {
log.info("Running ACL command '" + command.asString() + "'");
dockerOperations.executeCommandInNetworkNamespace(containerName,
command.asArray(IPTABLES_COMMAND));
});
containerAcls.put(containerName, acl);
} catch (Exception e) {
log.error("Exception occurred while configuring ACLs, attempting rollback", e);
try {
dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
} catch (Exception ne) {
log.error("Rollback failed, giving up", ne);
}
}
log.info("Finished modifying ACL rules");
} | class AclMaintainer implements Runnable {
private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
private static final String IPTABLES_COMMAND = "ip6tables";
private final DockerOperations dockerOperations;
private final NodeRepository nodeRepository;
private final String nodeAdminHostname;
private final Map<ContainerName, Acl> containerAcls;
/**
 * Creates a maintainer that keeps container ACLs in sync.
 *
 * @param dockerOperations used to run iptables commands inside container network namespaces
 * @param nodeRepository   source of the per-container ACL specs
 * @param nodeAdminHostname hostname whose container ACL specs are fetched
 */
public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
                     String nodeAdminHostname) {
    this.dockerOperations = dockerOperations;
    this.nodeRepository = nodeRepository;
    this.nodeAdminHostname = nodeAdminHostname;
    // Cache of the last ACL applied per container, to skip redundant re-application.
    this.containerAcls = new HashMap<>();
}
/** Returns true when {@code acl} equals the ACL most recently applied to the container. */
private boolean isAclActive(ContainerName containerName, Acl acl) {
    Acl currentlyApplied = containerAcls.get(containerName);
    return currentlyApplied != null && acl.equals(currentlyApplied);
}
/**
 * Fetches the ACL specs for this host and applies them to every running
 * managed container that has a spec. Synchronized so overlapping runs
 * cannot interleave iptables updates.
 */
private synchronized void configureAcls() {
    // Group the flat spec list by the container that should trust each entry.
    final Map<ContainerName, List<ContainerAclSpec>> aclSpecsGroupedByContainerName = nodeRepository
            .getContainerAclSpecs(nodeAdminHostname).stream()
            .collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));
    dockerOperations
            .getAllManagedContainers().stream()
            .filter(container -> container.state.isRunning())
            // Pair each container with its specs; containers with no spec are dropped below.
            .map(container -> new Pair<>(container, aclSpecsGroupedByContainerName.get(container.name)))
            .filter(pair -> pair.getSecond() != null)
            .forEach(pair ->
                    applyAcl(pair.getFirst().name, new Acl(pair.getFirst().pid, pair.getSecond())));
}
@Override
public void run() {
    try {
        configureAcls();
    } catch (Throwable failure) {
        // Catch everything: an escaping exception would kill the scheduling thread.
        log.error("Failed to configure ACLs", failure);
    }
}
} | class AclMaintainer implements Runnable {
private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
private static final String IPTABLES_COMMAND = "ip6tables";
private final DockerOperations dockerOperations;
private final NodeRepository nodeRepository;
private final String nodeAdminHostname;
private final Map<ContainerName, Acl> containerAcls;
public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
String nodeAdminHostname) {
this.dockerOperations = dockerOperations;
this.nodeRepository = nodeRepository;
this.nodeAdminHostname = nodeAdminHostname;
this.containerAcls = new HashMap<>();
}
private boolean isAclActive(ContainerName containerName, Acl acl) {
return Optional.ofNullable(containerAcls.get(containerName))
.map(acl::equals)
.orElse(false);
}
private synchronized void configureAcls() {
final Map<ContainerName, List<ContainerAclSpec>> aclSpecsGroupedByContainerName = nodeRepository
.getContainerAclSpecs(nodeAdminHostname).stream()
.collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));
dockerOperations
.getAllManagedContainers().stream()
.filter(container -> container.state.isRunning())
.map(container -> new Pair<>(container, aclSpecsGroupedByContainerName.get(container.name)))
.filter(pair -> pair.getSecond() != null)
.forEach(pair ->
applyAcl(pair.getFirst().name, new Acl(pair.getFirst().pid, pair.getSecond())));
}
@Override
public void run() {
try {
configureAcls();
} catch (Throwable t) {
log.error("Failed to configure ACLs", t);
}
}
} |
Missing `'` at the end? | private void applyAcl(ContainerName containerName, Acl acl) {
if (isAclActive(containerName, acl)) {
return;
}
final Command flush = new FlushCommand(Chain.INPUT);
final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
log.info("Start modifying ACL rules");
try {
log.info("Running ACL command '" + flush.asString());
dockerOperations.executeCommandInNetworkNamespace(containerName, flush.asArray(IPTABLES_COMMAND));
acl.toCommands().forEach(command -> {
log.info("Running ACL command '" + command.asString());
dockerOperations.executeCommandInNetworkNamespace(containerName,
command.asArray(IPTABLES_COMMAND));
});
containerAcls.put(containerName, acl);
} catch (Exception e) {
log.error("Exception occurred while configuring ACLs, attempting rollback", e);
try {
dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
} catch (Exception ne) {
log.error("Rollback failed, giving up", ne);
}
}
log.info("Finished modifying ACL rules");
} | log.info("Running ACL command '" + flush.asString()); | private void applyAcl(ContainerName containerName, Acl acl) {
if (isAclActive(containerName, acl)) {
return;
}
final Command flush = new FlushCommand(Chain.INPUT);
final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
log.info("Start modifying ACL rules");
try {
log.info("Running ACL command '" + flush.asString() + "'");
dockerOperations.executeCommandInNetworkNamespace(containerName, flush.asArray(IPTABLES_COMMAND));
acl.toCommands().forEach(command -> {
log.info("Running ACL command '" + command.asString() + "'");
dockerOperations.executeCommandInNetworkNamespace(containerName,
command.asArray(IPTABLES_COMMAND));
});
containerAcls.put(containerName, acl);
} catch (Exception e) {
log.error("Exception occurred while configuring ACLs, attempting rollback", e);
try {
dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
} catch (Exception ne) {
log.error("Rollback failed, giving up", ne);
}
}
log.info("Finished modifying ACL rules");
} | class AclMaintainer implements Runnable {
private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
private static final String IPTABLES_COMMAND = "ip6tables";
private final DockerOperations dockerOperations;
private final NodeRepository nodeRepository;
private final String nodeAdminHostname;
private final Map<ContainerName, Acl> containerAcls;
public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
String nodeAdminHostname) {
this.dockerOperations = dockerOperations;
this.nodeRepository = nodeRepository;
this.nodeAdminHostname = nodeAdminHostname;
this.containerAcls = new HashMap<>();
}
private boolean isAclActive(ContainerName containerName, Acl acl) {
return Optional.ofNullable(containerAcls.get(containerName))
.map(acl::equals)
.orElse(false);
}
private synchronized void configureAcls() {
final Map<ContainerName, List<ContainerAclSpec>> aclSpecsGroupedByContainerName = nodeRepository
.getContainerAclSpecs(nodeAdminHostname).stream()
.collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));
dockerOperations
.getAllManagedContainers().stream()
.filter(container -> container.state.isRunning())
.map(container -> new Pair<>(container, aclSpecsGroupedByContainerName.get(container.name)))
.filter(pair -> pair.getSecond() != null)
.forEach(pair ->
applyAcl(pair.getFirst().name, new Acl(pair.getFirst().pid, pair.getSecond())));
}
@Override
public void run() {
try {
configureAcls();
} catch (Throwable t) {
log.error("Failed to configure ACLs", t);
}
}
} | class AclMaintainer implements Runnable {
private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
private static final String IPTABLES_COMMAND = "ip6tables";
private final DockerOperations dockerOperations;
private final NodeRepository nodeRepository;
private final String nodeAdminHostname;
private final Map<ContainerName, Acl> containerAcls;
public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
String nodeAdminHostname) {
this.dockerOperations = dockerOperations;
this.nodeRepository = nodeRepository;
this.nodeAdminHostname = nodeAdminHostname;
this.containerAcls = new HashMap<>();
}
private boolean isAclActive(ContainerName containerName, Acl acl) {
return Optional.ofNullable(containerAcls.get(containerName))
.map(acl::equals)
.orElse(false);
}
private synchronized void configureAcls() {
final Map<ContainerName, List<ContainerAclSpec>> aclSpecsGroupedByContainerName = nodeRepository
.getContainerAclSpecs(nodeAdminHostname).stream()
.collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));
dockerOperations
.getAllManagedContainers().stream()
.filter(container -> container.state.isRunning())
.map(container -> new Pair<>(container, aclSpecsGroupedByContainerName.get(container.name)))
.filter(pair -> pair.getSecond() != null)
.forEach(pair ->
applyAcl(pair.getFirst().name, new Acl(pair.getFirst().pid, pair.getSecond())));
}
@Override
public void run() {
try {
configureAcls();
} catch (Throwable t) {
log.error("Failed to configure ACLs", t);
}
}
} |
Yes, will fix | private void applyAcl(ContainerName containerName, Acl acl) {
if (isAclActive(containerName, acl)) {
return;
}
final Command flush = new FlushCommand(Chain.INPUT);
final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
log.info("Start modifying ACL rules");
try {
log.info("Running ACL command '" + flush.asString());
dockerOperations.executeCommandInNetworkNamespace(containerName, flush.asArray(IPTABLES_COMMAND));
acl.toCommands().forEach(command -> {
log.info("Running ACL command '" + command.asString());
dockerOperations.executeCommandInNetworkNamespace(containerName,
command.asArray(IPTABLES_COMMAND));
});
containerAcls.put(containerName, acl);
} catch (Exception e) {
log.error("Exception occurred while configuring ACLs, attempting rollback", e);
try {
dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
} catch (Exception ne) {
log.error("Rollback failed, giving up", ne);
}
}
log.info("Finished modifying ACL rules");
} | log.info("Running ACL command '" + flush.asString()); | private void applyAcl(ContainerName containerName, Acl acl) {
if (isAclActive(containerName, acl)) {
return;
}
final Command flush = new FlushCommand(Chain.INPUT);
final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
log.info("Start modifying ACL rules");
try {
log.info("Running ACL command '" + flush.asString() + "'");
dockerOperations.executeCommandInNetworkNamespace(containerName, flush.asArray(IPTABLES_COMMAND));
acl.toCommands().forEach(command -> {
log.info("Running ACL command '" + command.asString() + "'");
dockerOperations.executeCommandInNetworkNamespace(containerName,
command.asArray(IPTABLES_COMMAND));
});
containerAcls.put(containerName, acl);
} catch (Exception e) {
log.error("Exception occurred while configuring ACLs, attempting rollback", e);
try {
dockerOperations.executeCommandInNetworkNamespace(containerName, rollback.asArray(IPTABLES_COMMAND));
} catch (Exception ne) {
log.error("Rollback failed, giving up", ne);
}
}
log.info("Finished modifying ACL rules");
} | class AclMaintainer implements Runnable {
private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
private static final String IPTABLES_COMMAND = "ip6tables";
private final DockerOperations dockerOperations;
private final NodeRepository nodeRepository;
private final String nodeAdminHostname;
private final Map<ContainerName, Acl> containerAcls;
public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
String nodeAdminHostname) {
this.dockerOperations = dockerOperations;
this.nodeRepository = nodeRepository;
this.nodeAdminHostname = nodeAdminHostname;
this.containerAcls = new HashMap<>();
}
private boolean isAclActive(ContainerName containerName, Acl acl) {
return Optional.ofNullable(containerAcls.get(containerName))
.map(acl::equals)
.orElse(false);
}
private synchronized void configureAcls() {
final Map<ContainerName, List<ContainerAclSpec>> aclSpecsGroupedByContainerName = nodeRepository
.getContainerAclSpecs(nodeAdminHostname).stream()
.collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));
dockerOperations
.getAllManagedContainers().stream()
.filter(container -> container.state.isRunning())
.map(container -> new Pair<>(container, aclSpecsGroupedByContainerName.get(container.name)))
.filter(pair -> pair.getSecond() != null)
.forEach(pair ->
applyAcl(pair.getFirst().name, new Acl(pair.getFirst().pid, pair.getSecond())));
}
@Override
public void run() {
try {
configureAcls();
} catch (Throwable t) {
log.error("Failed to configure ACLs", t);
}
}
} | class AclMaintainer implements Runnable {
private static final PrefixLogger log = PrefixLogger.getNodeAdminLogger(AclMaintainer.class);
private static final String IPTABLES_COMMAND = "ip6tables";
private final DockerOperations dockerOperations;
private final NodeRepository nodeRepository;
private final String nodeAdminHostname;
private final Map<ContainerName, Acl> containerAcls;
public AclMaintainer(DockerOperations dockerOperations, NodeRepository nodeRepository,
String nodeAdminHostname) {
this.dockerOperations = dockerOperations;
this.nodeRepository = nodeRepository;
this.nodeAdminHostname = nodeAdminHostname;
this.containerAcls = new HashMap<>();
}
private boolean isAclActive(ContainerName containerName, Acl acl) {
return Optional.ofNullable(containerAcls.get(containerName))
.map(acl::equals)
.orElse(false);
}
private synchronized void configureAcls() {
final Map<ContainerName, List<ContainerAclSpec>> aclSpecsGroupedByContainerName = nodeRepository
.getContainerAclSpecs(nodeAdminHostname).stream()
.collect(Collectors.groupingBy(ContainerAclSpec::trustedBy));
dockerOperations
.getAllManagedContainers().stream()
.filter(container -> container.state.isRunning())
.map(container -> new Pair<>(container, aclSpecsGroupedByContainerName.get(container.name)))
.filter(pair -> pair.getSecond() != null)
.forEach(pair ->
applyAcl(pair.getFirst().name, new Acl(pair.getFirst().pid, pair.getSecond())));
}
@Override
public void run() {
try {
configureAcls();
} catch (Throwable t) {
log.error("Failed to configure ACLs", t);
}
}
} |
Also here | private static RequiredPeerCredential toRequiredPeerCredential(RequiredCredential requiredCredential) {
if (requiredCredential.field == null) {
throw new IllegalArgumentException("field");
}
if (requiredCredential.matchExpression == null) {
throw new IllegalArgumentException("must-match");
}
return new RequiredPeerCredential(toField(requiredCredential.field), new HostGlobPattern(requiredCredential.matchExpression));
} | throw new IllegalArgumentException("must-match"); | private static RequiredPeerCredential toRequiredPeerCredential(RequiredCredential requiredCredential) {
if (requiredCredential.field == null) {
throw missingFieldException("field");
}
if (requiredCredential.matchExpression == null) {
throw missingFieldException("must-match");
}
return new RequiredPeerCredential(toField(requiredCredential.field), new HostGlobPattern(requiredCredential.matchExpression));
} | class TransportSecurityOptionsJsonSerializer {
private static final ObjectMapper mapper = new ObjectMapper();
public TransportSecurityOptions deserialize(InputStream in) {
try {
TransportSecurityOptionsEntity entity = mapper.readValue(in, TransportSecurityOptionsEntity.class);
return toTransportSecurityOptions(entity);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public void serialize(OutputStream out, TransportSecurityOptions options) {
try {
mapper.writeValue(out, toTransportSecurityOptionsEntity(options));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private static TransportSecurityOptions toTransportSecurityOptions(TransportSecurityOptionsEntity entity) {
TransportSecurityOptions.Builder builder = new TransportSecurityOptions.Builder();
Files files = entity.files;
if (files != null) {
if (files.certificatesFile != null && files.privateKeyFile != null) {
builder.withCertificate(Paths.get(files.certificatesFile), Paths.get(files.privateKeyFile));
} else if (files.certificatesFile != null || files.privateKeyFile != null) {
throw new IllegalArgumentException("Both 'private-key' and 'certificates' must be configured together");
}
if (files.caCertificatesFile != null) {
builder.withCaCertificate(Paths.get(files.caCertificatesFile));
}
}
List<AuthorizedPeer> authorizedPeersEntity = entity.authorizedPeers;
if (authorizedPeersEntity.size() > 0) {
builder.withAuthorizedPeers(new AuthorizedPeers(toPeerPolicies(authorizedPeersEntity)));
}
return builder.build();
}
private static Set<PeerPolicy> toPeerPolicies(List<AuthorizedPeer> authorizedPeersEntity) {
return authorizedPeersEntity.stream()
.map(TransportSecurityOptionsJsonSerializer::toPeerPolicy)
.collect(toSet());
}
private static PeerPolicy toPeerPolicy(AuthorizedPeer authorizedPeer) {
if (authorizedPeer.name == null) {
throw missingFieldException("name");
}
if (authorizedPeer.requiredCredentials.isEmpty()) {
throw missingFieldException("required-credentials");
}
return new PeerPolicy(authorizedPeer.name, toRequestPeerCredentials(authorizedPeer.requiredCredentials));
}
private static List<RequiredPeerCredential> toRequestPeerCredentials(List<RequiredCredential> requiredCredentials) {
return requiredCredentials.stream()
.map(TransportSecurityOptionsJsonSerializer::toRequiredPeerCredential)
.collect(toList());
}
private static RequiredPeerCredential.Field toField(CredentialField field) {
switch (field) {
case CN: return RequiredPeerCredential.Field.CN;
case SAN_DNS: return RequiredPeerCredential.Field.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
private static TransportSecurityOptionsEntity toTransportSecurityOptionsEntity(TransportSecurityOptions options) {
TransportSecurityOptionsEntity entity = new TransportSecurityOptionsEntity();
entity.files = new Files();
options.getCaCertificatesFile().ifPresent(value -> entity.files.caCertificatesFile = value.toString());
options.getCertificatesFile().ifPresent(value -> entity.files.certificatesFile = value.toString());
options.getPrivateKeyFile().ifPresent(value -> entity.files.privateKeyFile = value.toString());
options.getAuthorizedPeers().ifPresent( authorizedPeers -> {
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
AuthorizedPeer authorizedPeer = new AuthorizedPeer();
authorizedPeer.name = peerPolicy.peerName();
for (RequiredPeerCredential requiredPeerCredential : peerPolicy.requiredCredentials()) {
RequiredCredential requiredCredential = new RequiredCredential();
requiredCredential.field = toField(requiredPeerCredential.field());
requiredCredential.matchExpression = requiredPeerCredential.pattern().asString();
authorizedPeer.requiredCredentials.add(requiredCredential);
}
entity.authorizedPeers.add(authorizedPeer);
}
});
return entity;
}
private static CredentialField toField(RequiredPeerCredential.Field field) {
switch (field) {
case CN: return CredentialField.CN;
case SAN_DNS: return CredentialField.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
private static IllegalArgumentException missingFieldException(String fieldName) {
return new IllegalArgumentException(String.format("'%s' missing", fieldName));
}
} | class TransportSecurityOptionsJsonSerializer {
private static final ObjectMapper mapper = new ObjectMapper();
public TransportSecurityOptions deserialize(InputStream in) {
try {
TransportSecurityOptionsEntity entity = mapper.readValue(in, TransportSecurityOptionsEntity.class);
return toTransportSecurityOptions(entity);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public void serialize(OutputStream out, TransportSecurityOptions options) {
try {
mapper.writeValue(out, toTransportSecurityOptionsEntity(options));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private static TransportSecurityOptions toTransportSecurityOptions(TransportSecurityOptionsEntity entity) {
TransportSecurityOptions.Builder builder = new TransportSecurityOptions.Builder();
Files files = entity.files;
if (files != null) {
if (files.certificatesFile != null && files.privateKeyFile != null) {
builder.withCertificates(Paths.get(files.certificatesFile), Paths.get(files.privateKeyFile));
} else if (files.certificatesFile != null || files.privateKeyFile != null) {
throw new IllegalArgumentException("Both 'private-key' and 'certificates' must be configured together");
}
if (files.caCertificatesFile != null) {
builder.withCaCertificates(Paths.get(files.caCertificatesFile));
}
}
List<AuthorizedPeer> authorizedPeersEntity = entity.authorizedPeers;
if (authorizedPeersEntity.size() > 0) {
builder.withAuthorizedPeers(new AuthorizedPeers(toPeerPolicies(authorizedPeersEntity)));
}
return builder.build();
}
private static Set<PeerPolicy> toPeerPolicies(List<AuthorizedPeer> authorizedPeersEntity) {
return authorizedPeersEntity.stream()
.map(TransportSecurityOptionsJsonSerializer::toPeerPolicy)
.collect(toSet());
}
private static PeerPolicy toPeerPolicy(AuthorizedPeer authorizedPeer) {
if (authorizedPeer.name == null) {
throw missingFieldException("name");
}
if (authorizedPeer.requiredCredentials.isEmpty()) {
throw missingFieldException("required-credentials");
}
return new PeerPolicy(authorizedPeer.name, toRequestPeerCredentials(authorizedPeer.requiredCredentials));
}
private static List<RequiredPeerCredential> toRequestPeerCredentials(List<RequiredCredential> requiredCredentials) {
return requiredCredentials.stream()
.map(TransportSecurityOptionsJsonSerializer::toRequiredPeerCredential)
.collect(toList());
}
private static RequiredPeerCredential.Field toField(CredentialField field) {
switch (field) {
case CN: return RequiredPeerCredential.Field.CN;
case SAN_DNS: return RequiredPeerCredential.Field.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
private static TransportSecurityOptionsEntity toTransportSecurityOptionsEntity(TransportSecurityOptions options) {
TransportSecurityOptionsEntity entity = new TransportSecurityOptionsEntity();
entity.files = new Files();
options.getCaCertificatesFile().ifPresent(value -> entity.files.caCertificatesFile = value.toString());
options.getCertificatesFile().ifPresent(value -> entity.files.certificatesFile = value.toString());
options.getPrivateKeyFile().ifPresent(value -> entity.files.privateKeyFile = value.toString());
options.getAuthorizedPeers().ifPresent( authorizedPeers -> {
for (PeerPolicy peerPolicy : authorizedPeers.peerPolicies()) {
AuthorizedPeer authorizedPeer = new AuthorizedPeer();
authorizedPeer.name = peerPolicy.peerName();
for (RequiredPeerCredential requiredPeerCredential : peerPolicy.requiredCredentials()) {
RequiredCredential requiredCredential = new RequiredCredential();
requiredCredential.field = toField(requiredPeerCredential.field());
requiredCredential.matchExpression = requiredPeerCredential.pattern().asString();
authorizedPeer.requiredCredentials.add(requiredCredential);
}
entity.authorizedPeers.add(authorizedPeer);
}
});
return entity;
}
private static CredentialField toField(RequiredPeerCredential.Field field) {
switch (field) {
case CN: return CredentialField.CN;
case SAN_DNS: return CredentialField.SAN_DNS;
default: throw new IllegalArgumentException("Invalid field type: " + field);
}
}
private static IllegalArgumentException missingFieldException(String fieldName) {
return new IllegalArgumentException(String.format("'%s' missing", fieldName));
}
} |
I think the de-facto standard is to use LogLevel.DEBUG | private void warnOnStaleContainers() {
log.log(Level.FINE, "Checking for stale containers");
try {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
if (snapshot.isEmpty()) return;
logWarning(snapshot);
} catch (Throwable t) {
log.log(Level.WARNING, "Watchdog task died!", t);
}
} | log.log(Level.FINE, "Checking for stale containers"); | private void warnOnStaleContainers() {
log.log(Level.FINE, "Checking for stale containers");
try {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
if (snapshot.isEmpty()) return;
logWarning(snapshot);
} catch (Throwable t) {
log.log(Level.WARNING, "Watchdog task died!", t);
}
} | class ActiveContainerDeactivationWatchdog implements ActiveContainerMetrics, AutoCloseable {
static final Duration WATCHDOG_FREQUENCY = Duration.ofMinutes(20);
static final Duration ACTIVE_CONTAINER_GRACE_PERIOD = Duration.ofHours(1);
static final Duration GC_TRIGGER_FREQUENCY = ACTIVE_CONTAINER_GRACE_PERIOD.minusMinutes(5);
static final Duration ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY = Duration.ofMinutes(5);
private static final Logger log = Logger.getLogger(ActiveContainerDeactivationWatchdog.class.getName());
private final Object monitor = new Object();
private final WeakHashMap<ActiveContainer, LifecycleStats> deactivatedContainers = new WeakHashMap<>();
private final ReferenceQueue<ActiveContainer> garbageCollectedContainers = new ReferenceQueue<>();
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
private final Set<ActiveContainerPhantomReference> destructorReferences = new HashSet<>();
private final ScheduledExecutorService scheduler;
private final Clock clock;
private ActiveContainer currentContainer;
private Instant currentContainerActivationTime;
@Inject
ActiveContainerDeactivationWatchdog() {
this(
Clock.systemUTC(),
new ScheduledThreadPoolExecutor(1, runnable -> {
Thread thread = new Thread(runnable, "active-container-deactivation-watchdog");
thread.setDaemon(true);
return thread;
}));
}
ActiveContainerDeactivationWatchdog(Clock clock, ScheduledExecutorService scheduler) {
this.clock = clock;
this.scheduler = scheduler;
this.scheduler.scheduleAtFixedRate(this::warnOnStaleContainers,
WATCHDOG_FREQUENCY.getSeconds(),
WATCHDOG_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
this.scheduler.scheduleAtFixedRate(ActiveContainerDeactivationWatchdog::triggerGc,
GC_TRIGGER_FREQUENCY.getSeconds(),
GC_TRIGGER_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
this.scheduler.scheduleAtFixedRate(this::enforceDestructionOfGarbageCollectedContainers,
ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
}
void onContainerActivation(ActiveContainer nextContainer) {
synchronized (monitor) {
Instant now = clock.instant();
ActiveContainer previousContainer = currentContainer;
currentContainer = nextContainer;
currentContainerActivationTime = now;
if (previousContainer != null) {
deactivatedContainers.put(previousContainer, new LifecycleStats(currentContainerActivationTime, now));
destructorReferences.add(new ActiveContainerPhantomReference(previousContainer, garbageCollectedContainers));
}
}
}
@Override
public void emitMetrics(Metric metric) {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
long containersWithRetainedRefsCount = snapshot.stream()
.filter(c -> c.activeContainer.retainCount() > 0)
.count();
metric.set(TOTAL_DEACTIVATED_CONTAINERS, snapshot.size(), null);
metric.set(DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, containersWithRetainedRefsCount, null);
}
@Override
public void close() {
synchronized (monitor) {
scheduler.shutdown();
deactivatedContainers.clear();
destructorReferences.clear();
currentContainer = null;
currentContainerActivationTime = null;
}
}
private static void triggerGc() {
log.log(Level.FINE, "Triggering GC");
System.gc();
System.runFinalization();
}
private void enforceDestructionOfGarbageCollectedContainers() {
log.log(Level.FINE, "Enforcing destruction of GCed containers");
ActiveContainerPhantomReference reference;
while ((reference = (ActiveContainerPhantomReference) garbageCollectedContainers.poll()) != null) {
try {
reference.enforceDestruction();
} catch (Throwable t) {
log.log(Level.SEVERE, "Failed to do post-GC destruction of " + reference.containerName, t);
} finally {
destructorReferences.remove(reference);
reference.clear();
}
}
}
private List<DeactivatedContainer> getDeactivatedContainersSnapshot() {
Instant now = clock.instant();
synchronized (monitor) {
return deactivatedContainers.entrySet().stream()
.filter(e -> e.getValue().isPastGracePeriod(now))
.map(e -> new DeactivatedContainer(e.getKey(), e.getValue()))
.sorted(comparing(e -> e.lifecycleStats.timeActivated))
.collect(toList());
}
}
private static void logWarning(List<DeactivatedContainer> snapshot) {
log.warning(String.format("%s instances of deactivated containers are still alive.", snapshot.size()));
for (DeactivatedContainer deactivatedContainer : snapshot) {
log.warning(" - " + deactivatedContainer.toSummaryString());
}
}
private static class LifecycleStats {
public final Instant timeActivated;
public final Instant timeDeactivated;
public LifecycleStats(Instant timeActivated, Instant timeDeactivated) {
this.timeActivated = timeActivated;
this.timeDeactivated = timeDeactivated;
}
public boolean isPastGracePeriod(Instant instant) {
return timeDeactivated.plus(ACTIVE_CONTAINER_GRACE_PERIOD).isBefore(instant);
}
}
private static class DeactivatedContainer {
public final ActiveContainer activeContainer;
public final LifecycleStats lifecycleStats;
public DeactivatedContainer(ActiveContainer activeContainer, LifecycleStats lifecycleStats) {
this.activeContainer = activeContainer;
this.lifecycleStats = lifecycleStats;
}
public String toSummaryString() {
return String.format("%s: time activated = %s, time deactivated = %s, reference count = %d",
activeContainer.toString(),
lifecycleStats.timeActivated.toString(),
lifecycleStats.timeDeactivated.toString(),
activeContainer.retainCount());
}
}
private static class ActiveContainerPhantomReference extends PhantomReference<ActiveContainer> {
public final String containerName;
private final ActiveContainer.Destructor destructor;
public ActiveContainerPhantomReference(ActiveContainer activeContainer,
ReferenceQueue<? super ActiveContainer> q) {
super(activeContainer, q);
this.containerName = activeContainer.toString();
this.destructor = activeContainer.destructor;
}
public void enforceDestruction() {
boolean alreadyCompleted = destructor.destruct();
if (!alreadyCompleted) {
log.severe(containerName + " was not correctly cleaned up " +
"because of a resource leak or invalid use of reference counting.");
}
}
}
} | class ActiveContainerDeactivationWatchdog implements ActiveContainerMetrics, AutoCloseable {
static final Duration WATCHDOG_FREQUENCY = Duration.ofMinutes(20);
static final Duration ACTIVE_CONTAINER_GRACE_PERIOD = Duration.ofHours(1);
static final Duration GC_TRIGGER_FREQUENCY = ACTIVE_CONTAINER_GRACE_PERIOD.minusMinutes(5);
static final Duration ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY = Duration.ofMinutes(5);
private static final Logger log = Logger.getLogger(ActiveContainerDeactivationWatchdog.class.getName());
private final Object monitor = new Object();
private final WeakHashMap<ActiveContainer, LifecycleStats> deactivatedContainers = new WeakHashMap<>();
private final ReferenceQueue<ActiveContainer> garbageCollectedContainers = new ReferenceQueue<>();
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
private final Set<ActiveContainerPhantomReference> destructorReferences = new HashSet<>();
private final ScheduledExecutorService scheduler;
private final Clock clock;
private ActiveContainer currentContainer;
private Instant currentContainerActivationTime;
@Inject
ActiveContainerDeactivationWatchdog() {
this(
Clock.systemUTC(),
new ScheduledThreadPoolExecutor(1, runnable -> {
Thread thread = new Thread(runnable, "active-container-deactivation-watchdog");
thread.setDaemon(true);
return thread;
}));
}
ActiveContainerDeactivationWatchdog(Clock clock, ScheduledExecutorService scheduler) {
this.clock = clock;
this.scheduler = scheduler;
this.scheduler.scheduleAtFixedRate(this::warnOnStaleContainers,
WATCHDOG_FREQUENCY.getSeconds(),
WATCHDOG_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
this.scheduler.scheduleAtFixedRate(ActiveContainerDeactivationWatchdog::triggerGc,
GC_TRIGGER_FREQUENCY.getSeconds(),
GC_TRIGGER_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
this.scheduler.scheduleAtFixedRate(this::enforceDestructionOfGarbageCollectedContainers,
ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
}
void onContainerActivation(ActiveContainer nextContainer) {
synchronized (monitor) {
Instant now = clock.instant();
ActiveContainer previousContainer = currentContainer;
currentContainer = nextContainer;
currentContainerActivationTime = now;
if (previousContainer != null) {
deactivatedContainers.put(previousContainer, new LifecycleStats(currentContainerActivationTime, now));
destructorReferences.add(new ActiveContainerPhantomReference(previousContainer, garbageCollectedContainers));
}
}
}
@Override
public void emitMetrics(Metric metric) {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
long containersWithRetainedRefsCount = snapshot.stream()
.filter(c -> c.activeContainer.retainCount() > 0)
.count();
metric.set(TOTAL_DEACTIVATED_CONTAINERS, snapshot.size(), null);
metric.set(DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, containersWithRetainedRefsCount, null);
}
@Override
public void close() {
synchronized (monitor) {
scheduler.shutdown();
deactivatedContainers.clear();
destructorReferences.clear();
currentContainer = null;
currentContainerActivationTime = null;
}
}
private static void triggerGc() {
log.log(Level.FINE, "Triggering GC");
System.gc();
System.runFinalization();
}
private void enforceDestructionOfGarbageCollectedContainers() {
log.log(Level.FINE, "Enforcing destruction of GCed containers");
ActiveContainerPhantomReference reference;
while ((reference = (ActiveContainerPhantomReference) garbageCollectedContainers.poll()) != null) {
try {
reference.enforceDestruction();
} catch (Throwable t) {
log.log(Level.SEVERE, "Failed to do post-GC destruction of " + reference.containerName, t);
} finally {
destructorReferences.remove(reference);
reference.clear();
}
}
}
private List<DeactivatedContainer> getDeactivatedContainersSnapshot() {
Instant now = clock.instant();
synchronized (monitor) {
return deactivatedContainers.entrySet().stream()
.filter(e -> e.getValue().isPastGracePeriod(now))
.map(e -> new DeactivatedContainer(e.getKey(), e.getValue()))
.sorted(comparing(e -> e.lifecycleStats.timeActivated))
.collect(toList());
}
}
private static void logWarning(List<DeactivatedContainer> snapshot) {
log.warning(String.format("%s instances of deactivated containers are still alive.", snapshot.size()));
for (DeactivatedContainer deactivatedContainer : snapshot) {
log.warning(" - " + deactivatedContainer.toSummaryString());
}
}
private static class LifecycleStats {
public final Instant timeActivated;
public final Instant timeDeactivated;
public LifecycleStats(Instant timeActivated, Instant timeDeactivated) {
this.timeActivated = timeActivated;
this.timeDeactivated = timeDeactivated;
}
public boolean isPastGracePeriod(Instant instant) {
return timeDeactivated.plus(ACTIVE_CONTAINER_GRACE_PERIOD).isBefore(instant);
}
}
private static class DeactivatedContainer {
public final ActiveContainer activeContainer;
public final LifecycleStats lifecycleStats;
public DeactivatedContainer(ActiveContainer activeContainer, LifecycleStats lifecycleStats) {
this.activeContainer = activeContainer;
this.lifecycleStats = lifecycleStats;
}
public String toSummaryString() {
return String.format("%s: time activated = %s, time deactivated = %s, reference count = %d",
activeContainer.toString(),
lifecycleStats.timeActivated.toString(),
lifecycleStats.timeDeactivated.toString(),
activeContainer.retainCount());
}
}
private static class ActiveContainerPhantomReference extends PhantomReference<ActiveContainer> {
public final String containerName;
private final ActiveContainer.Destructor destructor;
public ActiveContainerPhantomReference(ActiveContainer activeContainer,
ReferenceQueue<? super ActiveContainer> q) {
super(activeContainer, q);
this.containerName = activeContainer.toString();
this.destructor = activeContainer.destructor;
}
public void enforceDestruction() {
boolean alreadyCompleted = destructor.destruct();
if (!alreadyCompleted) {
log.severe(containerName + " was not correctly cleaned up " +
"because of a resource leak or invalid use of reference counting.");
}
}
}
} |
`LogLevel` is not accessible as `vespalog` is not a dependency of `jdisc_core`. | private void warnOnStaleContainers() {
log.log(Level.FINE, "Checking for stale containers");
try {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
if (snapshot.isEmpty()) return;
logWarning(snapshot);
} catch (Throwable t) {
log.log(Level.WARNING, "Watchdog task died!", t);
}
} | log.log(Level.FINE, "Checking for stale containers"); | private void warnOnStaleContainers() {
log.log(Level.FINE, "Checking for stale containers");
try {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
if (snapshot.isEmpty()) return;
logWarning(snapshot);
} catch (Throwable t) {
log.log(Level.WARNING, "Watchdog task died!", t);
}
} | class ActiveContainerDeactivationWatchdog implements ActiveContainerMetrics, AutoCloseable {
static final Duration WATCHDOG_FREQUENCY = Duration.ofMinutes(20);
static final Duration ACTIVE_CONTAINER_GRACE_PERIOD = Duration.ofHours(1);
static final Duration GC_TRIGGER_FREQUENCY = ACTIVE_CONTAINER_GRACE_PERIOD.minusMinutes(5);
static final Duration ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY = Duration.ofMinutes(5);
private static final Logger log = Logger.getLogger(ActiveContainerDeactivationWatchdog.class.getName());
private final Object monitor = new Object();
private final WeakHashMap<ActiveContainer, LifecycleStats> deactivatedContainers = new WeakHashMap<>();
private final ReferenceQueue<ActiveContainer> garbageCollectedContainers = new ReferenceQueue<>();
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
private final Set<ActiveContainerPhantomReference> destructorReferences = new HashSet<>();
private final ScheduledExecutorService scheduler;
private final Clock clock;
private ActiveContainer currentContainer;
private Instant currentContainerActivationTime;
@Inject
ActiveContainerDeactivationWatchdog() {
this(
Clock.systemUTC(),
new ScheduledThreadPoolExecutor(1, runnable -> {
Thread thread = new Thread(runnable, "active-container-deactivation-watchdog");
thread.setDaemon(true);
return thread;
}));
}
ActiveContainerDeactivationWatchdog(Clock clock, ScheduledExecutorService scheduler) {
this.clock = clock;
this.scheduler = scheduler;
this.scheduler.scheduleAtFixedRate(this::warnOnStaleContainers,
WATCHDOG_FREQUENCY.getSeconds(),
WATCHDOG_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
this.scheduler.scheduleAtFixedRate(ActiveContainerDeactivationWatchdog::triggerGc,
GC_TRIGGER_FREQUENCY.getSeconds(),
GC_TRIGGER_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
this.scheduler.scheduleAtFixedRate(this::enforceDestructionOfGarbageCollectedContainers,
ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
}
void onContainerActivation(ActiveContainer nextContainer) {
synchronized (monitor) {
Instant now = clock.instant();
ActiveContainer previousContainer = currentContainer;
currentContainer = nextContainer;
currentContainerActivationTime = now;
if (previousContainer != null) {
deactivatedContainers.put(previousContainer, new LifecycleStats(currentContainerActivationTime, now));
destructorReferences.add(new ActiveContainerPhantomReference(previousContainer, garbageCollectedContainers));
}
}
}
@Override
public void emitMetrics(Metric metric) {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
long containersWithRetainedRefsCount = snapshot.stream()
.filter(c -> c.activeContainer.retainCount() > 0)
.count();
metric.set(TOTAL_DEACTIVATED_CONTAINERS, snapshot.size(), null);
metric.set(DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, containersWithRetainedRefsCount, null);
}
@Override
public void close() {
synchronized (monitor) {
scheduler.shutdown();
deactivatedContainers.clear();
destructorReferences.clear();
currentContainer = null;
currentContainerActivationTime = null;
}
}
private static void triggerGc() {
log.log(Level.FINE, "Triggering GC");
System.gc();
System.runFinalization();
}
private void enforceDestructionOfGarbageCollectedContainers() {
log.log(Level.FINE, "Enforcing destruction of GCed containers");
ActiveContainerPhantomReference reference;
while ((reference = (ActiveContainerPhantomReference) garbageCollectedContainers.poll()) != null) {
try {
reference.enforceDestruction();
} catch (Throwable t) {
log.log(Level.SEVERE, "Failed to do post-GC destruction of " + reference.containerName, t);
} finally {
destructorReferences.remove(reference);
reference.clear();
}
}
}
private List<DeactivatedContainer> getDeactivatedContainersSnapshot() {
Instant now = clock.instant();
synchronized (monitor) {
return deactivatedContainers.entrySet().stream()
.filter(e -> e.getValue().isPastGracePeriod(now))
.map(e -> new DeactivatedContainer(e.getKey(), e.getValue()))
.sorted(comparing(e -> e.lifecycleStats.timeActivated))
.collect(toList());
}
}
private static void logWarning(List<DeactivatedContainer> snapshot) {
log.warning(String.format("%s instances of deactivated containers are still alive.", snapshot.size()));
for (DeactivatedContainer deactivatedContainer : snapshot) {
log.warning(" - " + deactivatedContainer.toSummaryString());
}
}
private static class LifecycleStats {
public final Instant timeActivated;
public final Instant timeDeactivated;
public LifecycleStats(Instant timeActivated, Instant timeDeactivated) {
this.timeActivated = timeActivated;
this.timeDeactivated = timeDeactivated;
}
public boolean isPastGracePeriod(Instant instant) {
return timeDeactivated.plus(ACTIVE_CONTAINER_GRACE_PERIOD).isBefore(instant);
}
}
private static class DeactivatedContainer {
public final ActiveContainer activeContainer;
public final LifecycleStats lifecycleStats;
public DeactivatedContainer(ActiveContainer activeContainer, LifecycleStats lifecycleStats) {
this.activeContainer = activeContainer;
this.lifecycleStats = lifecycleStats;
}
public String toSummaryString() {
return String.format("%s: time activated = %s, time deactivated = %s, reference count = %d",
activeContainer.toString(),
lifecycleStats.timeActivated.toString(),
lifecycleStats.timeDeactivated.toString(),
activeContainer.retainCount());
}
}
private static class ActiveContainerPhantomReference extends PhantomReference<ActiveContainer> {
public final String containerName;
private final ActiveContainer.Destructor destructor;
public ActiveContainerPhantomReference(ActiveContainer activeContainer,
ReferenceQueue<? super ActiveContainer> q) {
super(activeContainer, q);
this.containerName = activeContainer.toString();
this.destructor = activeContainer.destructor;
}
public void enforceDestruction() {
boolean alreadyCompleted = destructor.destruct();
if (!alreadyCompleted) {
log.severe(containerName + " was not correctly cleaned up " +
"because of a resource leak or invalid use of reference counting.");
}
}
}
} | class ActiveContainerDeactivationWatchdog implements ActiveContainerMetrics, AutoCloseable {
static final Duration WATCHDOG_FREQUENCY = Duration.ofMinutes(20);
static final Duration ACTIVE_CONTAINER_GRACE_PERIOD = Duration.ofHours(1);
static final Duration GC_TRIGGER_FREQUENCY = ACTIVE_CONTAINER_GRACE_PERIOD.minusMinutes(5);
static final Duration ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY = Duration.ofMinutes(5);
private static final Logger log = Logger.getLogger(ActiveContainerDeactivationWatchdog.class.getName());
private final Object monitor = new Object();
private final WeakHashMap<ActiveContainer, LifecycleStats> deactivatedContainers = new WeakHashMap<>();
private final ReferenceQueue<ActiveContainer> garbageCollectedContainers = new ReferenceQueue<>();
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
private final Set<ActiveContainerPhantomReference> destructorReferences = new HashSet<>();
private final ScheduledExecutorService scheduler;
private final Clock clock;
private ActiveContainer currentContainer;
private Instant currentContainerActivationTime;
@Inject
ActiveContainerDeactivationWatchdog() {
this(
Clock.systemUTC(),
new ScheduledThreadPoolExecutor(1, runnable -> {
Thread thread = new Thread(runnable, "active-container-deactivation-watchdog");
thread.setDaemon(true);
return thread;
}));
}
ActiveContainerDeactivationWatchdog(Clock clock, ScheduledExecutorService scheduler) {
this.clock = clock;
this.scheduler = scheduler;
this.scheduler.scheduleAtFixedRate(this::warnOnStaleContainers,
WATCHDOG_FREQUENCY.getSeconds(),
WATCHDOG_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
this.scheduler.scheduleAtFixedRate(ActiveContainerDeactivationWatchdog::triggerGc,
GC_TRIGGER_FREQUENCY.getSeconds(),
GC_TRIGGER_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
this.scheduler.scheduleAtFixedRate(this::enforceDestructionOfGarbageCollectedContainers,
ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
}
void onContainerActivation(ActiveContainer nextContainer) {
synchronized (monitor) {
Instant now = clock.instant();
ActiveContainer previousContainer = currentContainer;
currentContainer = nextContainer;
currentContainerActivationTime = now;
if (previousContainer != null) {
deactivatedContainers.put(previousContainer, new LifecycleStats(currentContainerActivationTime, now));
destructorReferences.add(new ActiveContainerPhantomReference(previousContainer, garbageCollectedContainers));
}
}
}
@Override
public void emitMetrics(Metric metric) {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
long containersWithRetainedRefsCount = snapshot.stream()
.filter(c -> c.activeContainer.retainCount() > 0)
.count();
metric.set(TOTAL_DEACTIVATED_CONTAINERS, snapshot.size(), null);
metric.set(DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, containersWithRetainedRefsCount, null);
}
@Override
public void close() {
synchronized (monitor) {
scheduler.shutdown();
deactivatedContainers.clear();
destructorReferences.clear();
currentContainer = null;
currentContainerActivationTime = null;
}
}
private static void triggerGc() {
log.log(Level.FINE, "Triggering GC");
System.gc();
System.runFinalization();
}
private void enforceDestructionOfGarbageCollectedContainers() {
log.log(Level.FINE, "Enforcing destruction of GCed containers");
ActiveContainerPhantomReference reference;
while ((reference = (ActiveContainerPhantomReference) garbageCollectedContainers.poll()) != null) {
try {
reference.enforceDestruction();
} catch (Throwable t) {
log.log(Level.SEVERE, "Failed to do post-GC destruction of " + reference.containerName, t);
} finally {
destructorReferences.remove(reference);
reference.clear();
}
}
}
private List<DeactivatedContainer> getDeactivatedContainersSnapshot() {
Instant now = clock.instant();
synchronized (monitor) {
return deactivatedContainers.entrySet().stream()
.filter(e -> e.getValue().isPastGracePeriod(now))
.map(e -> new DeactivatedContainer(e.getKey(), e.getValue()))
.sorted(comparing(e -> e.lifecycleStats.timeActivated))
.collect(toList());
}
}
private static void logWarning(List<DeactivatedContainer> snapshot) {
log.warning(String.format("%s instances of deactivated containers are still alive.", snapshot.size()));
for (DeactivatedContainer deactivatedContainer : snapshot) {
log.warning(" - " + deactivatedContainer.toSummaryString());
}
}
private static class LifecycleStats {
public final Instant timeActivated;
public final Instant timeDeactivated;
public LifecycleStats(Instant timeActivated, Instant timeDeactivated) {
this.timeActivated = timeActivated;
this.timeDeactivated = timeDeactivated;
}
public boolean isPastGracePeriod(Instant instant) {
return timeDeactivated.plus(ACTIVE_CONTAINER_GRACE_PERIOD).isBefore(instant);
}
}
private static class DeactivatedContainer {
public final ActiveContainer activeContainer;
public final LifecycleStats lifecycleStats;
public DeactivatedContainer(ActiveContainer activeContainer, LifecycleStats lifecycleStats) {
this.activeContainer = activeContainer;
this.lifecycleStats = lifecycleStats;
}
public String toSummaryString() {
return String.format("%s: time activated = %s, time deactivated = %s, reference count = %d",
activeContainer.toString(),
lifecycleStats.timeActivated.toString(),
lifecycleStats.timeDeactivated.toString(),
activeContainer.retainCount());
}
}
private static class ActiveContainerPhantomReference extends PhantomReference<ActiveContainer> {
public final String containerName;
private final ActiveContainer.Destructor destructor;
public ActiveContainerPhantomReference(ActiveContainer activeContainer,
ReferenceQueue<? super ActiveContainer> q) {
super(activeContainer, q);
this.containerName = activeContainer.toString();
this.destructor = activeContainer.destructor;
}
public void enforceDestruction() {
boolean alreadyCompleted = destructor.destruct();
if (!alreadyCompleted) {
log.severe(containerName + " was not correctly cleaned up " +
"because of a resource leak or invalid use of reference counting.");
}
}
}
} |
That's just for historical reasons from when jdisc was separate from vespa. | private void warnOnStaleContainers() {
log.log(Level.FINE, "Checking for stale containers");
try {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
if (snapshot.isEmpty()) return;
logWarning(snapshot);
} catch (Throwable t) {
log.log(Level.WARNING, "Watchdog task died!", t);
}
} | log.log(Level.FINE, "Checking for stale containers"); | private void warnOnStaleContainers() {
log.log(Level.FINE, "Checking for stale containers");
try {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
if (snapshot.isEmpty()) return;
logWarning(snapshot);
} catch (Throwable t) {
log.log(Level.WARNING, "Watchdog task died!", t);
}
} | class ActiveContainerDeactivationWatchdog implements ActiveContainerMetrics, AutoCloseable {
static final Duration WATCHDOG_FREQUENCY = Duration.ofMinutes(20);
static final Duration ACTIVE_CONTAINER_GRACE_PERIOD = Duration.ofHours(1);
static final Duration GC_TRIGGER_FREQUENCY = ACTIVE_CONTAINER_GRACE_PERIOD.minusMinutes(5);
static final Duration ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY = Duration.ofMinutes(5);
private static final Logger log = Logger.getLogger(ActiveContainerDeactivationWatchdog.class.getName());
private final Object monitor = new Object();
private final WeakHashMap<ActiveContainer, LifecycleStats> deactivatedContainers = new WeakHashMap<>();
private final ReferenceQueue<ActiveContainer> garbageCollectedContainers = new ReferenceQueue<>();
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
private final Set<ActiveContainerPhantomReference> destructorReferences = new HashSet<>();
private final ScheduledExecutorService scheduler;
private final Clock clock;
private ActiveContainer currentContainer;
private Instant currentContainerActivationTime;
@Inject
ActiveContainerDeactivationWatchdog() {
this(
Clock.systemUTC(),
new ScheduledThreadPoolExecutor(1, runnable -> {
Thread thread = new Thread(runnable, "active-container-deactivation-watchdog");
thread.setDaemon(true);
return thread;
}));
}
ActiveContainerDeactivationWatchdog(Clock clock, ScheduledExecutorService scheduler) {
this.clock = clock;
this.scheduler = scheduler;
this.scheduler.scheduleAtFixedRate(this::warnOnStaleContainers,
WATCHDOG_FREQUENCY.getSeconds(),
WATCHDOG_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
this.scheduler.scheduleAtFixedRate(ActiveContainerDeactivationWatchdog::triggerGc,
GC_TRIGGER_FREQUENCY.getSeconds(),
GC_TRIGGER_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
this.scheduler.scheduleAtFixedRate(this::enforceDestructionOfGarbageCollectedContainers,
ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
TimeUnit.SECONDS);
}
void onContainerActivation(ActiveContainer nextContainer) {
synchronized (monitor) {
Instant now = clock.instant();
ActiveContainer previousContainer = currentContainer;
currentContainer = nextContainer;
currentContainerActivationTime = now;
if (previousContainer != null) {
deactivatedContainers.put(previousContainer, new LifecycleStats(currentContainerActivationTime, now));
destructorReferences.add(new ActiveContainerPhantomReference(previousContainer, garbageCollectedContainers));
}
}
}
@Override
public void emitMetrics(Metric metric) {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
long containersWithRetainedRefsCount = snapshot.stream()
.filter(c -> c.activeContainer.retainCount() > 0)
.count();
metric.set(TOTAL_DEACTIVATED_CONTAINERS, snapshot.size(), null);
metric.set(DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, containersWithRetainedRefsCount, null);
}
@Override
public void close() {
synchronized (monitor) {
scheduler.shutdown();
deactivatedContainers.clear();
destructorReferences.clear();
currentContainer = null;
currentContainerActivationTime = null;
}
}
private static void triggerGc() {
log.log(Level.FINE, "Triggering GC");
System.gc();
System.runFinalization();
}
private void enforceDestructionOfGarbageCollectedContainers() {
log.log(Level.FINE, "Enforcing destruction of GCed containers");
ActiveContainerPhantomReference reference;
while ((reference = (ActiveContainerPhantomReference) garbageCollectedContainers.poll()) != null) {
try {
reference.enforceDestruction();
} catch (Throwable t) {
log.log(Level.SEVERE, "Failed to do post-GC destruction of " + reference.containerName, t);
} finally {
destructorReferences.remove(reference);
reference.clear();
}
}
}
/**
 * Returns the deactivated containers whose grace period has expired, ordered by
 * activation time (oldest first). Taken under the monitor so the weak map is not
 * mutated while streaming.
 */
private List<DeactivatedContainer> getDeactivatedContainersSnapshot() {
    Instant now = clock.instant();
    synchronized (monitor) {
        return deactivatedContainers.entrySet().stream()
                .filter(e -> e.getValue().isPastGracePeriod(now))
                .map(e -> new DeactivatedContainer(e.getKey(), e.getValue()))
                .sorted(comparing(e -> e.lifecycleStats.timeActivated))
                .collect(toList());
    }
}
// Logs one summary line plus one detail line per still-alive deactivated container.
private static void logWarning(List<DeactivatedContainer> snapshot) {
    log.warning(String.format("%s instances of deactivated containers are still alive.", snapshot.size()));
    snapshot.forEach(container -> log.warning(" - " + container.toSummaryString()));
}
// Immutable activation/deactivation timestamps for a single container generation.
private static class LifecycleStats {
    public final Instant timeActivated;
    public final Instant timeDeactivated;

    public LifecycleStats(Instant timeActivated, Instant timeDeactivated) {
        this.timeActivated = timeActivated;
        this.timeDeactivated = timeDeactivated;
    }

    /** Returns true once the grace period after deactivation has fully elapsed at {@code instant}. */
    public boolean isPastGracePeriod(Instant instant) {
        Instant gracePeriodEnd = timeDeactivated.plus(ACTIVE_CONTAINER_GRACE_PERIOD);
        return instant.isAfter(gracePeriodEnd);
    }
}
// Immutable pairing of a deactivated container with its recorded lifecycle timestamps;
// used for snapshots, metrics and warning-log output.
private static class DeactivatedContainer {
    public final ActiveContainer activeContainer;
    public final LifecycleStats lifecycleStats;
    public DeactivatedContainer(ActiveContainer activeContainer, LifecycleStats lifecycleStats) {
        this.activeContainer = activeContainer;
        this.lifecycleStats = lifecycleStats;
    }
    // One human-readable line for the stale-container warning log.
    public String toSummaryString() {
        return String.format("%s: time activated = %s, time deactivated = %s, reference count = %d",
                activeContainer.toString(),
                lifecycleStats.timeActivated.toString(),
                lifecycleStats.timeDeactivated.toString(),
                activeContainer.retainCount());
    }
}
// Phantom reference that lets the watchdog run a container's destructor after the
// container object itself has been garbage collected. Only the name (for logging) and
// the destructor are captured — holding the container would prevent its collection.
private static class ActiveContainerPhantomReference extends PhantomReference<ActiveContainer> {
    public final String containerName;
    private final ActiveContainer.Destructor destructor;
    public ActiveContainerPhantomReference(ActiveContainer activeContainer,
                                           ReferenceQueue<? super ActiveContainer> q) {
        super(activeContainer, q);
        this.containerName = activeContainer.toString();
        this.destructor = activeContainer.destructor;
    }
    // Runs the destructor; logs at SEVERE if reference counting should have destructed
    // the container earlier but did not (i.e. a resource leak).
    public void enforceDestruction() {
        boolean alreadyCompleted = destructor.destruct();
        if (!alreadyCompleted) {
            log.severe(containerName + " was not correctly cleaned up " +
                    "because of a resource leak or invalid use of reference counting.");
        }
    }
}
} | class ActiveContainerDeactivationWatchdog implements ActiveContainerMetrics, AutoCloseable {
static final Duration WATCHDOG_FREQUENCY = Duration.ofMinutes(20);
static final Duration ACTIVE_CONTAINER_GRACE_PERIOD = Duration.ofHours(1);
static final Duration GC_TRIGGER_FREQUENCY = ACTIVE_CONTAINER_GRACE_PERIOD.minusMinutes(5);
static final Duration ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY = Duration.ofMinutes(5);
private static final Logger log = Logger.getLogger(ActiveContainerDeactivationWatchdog.class.getName());
private final Object monitor = new Object();
private final WeakHashMap<ActiveContainer, LifecycleStats> deactivatedContainers = new WeakHashMap<>();
private final ReferenceQueue<ActiveContainer> garbageCollectedContainers = new ReferenceQueue<>();
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
private final Set<ActiveContainerPhantomReference> destructorReferences = new HashSet<>();
private final ScheduledExecutorService scheduler;
private final Clock clock;
private ActiveContainer currentContainer;
private Instant currentContainerActivationTime;
/**
 * Production constructor: UTC wall clock plus a single-threaded scheduler whose thread
 * is a daemon so the watchdog never blocks JVM shutdown.
 */
@Inject
ActiveContainerDeactivationWatchdog() {
    this(
            Clock.systemUTC(),
            new ScheduledThreadPoolExecutor(1, runnable -> {
                Thread thread = new Thread(runnable, "active-container-deactivation-watchdog");
                thread.setDaemon(true);
                return thread;
            }));
}
/**
 * Test-friendly constructor. Schedules three periodic tasks on {@code scheduler}:
 * stale-container warnings, GC triggering (to enqueue phantom references), and enforced
 * destruction of containers that were garbage collected without proper cleanup.
 *
 * @param clock time source, injectable for tests
 * @param scheduler executor for the periodic tasks; owned and shut down by this class
 */
ActiveContainerDeactivationWatchdog(Clock clock, ScheduledExecutorService scheduler) {
    this.clock = clock;
    this.scheduler = scheduler;
    this.scheduler.scheduleAtFixedRate(this::warnOnStaleContainers,
            WATCHDOG_FREQUENCY.getSeconds(),
            WATCHDOG_FREQUENCY.getSeconds(),
            TimeUnit.SECONDS);
    this.scheduler.scheduleAtFixedRate(ActiveContainerDeactivationWatchdog::triggerGc,
            GC_TRIGGER_FREQUENCY.getSeconds(),
            GC_TRIGGER_FREQUENCY.getSeconds(),
            TimeUnit.SECONDS);
    this.scheduler.scheduleAtFixedRate(this::enforceDestructionOfGarbageCollectedContainers,
            ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
            ENFORCE_DESTRUCTION_GCED_CONTAINERS_FREQUENCY.getSeconds(),
            TimeUnit.SECONDS);
}
/**
 * Records the hand-over from the currently active container to {@code nextContainer}.
 * The previous container (if any) is registered as deactivated, together with a phantom
 * reference so its destructor can be enforced after it is garbage collected.
 */
void onContainerActivation(ActiveContainer nextContainer) {
    synchronized (monitor) {
        Instant now = clock.instant();
        ActiveContainer previousContainer = currentContainer;
        // BUG FIX: capture the previous activation time BEFORE overwriting it below.
        // The original read currentContainerActivationTime after assigning 'now' to it,
        // so every LifecycleStats recorded activation time == deactivation time.
        Instant previousActivationTime = currentContainerActivationTime;
        currentContainer = nextContainer;
        currentContainerActivationTime = now;
        if (previousContainer != null) {
            deactivatedContainers.put(previousContainer, new LifecycleStats(previousActivationTime, now));
            destructorReferences.add(new ActiveContainerPhantomReference(previousContainer, garbageCollectedContainers));
        }
    }
}
@Override
public void emitMetrics(Metric metric) {
List<DeactivatedContainer> snapshot = getDeactivatedContainersSnapshot();
long containersWithRetainedRefsCount = snapshot.stream()
.filter(c -> c.activeContainer.retainCount() > 0)
.count();
metric.set(TOTAL_DEACTIVATED_CONTAINERS, snapshot.size(), null);
metric.set(DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES, containersWithRetainedRefsCount, null);
}
@Override
public void close() {
synchronized (monitor) {
scheduler.shutdown();
deactivatedContainers.clear();
destructorReferences.clear();
currentContainer = null;
currentContainerActivationTime = null;
}
}
private static void triggerGc() {
log.log(Level.FINE, "Triggering GC");
System.gc();
System.runFinalization();
}
private void enforceDestructionOfGarbageCollectedContainers() {
log.log(Level.FINE, "Enforcing destruction of GCed containers");
ActiveContainerPhantomReference reference;
while ((reference = (ActiveContainerPhantomReference) garbageCollectedContainers.poll()) != null) {
try {
reference.enforceDestruction();
} catch (Throwable t) {
log.log(Level.SEVERE, "Failed to do post-GC destruction of " + reference.containerName, t);
} finally {
destructorReferences.remove(reference);
reference.clear();
}
}
}
private List<DeactivatedContainer> getDeactivatedContainersSnapshot() {
Instant now = clock.instant();
synchronized (monitor) {
return deactivatedContainers.entrySet().stream()
.filter(e -> e.getValue().isPastGracePeriod(now))
.map(e -> new DeactivatedContainer(e.getKey(), e.getValue()))
.sorted(comparing(e -> e.lifecycleStats.timeActivated))
.collect(toList());
}
}
private static void logWarning(List<DeactivatedContainer> snapshot) {
log.warning(String.format("%s instances of deactivated containers are still alive.", snapshot.size()));
for (DeactivatedContainer deactivatedContainer : snapshot) {
log.warning(" - " + deactivatedContainer.toSummaryString());
}
}
private static class LifecycleStats {
public final Instant timeActivated;
public final Instant timeDeactivated;
public LifecycleStats(Instant timeActivated, Instant timeDeactivated) {
this.timeActivated = timeActivated;
this.timeDeactivated = timeDeactivated;
}
public boolean isPastGracePeriod(Instant instant) {
return timeDeactivated.plus(ACTIVE_CONTAINER_GRACE_PERIOD).isBefore(instant);
}
}
private static class DeactivatedContainer {
public final ActiveContainer activeContainer;
public final LifecycleStats lifecycleStats;
public DeactivatedContainer(ActiveContainer activeContainer, LifecycleStats lifecycleStats) {
this.activeContainer = activeContainer;
this.lifecycleStats = lifecycleStats;
}
public String toSummaryString() {
return String.format("%s: time activated = %s, time deactivated = %s, reference count = %d",
activeContainer.toString(),
lifecycleStats.timeActivated.toString(),
lifecycleStats.timeDeactivated.toString(),
activeContainer.retainCount());
}
}
private static class ActiveContainerPhantomReference extends PhantomReference<ActiveContainer> {
public final String containerName;
private final ActiveContainer.Destructor destructor;
public ActiveContainerPhantomReference(ActiveContainer activeContainer,
ReferenceQueue<? super ActiveContainer> q) {
super(activeContainer, q);
this.containerName = activeContainer.toString();
this.destructor = activeContainer.destructor;
}
public void enforceDestruction() {
boolean alreadyCompleted = destructor.destruct();
if (!alreadyCompleted) {
log.severe(containerName + " was not correctly cleaned up " +
"because of a resource leak or invalid use of reference counting.");
}
}
}
} |
Do you mean we should add another case for that option? We are not sure we understand the question correctly. | protected double getCyclesPerSecond(ArrayList<ParseResult> parseResults){
double cycles = 0;
double seconds = 0;
for (ParseResult parseResult : parseResults){
switch (parseResult.getSearchWord()){
case CYCLES_SEARCH_WORD:
cycles = makeCyclesDouble(parseResult.getValue());
break;
case SECONDS_SEARCH_WORD:
seconds = makeSecondsDouble(parseResult.getValue());
break;
}
}
if (cycles != 0 && seconds != 0){
return convertToGHz(cycles, seconds);
}
return 0;
} | break; | protected double getCyclesPerSecond(ArrayList<ParseResult> parseResults) {
double cycles = -1;
double seconds = -1;
for (ParseResult parseResult : parseResults) {
switch (parseResult.getSearchWord()) {
case CYCLES_SEARCH_WORD:
cycles = makeCyclesDouble(parseResult.getValue());
break;
case SECONDS_SEARCH_WORD:
seconds = makeSecondsDouble(parseResult.getValue());
break;
default:
throw new RuntimeException("Invalid ParseResult searchWord");
}
}
if (cycles > 0 && seconds > 0) {
return convertToGHz(cycles, seconds);
}
return -1;
} | class CPUBenchmark implements Benchmark {
private final String CPU_BENCHMARK_COMMAND = "perf stat -e cycles dd if=/dev/zero of=/dev/null count=100000 2>&1 | grep 'cycles\\|seconds'";
private final String CYCLES_SEARCH_WORD = "cycles";
private final String SECONDS_SEARCH_WORD = "seconds";
private static final Logger logger = Logger.getLogger(CPUBenchmark.class.getName());
private final HardwareResults hardwareResults;
private final CommandExecutor commandExecutor;
/**
 * @param hardwareResults sink that receives the measured CPU frequency
 * @param commandExecutor runs the perf shell command; injectable for tests
 */
public CPUBenchmark(HardwareResults hardwareResults, CommandExecutor commandExecutor){
    this.hardwareResults = hardwareResults;
    this.commandExecutor = commandExecutor;
}
/**
 * Runs the perf-based CPU benchmark and stores the measured cycles/second.
 * An I/O failure is logged as a warning and otherwise ignored.
 */
public void doBenchmark() {
    try {
        ArrayList<String> benchmarkOutput = commandExecutor.executeCommand(CPU_BENCHMARK_COMMAND);
        setCpuCyclesPerSec(parseCpuCyclesPerSec(benchmarkOutput));
    } catch (IOException e) {
        logger.log(Level.WARNING, "Failed to perform CPU benchmark", e);
    }
}
/**
 * Extracts the "cycles" and "seconds" values from the perf output.
 * Lines are split on whitespace; the search word is expected in column 1 and the
 * value to return in column 0.
 */
protected ArrayList<ParseResult> parseCpuCyclesPerSec(ArrayList<String> commandOutput) {
    ArrayList<String> searchWords = new ArrayList<>(Arrays.asList(CYCLES_SEARCH_WORD, SECONDS_SEARCH_WORD));
    ParseInstructions instructions =
            new ParseInstructions(/* searchElementIndex */ 1, /* returnElementIndex */ 0, "\\s+", searchWords);
    return OutputParser.parseOutput(instructions, commandOutput);
}
// Stores the measured frequency only when the parse produced a positive value;
// a non-positive result means the benchmark output could not be interpreted.
protected void setCpuCyclesPerSec(ArrayList<ParseResult> parseResults){
    double cpuCyclesPerSec = getCyclesPerSecond(parseResults);
    if (cpuCyclesPerSec > 0) {
        hardwareResults.setCpuCyclesPerSec(cpuCyclesPerSec);
    }
}
/**
 * Parses the perf "cycles" field (e.g. "1 234 567" or "1,234,567") by stripping every
 * non-digit character; returns 0 when nothing parseable remains.
 */
protected double makeCyclesDouble(String cycles){
    String digitsOnly = cycles.replaceAll("[^\\d]", "");
    return checkIfNumber(digitsOnly) ? Double.parseDouble(digitsOnly) : 0;
}
/**
 * Parses the perf "seconds" field, converting a locale comma decimal separator to a
 * point; returns 0 when the value is not a number.
 */
protected double makeSecondsDouble(String seconds){
    String normalized = seconds.replaceAll(",", ".");
    return checkIfNumber(normalized) ? Double.parseDouble(normalized) : 0;
}
/**
 * Returns true iff {@code numberCandidate} is a non-null, non-empty string that
 * {@link Double#parseDouble(String)} accepts.
 */
protected boolean checkIfNumber(String numberCandidate) {
    // isEmpty() replaces equals("") — same behavior, clearer intent.
    if (numberCandidate == null || numberCandidate.isEmpty()) {
        return false;
    }
    try {
        Double.parseDouble(numberCandidate);
        return true;
    } catch (NumberFormatException e) {
        return false;
    }
}
/** Converts a cycle count over a duration to a clock frequency in GHz. */
protected double convertToGHz(double cycles, double seconds){
    final double cyclesPerGigacycle = 1_000_000_000.0;
    return (cycles / seconds) / cyclesPerGigacycle;
}
} | class CPUBenchmark implements Benchmark {
private static final String CPU_BENCHMARK_COMMAND = "perf stat -e cycles dd if=/dev/zero of=/dev/null count=100000 2>&1 | grep 'cycles\\|seconds'";
private static final String CYCLES_SEARCH_WORD = "cycles";
private static final String SECONDS_SEARCH_WORD = "seconds";
private static final String SPLIT_REGEX_STRING = "\\s+";
private static final int SEARCH_ELEMENT_INDEX = 1;
private static final int RETURN_ELEMENT_INDEX = 0;
private static final Logger logger = Logger.getLogger(CPUBenchmark.class.getName());
private final HardwareResults hardwareResults;
private final CommandExecutor commandExecutor;
public CPUBenchmark(HardwareResults hardwareResults, CommandExecutor commandExecutor) {
this.hardwareResults = hardwareResults;
this.commandExecutor = commandExecutor;
}
public void doBenchmark() {
try {
ArrayList<String> commandOutput = commandExecutor.executeCommand(CPU_BENCHMARK_COMMAND);
ArrayList<ParseResult> parseResults = parseCpuCyclesPerSec(commandOutput);
setCpuCyclesPerSec(parseResults);
} catch (IOException e) {
logger.log(Level.WARNING, "Failed to perform CPU benchmark", e);
}
}
protected ArrayList<ParseResult> parseCpuCyclesPerSec(ArrayList<String> commandOutput) {
ArrayList<String> searchWords = new ArrayList<>(Arrays.asList(CYCLES_SEARCH_WORD, SECONDS_SEARCH_WORD));
ParseInstructions parseInstructions = new ParseInstructions(SEARCH_ELEMENT_INDEX, RETURN_ELEMENT_INDEX, SPLIT_REGEX_STRING, searchWords);
return OutputParser.parseOutput(parseInstructions, commandOutput);
}
protected void setCpuCyclesPerSec(ArrayList<ParseResult> parseResults) {
double cpuCyclesPerSec = getCyclesPerSecond(parseResults);
if (cpuCyclesPerSec > 0) {
hardwareResults.setCpuCyclesPerSec(cpuCyclesPerSec);
}
}
protected double makeCyclesDouble(String cycles) {
cycles = cycles.replaceAll("[^\\d]", "");
if (checkIfNumber(cycles)) {
return Double.parseDouble(cycles);
}
return -1;
}
protected double makeSecondsDouble(String seconds) {
seconds = seconds.replaceAll(",", ".");
if (checkIfNumber(seconds)) {
return Double.parseDouble(seconds);
}
return -1;
}
protected boolean checkIfNumber(String numberCandidate) {
if (numberCandidate == null || numberCandidate.equals("")) {
return false;
}
try {
Double.parseDouble(numberCandidate);
} catch (NumberFormatException e) {
return false;
}
return true;
}
protected double convertToGHz(double cycles, double seconds) {
double giga = 1000000000.0;
return (cycles / seconds) / giga;
}
} |
Correct - add a default statement where you throw a runtime exception or something. | protected double getCyclesPerSecond(ArrayList<ParseResult> parseResults){
double cycles = 0;
double seconds = 0;
for (ParseResult parseResult : parseResults){
switch (parseResult.getSearchWord()){
case CYCLES_SEARCH_WORD:
cycles = makeCyclesDouble(parseResult.getValue());
break;
case SECONDS_SEARCH_WORD:
seconds = makeSecondsDouble(parseResult.getValue());
break;
}
}
if (cycles != 0 && seconds != 0){
return convertToGHz(cycles, seconds);
}
return 0;
} | break; | protected double getCyclesPerSecond(ArrayList<ParseResult> parseResults) {
double cycles = -1;
double seconds = -1;
for (ParseResult parseResult : parseResults) {
switch (parseResult.getSearchWord()) {
case CYCLES_SEARCH_WORD:
cycles = makeCyclesDouble(parseResult.getValue());
break;
case SECONDS_SEARCH_WORD:
seconds = makeSecondsDouble(parseResult.getValue());
break;
default:
throw new RuntimeException("Invalid ParseResult searchWord");
}
}
if (cycles > 0 && seconds > 0) {
return convertToGHz(cycles, seconds);
}
return -1;
} | class CPUBenchmark implements Benchmark {
private final String CPU_BENCHMARK_COMMAND = "perf stat -e cycles dd if=/dev/zero of=/dev/null count=100000 2>&1 | grep 'cycles\\|seconds'";
private final String CYCLES_SEARCH_WORD = "cycles";
private final String SECONDS_SEARCH_WORD = "seconds";
private static final Logger logger = Logger.getLogger(CPUBenchmark.class.getName());
private final HardwareResults hardwareResults;
private final CommandExecutor commandExecutor;
public CPUBenchmark(HardwareResults hardwareResults, CommandExecutor commandExecutor){
this.hardwareResults = hardwareResults;
this.commandExecutor = commandExecutor;
}
public void doBenchmark(){
try {
ArrayList<String> commandOutput = commandExecutor.executeCommand(CPU_BENCHMARK_COMMAND);
ArrayList<ParseResult> parseResults = parseCpuCyclesPerSec(commandOutput);
setCpuCyclesPerSec(parseResults);
}
catch (IOException e){
logger.log(Level.WARNING, "Failed to perform CPU benchmark", e);
}
}
protected ArrayList<ParseResult> parseCpuCyclesPerSec(ArrayList<String> commandOutput) {
ArrayList<String> searchWords = new ArrayList<>(Arrays.asList(CYCLES_SEARCH_WORD, SECONDS_SEARCH_WORD));
String splitRegexString = "\\s+";
int searchElementIndex = 1;
int returnElementIndex = 0;
ParseInstructions parseInstructions = new ParseInstructions(searchElementIndex,returnElementIndex,splitRegexString, searchWords);
return OutputParser.parseOutput(parseInstructions,commandOutput);
}
protected void setCpuCyclesPerSec(ArrayList<ParseResult> parseResults){
double cpuCyclesPerSec = getCyclesPerSecond(parseResults);
if (cpuCyclesPerSec > 0) {
hardwareResults.setCpuCyclesPerSec(cpuCyclesPerSec);
}
}
protected double makeCyclesDouble(String cycles){
cycles = cycles.replaceAll("[^\\d]","");
if (checkIfNumber(cycles)){
return Double.parseDouble(cycles);
}
return 0;
}
protected double makeSecondsDouble(String seconds){
seconds = seconds.replaceAll(",",".");
if (checkIfNumber(seconds)){
return Double.parseDouble(seconds);
}
return 0;
}
protected boolean checkIfNumber(String numberCandidate){
if (numberCandidate == null || numberCandidate.equals("")){
return false;
}
try{
Double.parseDouble(numberCandidate);
}
catch (NumberFormatException e){
return false;
}
return true;
}
protected double convertToGHz(double cycles, double seconds){
double giga = 1000000000.0;
return (cycles/seconds) / giga;
}
} | class CPUBenchmark implements Benchmark {
private static final String CPU_BENCHMARK_COMMAND = "perf stat -e cycles dd if=/dev/zero of=/dev/null count=100000 2>&1 | grep 'cycles\\|seconds'";
private static final String CYCLES_SEARCH_WORD = "cycles";
private static final String SECONDS_SEARCH_WORD = "seconds";
private static final String SPLIT_REGEX_STRING = "\\s+";
private static final int SEARCH_ELEMENT_INDEX = 1;
private static final int RETURN_ELEMENT_INDEX = 0;
private static final Logger logger = Logger.getLogger(CPUBenchmark.class.getName());
private final HardwareResults hardwareResults;
private final CommandExecutor commandExecutor;
public CPUBenchmark(HardwareResults hardwareResults, CommandExecutor commandExecutor) {
this.hardwareResults = hardwareResults;
this.commandExecutor = commandExecutor;
}
public void doBenchmark() {
try {
ArrayList<String> commandOutput = commandExecutor.executeCommand(CPU_BENCHMARK_COMMAND);
ArrayList<ParseResult> parseResults = parseCpuCyclesPerSec(commandOutput);
setCpuCyclesPerSec(parseResults);
} catch (IOException e) {
logger.log(Level.WARNING, "Failed to perform CPU benchmark", e);
}
}
protected ArrayList<ParseResult> parseCpuCyclesPerSec(ArrayList<String> commandOutput) {
ArrayList<String> searchWords = new ArrayList<>(Arrays.asList(CYCLES_SEARCH_WORD, SECONDS_SEARCH_WORD));
ParseInstructions parseInstructions = new ParseInstructions(SEARCH_ELEMENT_INDEX, RETURN_ELEMENT_INDEX, SPLIT_REGEX_STRING, searchWords);
return OutputParser.parseOutput(parseInstructions, commandOutput);
}
protected void setCpuCyclesPerSec(ArrayList<ParseResult> parseResults) {
double cpuCyclesPerSec = getCyclesPerSecond(parseResults);
if (cpuCyclesPerSec > 0) {
hardwareResults.setCpuCyclesPerSec(cpuCyclesPerSec);
}
}
protected double makeCyclesDouble(String cycles) {
cycles = cycles.replaceAll("[^\\d]", "");
if (checkIfNumber(cycles)) {
return Double.parseDouble(cycles);
}
return -1;
}
protected double makeSecondsDouble(String seconds) {
seconds = seconds.replaceAll(",", ".");
if (checkIfNumber(seconds)) {
return Double.parseDouble(seconds);
}
return -1;
}
protected boolean checkIfNumber(String numberCandidate) {
if (numberCandidate == null || numberCandidate.equals("")) {
return false;
}
try {
Double.parseDouble(numberCandidate);
} catch (NumberFormatException e) {
return false;
}
return true;
}
protected double convertToGHz(double cycles, double seconds) {
double giga = 1000000000.0;
return (cycles / seconds) / giga;
}
} |
`getDiscUsedInBytes` should be renamed to match this method ("disc" -> "disk"). | public Optional<Long> getDiskUsageFor(ContainerName containerName) {
Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, "/home/");
try {
return Optional.of(getDiscUsedInBytes(containerDir));
} catch (Throwable e) {
PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
return Optional.empty();
}
} | return Optional.of(getDiscUsedInBytes(containerDir)); | public Optional<Long> getDiskUsageFor(ContainerName containerName) {
Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, "/home/");
try {
return Optional.of(getDiskUsedInBytes(containerDir));
} catch (Throwable e) {
PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
return Optional.empty();
}
} | class StorageMaintainer {
private static final Pattern TOTAL_MEMORY_PATTERN = Pattern.compile("^MemTotal:\\s*(?<totalMem>\\d+) kB$", Pattern.MULTILINE);
private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
private static final ObjectMapper objectMapper = new ObjectMapper();
private static Optional<String> kernelVersion = Optional.empty();
private final Logger logger = Logger.getLogger(StorageMaintainer.class.getName());
private final CounterWrapper numberOfNodeAdminMaintenanceFails;
private final Docker docker;
private final Environment environment;
private final Clock clock;
private double hostTotalMemoryGb = 0;
private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();
/**
 * @param docker used to exec maintenance commands inside containers
 * @param metricReceiver sink for the maintenance-failure counter
 * @param environment path/zone resolution for this node admin
 * @param clock time source, injectable for tests
 */
public StorageMaintainer(Docker docker, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
    this.docker = docker;
    this.environment = environment;
    this.clock = clock;
    // Counter of failed node-maintainer invocations, tagged with the docker role.
    Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
    numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
}
/**
 * Writes secret-agent (yamas) check schedules for the container — a "vespa" service
 * check and a "host-life" check tagged with node/zone metadata — then restarts the
 * yamas agent inside the container so the new schedules take effect.
 *
 * @throws RuntimeException if the schedule files cannot be written
 */
public void writeMetricsConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
    final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
    // Vespa check: every 60s, covering all services.
    Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
    SecretAgentScheduleMaker vespaSchedule = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
            .withTag("parentHostname", environment.getParentHostHostname());
    // Host-life check: every 60s, tagged for aggregation by flavor/state/zone.
    Path hostLifeCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_host_life"));
    SecretAgentScheduleMaker hostLifeSchedule = new SecretAgentScheduleMaker("host-life", 60, hostLifeCheckPath)
            .withTag("namespace", "Vespa")
            .withTag("role", "tenants")
            .withTag("flavor", nodeSpec.nodeFlavor)
            .withTag("canonicalFlavor", nodeSpec.nodeCanonicalFlavor)
            .withTag("state", nodeSpec.nodeState.toString())
            .withTag("zone", environment.getZone())
            .withTag("parentHostname", environment.getParentHostHostname());
    // Optional node-spec fields only contribute tags when present.
    nodeSpec.owner.ifPresent(owner -> hostLifeSchedule
            .withTag("tenantName", owner.tenant)
            .withTag("app", owner.application + "." + owner.instance)
            .withTag("applicationName", owner.application)
            .withTag("instanceName", owner.instance)
            .withTag("applicationId", owner.tenant + "." + owner.application + "." + owner.instance));
    nodeSpec.membership.ifPresent(membership -> hostLifeSchedule
            .withTag("clustertype", membership.clusterType)
            .withTag("clusterid", membership.clusterId));
    nodeSpec.vespaVersion.ifPresent(version -> hostLifeSchedule.withTag("vespaVersion", version));
    try {
        vespaSchedule.writeTo(yamasAgentFolder);
        hostLifeSchedule.writeTo(yamasAgentFolder);
        // Restart the agent inside the container so it picks up the new schedules.
        final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
        docker.executeInContainerAsRoot(containerName, restartYamasAgent);
    } catch (IOException e) {
        throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
    }
}
/**
 * Generates and writes the filebeat log-forwarding configuration for the container.
 * A missing config is logged and skipped; all other failures are logged rather than
 * propagated, since log forwarding is non-critical to node operation.
 */
public void writeFilebeatConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
    try {
        FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
        Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
        if (!config.isPresent()) {
            logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
            return;
        }
        Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
        // NOTE(review): getBytes() uses the platform default charset — confirm UTF-8 is intended.
        Files.write(filebeatPath, config.get().getBytes());
        logger.info("Wrote filebeat config.");
    } catch (Throwable t) {
        logger.error("Failed writing filebeat config; " + nodeSpec, t);
    }
}
/**
 * Returns the disk usage of {@code path} in bytes, as reported by
 * {@code du -xsk} (summarized, single file system, kilobyte units).
 *
 * Kept under its historical (misspelled) name for compatibility with existing callers;
 * new code should use {@link #getDiskUsedInBytes}.
 *
 * @throws RuntimeException if du times out, writes to stderr, or produces unexpected output
 */
long getDiscUsedInBytes(Path path) throws IOException, InterruptedException {
    return getDiskUsedInBytes(path);
}

/** Correctly spelled replacement for {@code getDiscUsedInBytes}; same contract. */
long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
    final String[] command = {"du", "-xsk", path.toString()};
    Process duCommand = new ProcessBuilder().command(command).start();
    if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
        duCommand.destroy();
        // Typo fix in message: "timedout" -> "timed out".
        throw new RuntimeException("Disk usage command timed out, aborting.");
    }
    // NOTE(review): readers use the platform default charset; du output is ASCII so this is benign.
    String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
    String error = IOUtils.readAll(new InputStreamReader(duCommand.getErrorStream()));
    if (! error.isEmpty()) {
        throw new RuntimeException("Disk usage wrote to error log: " + error);
    }
    // Expected output: "<kilobytes>\t<path>\n"
    String[] results = output.split("\t");
    if (results.length != 2) {
        throw new RuntimeException("Result from disk usage command not as expected: " + output);
    }
    // parseLong avoids the boxing of Long.valueOf; du -k reports kilobytes.
    return Long.parseLong(results[0]) * 1024;
}
// Reads /proc/meminfo in full; returns empty (and logs a warning) if the file cannot
// be read, e.g. on a non-Linux development host.
Optional<String> readMeminfo() {
    try {
        byte[] rawMeminfo = Files.readAllBytes(Paths.get("/proc/meminfo"));
        return Optional.of(new String(rawMeminfo));
    } catch (IOException e) {
        logger.log(LogLevel.WARNING, "Failed to read meminfo", e);
        return Optional.empty();
    }
}
/**
 * Returns total host memory in GiB, lazily parsed from /proc/meminfo and cached.
 * Returns 0 if /proc/meminfo cannot be read or parsed.
 * NOTE(review): the lazy initialization is unsynchronized — assumed to be called from a
 * single maintenance thread; confirm before using concurrently.
 */
public double getHostTotalMemoryGb() {
    if (hostTotalMemoryGb == 0) {
        readMeminfo().ifPresent(memInfo -> {
            Matcher matcher = TOTAL_MEMORY_PATTERN.matcher(memInfo);
            if (matcher.find()) {
                // MemTotal is reported in kB; divide twice by 1024 to get GiB.
                hostTotalMemoryGb = Integer.valueOf(matcher.group("totalMem")) / 1024d / 1024;
            } else {
                logger.log(LogLevel.WARNING, "Failed to parse total memory from meminfo: " + memInfo);
            }
        });
    }
    return hostTotalMemoryGb;
}
/**
 * Deletes old log files for vespa, nginx, logstash, etc. inside the container, plus
 * aged logarchive and filedistribution files. Throttled: returns immediately unless
 * the container's MaintenanceThrottler says it is time to run again.
 */
public void removeOldFilesFromNode(ContainerName containerName) {
    if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;
    MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
    String[] pathsToClean = {
            getDefaults().underVespaHome("logs/elasticsearch2"),
            getDefaults().underVespaHome("logs/logstash2"),
            getDefaults().underVespaHome("logs/daemontools_y"),
            getDefaults().underVespaHome("logs/nginx"),
            getDefaults().underVespaHome("logs/vespa")
    };
    // Rotated logs (*.log.*) and query access logs older than 3 days are removed.
    for (String pathToClean : pathsToClean) {
        Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
        if (Files.exists(path)) {
            maintainerExecutor.addJob("delete-files")
                    .withArgument("basePath", path)
                    .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                    .withArgument("fileNameRegex", ".*\\.log\\..+")
                    .withArgument("recursive", false);
            maintainerExecutor.addJob("delete-files")
                    .withArgument("basePath", path)
                    .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                    .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                    .withArgument("recursive", false);
        }
    }
    // Archived logs and file-distribution caches are kept for 31 days.
    Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(containerName,
            getDefaults().underVespaHome("logs/vespa/logarchive"));
    maintainerExecutor.addJob("delete-files")
            .withArgument("basePath", logArchiveDir)
            .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
            .withArgument("recursive", false);
    Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(containerName,
            getDefaults().underVespaHome("var/db/vespa/filedistribution"));
    maintainerExecutor.addJob("delete-files")
            .withArgument("basePath", fileDistrDir)
            .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
            .withArgument("recursive", true);
    // All queued jobs run in a single node-maintainer JVM invocation.
    maintainerExecutor.execute();
    getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
}
/**
 * Checks if container has any new coredumps, reports and archives them if so.
 * Builds metadata attributes (host, zone, image, owner, ...) that the node-maintainer
 * attaches to each reported coredump. Throttled per container.
 */
public void handleCoreDumpsForContainer(ContainerName containerName, ContainerNodeSpec nodeSpec, Environment environment) {
    if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow()) return;
    Map<String, Object> attributes = new HashMap<>();
    attributes.put("hostname", nodeSpec.hostname);
    attributes.put("parent_hostname", HostName.getLocalhost());
    attributes.put("region", environment.getRegion());
    attributes.put("environment", environment.getEnvironment());
    attributes.put("flavor", nodeSpec.nodeFlavor);
    try {
        attributes.put("kernel_version", getKernelVersion());
    } catch (Throwable ignored) {
        // Best effort only: a missing kernel version must not block coredump reporting.
        attributes.put("kernel_version", "unknown");
    }
    // Optional node-spec fields only contribute attributes when present.
    nodeSpec.wantedDockerImage.ifPresent(image -> attributes.put("docker_image", image.asString()));
    nodeSpec.vespaVersion.ifPresent(version -> attributes.put("vespa_version", version));
    nodeSpec.owner.ifPresent(owner -> {
        attributes.put("tenant", owner.tenant);
        attributes.put("application", owner.application);
        attributes.put("instance", owner.instance);
    });
    MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
    maintainerExecutor.addJob("handle-core-dumps")
            .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
            .withArgument("coredumpsPath",
                    environment.pathInNodeAdminFromPathInNode(containerName,
                            getDefaults().underVespaHome("var/crash")))
            .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint())
            .withArgument("attributes", attributes);
    maintainerExecutor.execute();
    getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
}
/**
 * Deletes old
 * * archived app data
 * * Vespa logs
 * * Filedistribution files
 * for the node-admin container itself. Throttled like the per-container cleanups.
 */
public void cleanNodeAdmin() {
    if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;
    MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
    // Archived application-storage directories older than a week.
    maintainerExecutor.addJob("delete-directories")
            .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
            .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
            .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));
    // JDisc logs and file-distribution caches are kept for 31 days.
    Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, getDefaults().underVespaHome("logs/vespa/"));
    maintainerExecutor.addJob("delete-files")
            .withArgument("basePath", nodeAdminJDiskLogsPath)
            .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
            .withArgument("recursive", false);
    Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, getDefaults().underVespaHome("var/db/vespa/filedistribution"));
    maintainerExecutor.addJob("delete-files")
            .withArgument("basePath", fileDistrDir)
            .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
            .withArgument("recursive", true);
    maintainerExecutor.execute();
    getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
}
/**
 * Archives container data, runs when container enters state "dirty":
 * deletes the container's var/ directory, moves the remaining file tree to the
 * cleanup archive, and resets the container's maintenance throttler.
 */
public void archiveNodeData(ContainerName containerName) {
    MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
    maintainerExecutor.addJob("recursive-delete")
            .withArgument("path", environment.pathInNodeAdminFromPathInNode(containerName, getDefaults().underVespaHome("var")));
    maintainerExecutor.addJob("move-files")
            .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, "/"))
            .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    maintainerExecutor.execute();
    getMaintenanceThrottlerFor(containerName).reset();
}
private String getKernelVersion() throws IOException, InterruptedException {
if (! kernelVersion.isPresent()) {
Pair<Integer, String> result = new ProcessExecuter().exec(new String[]{"uname", "-r"});
if (result.getFirst() == 0) {
kernelVersion = Optional.of(result.getSecond().trim());
} else {
throw new RuntimeException("Failed to get kernel version\n" + result);
}
}
return kernelVersion.orElse("unknown");
}
/**
* Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
*/
private class MaintainerExecutor {
private final List<MaintainerExecutorJob> jobs = new ArrayList<>();
private final ContainerName executeIn;
MaintainerExecutor(ContainerName executeIn) {
this.executeIn = executeIn;
}
MaintainerExecutor() {
this(NODE_ADMIN);
}
MaintainerExecutorJob addJob(String jobName) {
MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
jobs.add(job);
return job;
}
void execute() {
String args;
try {
args = objectMapper.writeValueAsString(jobs);
} catch (JsonProcessingException e) {
throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
}
String[] command = {"java",
"-cp", getDefaults().underVespaHome("lib/jars/node-maintainer-jar-with-dependencies.jar"),
"-Dvespa.log.target=file:" + getDefaults().underVespaHome("logs/vespa/maintainer.log"),
"com.yahoo.vespa.hosted.node.maintainer.Maintainer", args};
ProcessResult result = docker.executeInContainerAsRoot(executeIn, command);
if (! result.isSuccess()) {
numberOfNodeAdminMaintenanceFails.add();
throw new RuntimeException("Failed to run maintenance jobs: " + args + result);
}
}
}
private class MaintainerExecutorJob {
@JsonProperty(value="type")
private final String type;
@JsonProperty(value="arguments")
private final Map<String, Object> arguments = new HashMap<>();
MaintainerExecutorJob(String type) {
this.type = type;
}
MaintainerExecutorJob withArgument(String argument, Object value) {
arguments.put(argument, (value instanceof Path) ? value.toString() : value);
return this;
}
}
private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
if (! maintenanceThrottlerByContainerName.containsKey(containerName)) {
maintenanceThrottlerByContainerName.put(containerName, new MaintenanceThrottler());
}
return maintenanceThrottlerByContainerName.get(containerName);
}
private class MaintenanceThrottler {
private Instant nextRemoveOldFilesAt;
private Instant nextHandleOldCoredumpsAt;
MaintenanceThrottler() {
reset();
}
void updateNextRemoveOldFilesTime() {
nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
}
boolean shouldRemoveOldFilesNow() {
return !nextRemoveOldFilesAt.isAfter(clock.instant());
}
void updateNextHandleCoredumpsTime() {
nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofHours(1));
}
boolean shouldHandleCoredumpsNow() {
return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
}
void reset() {
nextRemoveOldFilesAt = Instant.EPOCH;
nextHandleOldCoredumpsAt = Instant.EPOCH;
}
}
} | class StorageMaintainer {
private static final Pattern TOTAL_MEMORY_PATTERN = Pattern.compile("^MemTotal:\\s*(?<totalMem>\\d+) kB$", Pattern.MULTILINE);
private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
private static final ObjectMapper objectMapper = new ObjectMapper();
private static Optional<String> kernelVersion = Optional.empty();
private final Logger logger = Logger.getLogger(StorageMaintainer.class.getName());
private final CounterWrapper numberOfNodeAdminMaintenanceFails;
private final Docker docker;
private final Environment environment;
private final Clock clock;
private double hostTotalMemoryGb = 0;
private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();
public StorageMaintainer(Docker docker, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
this.docker = docker;
this.environment = environment;
this.clock = clock;
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
}
public void writeMetricsConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
SecretAgentScheduleMaker vespaSchedule = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("parentHostname", environment.getParentHostHostname());
Path hostLifeCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_host_life"));
SecretAgentScheduleMaker hostLifeSchedule = new SecretAgentScheduleMaker("host-life", 60, hostLifeCheckPath)
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("canonicalFlavor", nodeSpec.nodeCanonicalFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
nodeSpec.owner.ifPresent(owner -> hostLifeSchedule
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance)
.withTag("applicationName", owner.application)
.withTag("instanceName", owner.instance)
.withTag("applicationId", owner.tenant + "." + owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership -> hostLifeSchedule
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
nodeSpec.vespaVersion.ifPresent(version -> hostLifeSchedule.withTag("vespaVersion", version));
try {
vespaSchedule.writeTo(yamasAgentFolder);
hostLifeSchedule.writeTo(yamasAgentFolder);
final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
docker.executeInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
public void writeFilebeatConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
try {
FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
if (!config.isPresent()) {
logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
return;
}
Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
Files.write(filebeatPath, config.get().getBytes());
logger.info("Wrote filebeat config.");
} catch (Throwable t) {
logger.error("Failed writing filebeat config; " + nodeSpec, t);
}
}
long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
final String[] command = {"du", "-xsk", path.toString()};
Process duCommand = new ProcessBuilder().command(command).start();
if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
duCommand.destroy();
throw new RuntimeException("Disk usage command timedout, aborting.");
}
String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
String error = IOUtils.readAll(new InputStreamReader(duCommand.getErrorStream()));
if (! error.isEmpty()) {
throw new RuntimeException("Disk usage wrote to error log: " + error);
}
String[] results = output.split("\t");
if (results.length != 2) {
throw new RuntimeException("Result from disk usage command not as expected: " + output);
}
long diskUsageKB = Long.valueOf(results[0]);
return diskUsageKB * 1024;
}
Optional<String> readMeminfo() {
try {
return Optional.of(new String(Files.readAllBytes(Paths.get("/proc/meminfo"))));
} catch (IOException e) {
logger.log(LogLevel.WARNING, "Failed to read meminfo", e);
return Optional.empty();
}
}
public double getHostTotalMemoryGb() {
if (hostTotalMemoryGb == 0) {
readMeminfo().ifPresent(memInfo -> {
Matcher matcher = TOTAL_MEMORY_PATTERN.matcher(memInfo);
if (matcher.find()) {
hostTotalMemoryGb = Integer.valueOf(matcher.group("totalMem")) / 1024d / 1024;
} else {
logger.log(LogLevel.WARNING, "Failed to parse total memory from meminfo: " + memInfo);
}
});
}
return hostTotalMemoryGb;
}
/**
* Deletes old log files for vespa, nginx, logstash, etc.
*/
public void removeOldFilesFromNode(ContainerName containerName) {
if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
String[] pathsToClean = {
getDefaults().underVespaHome("logs/elasticsearch2"),
getDefaults().underVespaHome("logs/logstash2"),
getDefaults().underVespaHome("logs/daemontools_y"),
getDefaults().underVespaHome("logs/nginx"),
getDefaults().underVespaHome("logs/vespa")
};
for (String pathToClean : pathsToClean) {
Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
if (Files.exists(path)) {
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", path)
.withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
.withArgument("fileNameRegex", ".*\\.log\\..+")
.withArgument("recursive", false);
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", path)
.withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
.withArgument("fileNameRegex", ".*QueryAccessLog.*")
.withArgument("recursive", false);
}
}
Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(containerName,
getDefaults().underVespaHome("logs/vespa/logarchive"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", logArchiveDir)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", false);
Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(containerName,
getDefaults().underVespaHome("var/db/vespa/filedistribution"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", fileDistrDir)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", true);
maintainerExecutor.execute();
getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
}
/**
* Checks if container has any new coredumps, reports and archives them if so
*/
public void handleCoreDumpsForContainer(ContainerName containerName, ContainerNodeSpec nodeSpec, Environment environment) {
if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow()) return;
Map<String, Object> attributes = new HashMap<>();
attributes.put("hostname", nodeSpec.hostname);
attributes.put("parent_hostname", HostName.getLocalhost());
attributes.put("region", environment.getRegion());
attributes.put("environment", environment.getEnvironment());
attributes.put("flavor", nodeSpec.nodeFlavor);
try {
attributes.put("kernel_version", getKernelVersion());
} catch (Throwable ignored) {
attributes.put("kernel_version", "unknown");
}
nodeSpec.wantedDockerImage.ifPresent(image -> attributes.put("docker_image", image.asString()));
nodeSpec.vespaVersion.ifPresent(version -> attributes.put("vespa_version", version));
nodeSpec.owner.ifPresent(owner -> {
attributes.put("tenant", owner.tenant);
attributes.put("application", owner.application);
attributes.put("instance", owner.instance);
});
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
maintainerExecutor.addJob("handle-core-dumps")
.withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
.withArgument("coredumpsPath",
environment.pathInNodeAdminFromPathInNode(containerName,
getDefaults().underVespaHome("var/crash")))
.withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint())
.withArgument("attributes", attributes);
maintainerExecutor.execute();
getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
}
/**
* Deletes old
* * archived app data
* * Vespa logs
* * Filedistribution files
*/
public void cleanNodeAdmin() {
if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
maintainerExecutor.addJob("delete-directories")
.withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
.withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
.withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));
Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, getDefaults().underVespaHome("logs/vespa/"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", nodeAdminJDiskLogsPath)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", false);
Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, getDefaults().underVespaHome("var/db/vespa/filedistribution"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", fileDistrDir)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", true);
maintainerExecutor.execute();
getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
}
/**
* Archives container data, runs when container enters state "dirty"
*/
public void archiveNodeData(ContainerName containerName) {
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
maintainerExecutor.addJob("recursive-delete")
.withArgument("path", environment.pathInNodeAdminFromPathInNode(containerName, getDefaults().underVespaHome("var")));
maintainerExecutor.addJob("move-files")
.withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, "/"))
.withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
maintainerExecutor.execute();
getMaintenanceThrottlerFor(containerName).reset();
}
private String getKernelVersion() throws IOException, InterruptedException {
if (! kernelVersion.isPresent()) {
Pair<Integer, String> result = new ProcessExecuter().exec(new String[]{"uname", "-r"});
if (result.getFirst() == 0) {
kernelVersion = Optional.of(result.getSecond().trim());
} else {
throw new RuntimeException("Failed to get kernel version\n" + result);
}
}
return kernelVersion.orElse("unknown");
}
/**
* Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
*/
private class MaintainerExecutor {
private final List<MaintainerExecutorJob> jobs = new ArrayList<>();
private final ContainerName executeIn;
MaintainerExecutor(ContainerName executeIn) {
this.executeIn = executeIn;
}
MaintainerExecutor() {
this(NODE_ADMIN);
}
MaintainerExecutorJob addJob(String jobName) {
MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
jobs.add(job);
return job;
}
void execute() {
String args;
try {
args = objectMapper.writeValueAsString(jobs);
} catch (JsonProcessingException e) {
throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
}
String[] command = {"java",
"-cp", getDefaults().underVespaHome("lib/jars/node-maintainer-jar-with-dependencies.jar"),
"-Dvespa.log.target=file:" + getDefaults().underVespaHome("logs/vespa/maintainer.log"),
"com.yahoo.vespa.hosted.node.maintainer.Maintainer", args};
ProcessResult result = docker.executeInContainerAsRoot(executeIn, command);
if (! result.isSuccess()) {
numberOfNodeAdminMaintenanceFails.add();
throw new RuntimeException("Failed to run maintenance jobs: " + args + result);
}
}
}
private class MaintainerExecutorJob {
@JsonProperty(value="type")
private final String type;
@JsonProperty(value="arguments")
private final Map<String, Object> arguments = new HashMap<>();
MaintainerExecutorJob(String type) {
this.type = type;
}
MaintainerExecutorJob withArgument(String argument, Object value) {
arguments.put(argument, (value instanceof Path) ? value.toString() : value);
return this;
}
}
private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
if (! maintenanceThrottlerByContainerName.containsKey(containerName)) {
maintenanceThrottlerByContainerName.put(containerName, new MaintenanceThrottler());
}
return maintenanceThrottlerByContainerName.get(containerName);
}
private class MaintenanceThrottler {
private Instant nextRemoveOldFilesAt;
private Instant nextHandleOldCoredumpsAt;
MaintenanceThrottler() {
reset();
}
void updateNextRemoveOldFilesTime() {
nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
}
boolean shouldRemoveOldFilesNow() {
return !nextRemoveOldFilesAt.isAfter(clock.instant());
}
void updateNextHandleCoredumpsTime() {
nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofHours(1));
}
boolean shouldHandleCoredumpsNow() {
return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
}
void reset() {
nextRemoveOldFilesAt = Instant.EPOCH;
nextHandleOldCoredumpsAt = Instant.EPOCH;
}
}
} |
Renamed. | public Optional<Long> getDiskUsageFor(ContainerName containerName) {
Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, "/home/");
try {
return Optional.of(getDiscUsedInBytes(containerDir));
} catch (Throwable e) {
PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
return Optional.empty();
}
} | return Optional.of(getDiscUsedInBytes(containerDir)); | public Optional<Long> getDiskUsageFor(ContainerName containerName) {
Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, "/home/");
try {
return Optional.of(getDiskUsedInBytes(containerDir));
} catch (Throwable e) {
PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
return Optional.empty();
}
} | class StorageMaintainer {
private static final Pattern TOTAL_MEMORY_PATTERN = Pattern.compile("^MemTotal:\\s*(?<totalMem>\\d+) kB$", Pattern.MULTILINE);
private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
private static final ObjectMapper objectMapper = new ObjectMapper();
private static Optional<String> kernelVersion = Optional.empty();
private final Logger logger = Logger.getLogger(StorageMaintainer.class.getName());
private final CounterWrapper numberOfNodeAdminMaintenanceFails;
private final Docker docker;
private final Environment environment;
private final Clock clock;
private double hostTotalMemoryGb = 0;
private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();
public StorageMaintainer(Docker docker, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
this.docker = docker;
this.environment = environment;
this.clock = clock;
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
}
public void writeMetricsConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
SecretAgentScheduleMaker vespaSchedule = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("parentHostname", environment.getParentHostHostname());
Path hostLifeCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_host_life"));
SecretAgentScheduleMaker hostLifeSchedule = new SecretAgentScheduleMaker("host-life", 60, hostLifeCheckPath)
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("canonicalFlavor", nodeSpec.nodeCanonicalFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
nodeSpec.owner.ifPresent(owner -> hostLifeSchedule
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance)
.withTag("applicationName", owner.application)
.withTag("instanceName", owner.instance)
.withTag("applicationId", owner.tenant + "." + owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership -> hostLifeSchedule
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
nodeSpec.vespaVersion.ifPresent(version -> hostLifeSchedule.withTag("vespaVersion", version));
try {
vespaSchedule.writeTo(yamasAgentFolder);
hostLifeSchedule.writeTo(yamasAgentFolder);
final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
docker.executeInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
public void writeFilebeatConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
try {
FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
if (!config.isPresent()) {
logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
return;
}
Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
Files.write(filebeatPath, config.get().getBytes());
logger.info("Wrote filebeat config.");
} catch (Throwable t) {
logger.error("Failed writing filebeat config; " + nodeSpec, t);
}
}
long getDiscUsedInBytes(Path path) throws IOException, InterruptedException {
final String[] command = {"du", "-xsk", path.toString()};
Process duCommand = new ProcessBuilder().command(command).start();
if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
duCommand.destroy();
throw new RuntimeException("Disk usage command timedout, aborting.");
}
String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
String error = IOUtils.readAll(new InputStreamReader(duCommand.getErrorStream()));
if (! error.isEmpty()) {
throw new RuntimeException("Disk usage wrote to error log: " + error);
}
String[] results = output.split("\t");
if (results.length != 2) {
throw new RuntimeException("Result from disk usage command not as expected: " + output);
}
long diskUsageKB = Long.valueOf(results[0]);
return diskUsageKB * 1024;
}
Optional<String> readMeminfo() {
try {
return Optional.of(new String(Files.readAllBytes(Paths.get("/proc/meminfo"))));
} catch (IOException e) {
logger.log(LogLevel.WARNING, "Failed to read meminfo", e);
return Optional.empty();
}
}
public double getHostTotalMemoryGb() {
if (hostTotalMemoryGb == 0) {
readMeminfo().ifPresent(memInfo -> {
Matcher matcher = TOTAL_MEMORY_PATTERN.matcher(memInfo);
if (matcher.find()) {
hostTotalMemoryGb = Integer.valueOf(matcher.group("totalMem")) / 1024d / 1024;
} else {
logger.log(LogLevel.WARNING, "Failed to parse total memory from meminfo: " + memInfo);
}
});
}
return hostTotalMemoryGb;
}
/**
* Deletes old log files for vespa, nginx, logstash, etc.
*/
public void removeOldFilesFromNode(ContainerName containerName) {
if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
String[] pathsToClean = {
getDefaults().underVespaHome("logs/elasticsearch2"),
getDefaults().underVespaHome("logs/logstash2"),
getDefaults().underVespaHome("logs/daemontools_y"),
getDefaults().underVespaHome("logs/nginx"),
getDefaults().underVespaHome("logs/vespa")
};
for (String pathToClean : pathsToClean) {
Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
if (Files.exists(path)) {
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", path)
.withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
.withArgument("fileNameRegex", ".*\\.log\\..+")
.withArgument("recursive", false);
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", path)
.withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
.withArgument("fileNameRegex", ".*QueryAccessLog.*")
.withArgument("recursive", false);
}
}
Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(containerName,
getDefaults().underVespaHome("logs/vespa/logarchive"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", logArchiveDir)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", false);
Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(containerName,
getDefaults().underVespaHome("var/db/vespa/filedistribution"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", fileDistrDir)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", true);
maintainerExecutor.execute();
getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
}
/**
* Checks if container has any new coredumps, reports and archives them if so
*/
public void handleCoreDumpsForContainer(ContainerName containerName, ContainerNodeSpec nodeSpec, Environment environment) {
if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow()) return;
Map<String, Object> attributes = new HashMap<>();
attributes.put("hostname", nodeSpec.hostname);
attributes.put("parent_hostname", HostName.getLocalhost());
attributes.put("region", environment.getRegion());
attributes.put("environment", environment.getEnvironment());
attributes.put("flavor", nodeSpec.nodeFlavor);
try {
attributes.put("kernel_version", getKernelVersion());
} catch (Throwable ignored) {
attributes.put("kernel_version", "unknown");
}
nodeSpec.wantedDockerImage.ifPresent(image -> attributes.put("docker_image", image.asString()));
nodeSpec.vespaVersion.ifPresent(version -> attributes.put("vespa_version", version));
nodeSpec.owner.ifPresent(owner -> {
attributes.put("tenant", owner.tenant);
attributes.put("application", owner.application);
attributes.put("instance", owner.instance);
});
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
maintainerExecutor.addJob("handle-core-dumps")
.withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
.withArgument("coredumpsPath",
environment.pathInNodeAdminFromPathInNode(containerName,
getDefaults().underVespaHome("var/crash")))
.withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint())
.withArgument("attributes", attributes);
maintainerExecutor.execute();
getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
}
/**
* Deletes old
* * archived app data
* * Vespa logs
* * Filedistribution files
*/
public void cleanNodeAdmin() {
if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
maintainerExecutor.addJob("delete-directories")
.withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
.withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
.withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));
Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, getDefaults().underVespaHome("logs/vespa/"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", nodeAdminJDiskLogsPath)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", false);
Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, getDefaults().underVespaHome("var/db/vespa/filedistribution"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", fileDistrDir)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", true);
maintainerExecutor.execute();
getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
}
/**
* Archives container data, runs when container enters state "dirty"
*/
public void archiveNodeData(ContainerName containerName) {
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
maintainerExecutor.addJob("recursive-delete")
.withArgument("path", environment.pathInNodeAdminFromPathInNode(containerName, getDefaults().underVespaHome("var")));
maintainerExecutor.addJob("move-files")
.withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, "/"))
.withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
maintainerExecutor.execute();
getMaintenanceThrottlerFor(containerName).reset();
}
private String getKernelVersion() throws IOException, InterruptedException {
if (! kernelVersion.isPresent()) {
Pair<Integer, String> result = new ProcessExecuter().exec(new String[]{"uname", "-r"});
if (result.getFirst() == 0) {
kernelVersion = Optional.of(result.getSecond().trim());
} else {
throw new RuntimeException("Failed to get kernel version\n" + result);
}
}
return kernelVersion.orElse("unknown");
}
/**
* Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
*/
private class MaintainerExecutor {
private final List<MaintainerExecutorJob> jobs = new ArrayList<>();
private final ContainerName executeIn;
MaintainerExecutor(ContainerName executeIn) {
this.executeIn = executeIn;
}
MaintainerExecutor() {
this(NODE_ADMIN);
}
MaintainerExecutorJob addJob(String jobName) {
MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
jobs.add(job);
return job;
}
void execute() {
String args;
try {
args = objectMapper.writeValueAsString(jobs);
} catch (JsonProcessingException e) {
throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
}
String[] command = {"java",
"-cp", getDefaults().underVespaHome("lib/jars/node-maintainer-jar-with-dependencies.jar"),
"-Dvespa.log.target=file:" + getDefaults().underVespaHome("logs/vespa/maintainer.log"),
"com.yahoo.vespa.hosted.node.maintainer.Maintainer", args};
ProcessResult result = docker.executeInContainerAsRoot(executeIn, command);
if (! result.isSuccess()) {
numberOfNodeAdminMaintenanceFails.add();
throw new RuntimeException("Failed to run maintenance jobs: " + args + result);
}
}
}
private class MaintainerExecutorJob {
@JsonProperty(value="type")
private final String type;
@JsonProperty(value="arguments")
private final Map<String, Object> arguments = new HashMap<>();
MaintainerExecutorJob(String type) {
this.type = type;
}
MaintainerExecutorJob withArgument(String argument, Object value) {
arguments.put(argument, (value instanceof Path) ? value.toString() : value);
return this;
}
}
private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
if (! maintenanceThrottlerByContainerName.containsKey(containerName)) {
maintenanceThrottlerByContainerName.put(containerName, new MaintenanceThrottler());
}
return maintenanceThrottlerByContainerName.get(containerName);
}
private class MaintenanceThrottler {
private Instant nextRemoveOldFilesAt;
private Instant nextHandleOldCoredumpsAt;
MaintenanceThrottler() {
reset();
}
void updateNextRemoveOldFilesTime() {
nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
}
boolean shouldRemoveOldFilesNow() {
return !nextRemoveOldFilesAt.isAfter(clock.instant());
}
void updateNextHandleCoredumpsTime() {
nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofHours(1));
}
boolean shouldHandleCoredumpsNow() {
return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
}
void reset() {
nextRemoveOldFilesAt = Instant.EPOCH;
nextHandleOldCoredumpsAt = Instant.EPOCH;
}
}
} | class StorageMaintainer {
// Extracts the MemTotal value (in kB) from /proc/meminfo contents
private static final Pattern TOTAL_MEMORY_PATTERN = Pattern.compile("^MemTotal:\\s*(?<totalMem>\\d+) kB$", Pattern.MULTILINE);
private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
private static final ObjectMapper objectMapper = new ObjectMapper();
// Cached result of "uname -r", filled lazily by getKernelVersion()
private static Optional<String> kernelVersion = Optional.empty();
private final Logger logger = Logger.getLogger(StorageMaintainer.class.getName());
private final CounterWrapper numberOfNodeAdminMaintenanceFails;
private final Docker docker;
private final Environment environment;
private final Clock clock;
// Cached host memory size in GB; 0 means "not read yet" (see getHostTotalMemoryGb)
private double hostTotalMemoryGb = 0;
private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();
// Declares the maintenance-failure counter under the docker application metrics
// with a fixed role=docker dimension.
public StorageMaintainer(Docker docker, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
this.docker = docker;
this.environment = environment;
this.clock = clock;
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
}
/**
 * Writes secret-agent (yamas) metric-collection schedules for the container and
 * restarts the yamas agent inside it so the new schedules take effect.
 * Two schedules are written: a vespa check and a host-life check tagged with
 * node/owner/membership metadata from the node spec.
 */
public void writeMetricsConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");
Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
SecretAgentScheduleMaker vespaSchedule = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
.withTag("parentHostname", environment.getParentHostHostname());
Path hostLifeCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_host_life"));
SecretAgentScheduleMaker hostLifeSchedule = new SecretAgentScheduleMaker("host-life", 60, hostLifeCheckPath)
.withTag("namespace", "Vespa")
.withTag("role", "tenants")
.withTag("flavor", nodeSpec.nodeFlavor)
.withTag("canonicalFlavor", nodeSpec.nodeCanonicalFlavor)
.withTag("state", nodeSpec.nodeState.toString())
.withTag("zone", environment.getZone())
.withTag("parentHostname", environment.getParentHostHostname());
// Owner and membership tags are only present for allocated nodes
nodeSpec.owner.ifPresent(owner -> hostLifeSchedule
.withTag("tenantName", owner.tenant)
.withTag("app", owner.application + "." + owner.instance)
.withTag("applicationName", owner.application)
.withTag("instanceName", owner.instance)
.withTag("applicationId", owner.tenant + "." + owner.application + "." + owner.instance));
nodeSpec.membership.ifPresent(membership -> hostLifeSchedule
.withTag("clustertype", membership.clusterType)
.withTag("clusterid", membership.clusterId));
nodeSpec.vespaVersion.ifPresent(version -> hostLifeSchedule.withTag("vespaVersion", version));
try {
vespaSchedule.writeTo(yamasAgentFolder);
hostLifeSchedule.writeTo(yamasAgentFolder);
// Restart so the agent picks up the schedules we just wrote
final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
docker.executeInContainerAsRoot(containerName, restartYamasAgent);
} catch (IOException e) {
throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
}
}
/**
 * Generates and writes the filebeat config for the container.
 * Best-effort: any failure (including missing config) is logged and swallowed,
 * so log shipping problems never block node maintenance.
 */
public void writeFilebeatConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
try {
FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
if (!config.isPresent()) {
logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
return;
}
Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
Files.write(filebeatPath, config.get().getBytes());
logger.info("Wrote filebeat config.");
} catch (Throwable t) {
// Deliberately broad: filebeat config is non-critical
logger.error("Failed writing filebeat config; " + nodeSpec, t);
}
}
/**
 * Returns the disk usage of the given path in bytes, by running
 * "du -xsk" (summarize, single filesystem, kilobyte units) and converting to bytes.
 *
 * @throws RuntimeException if du does not finish within 60 seconds, writes to stderr,
 *         or produces output not on the expected "&lt;kB&gt;\t&lt;path&gt;" form
 */
long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
    final String[] command = {"du", "-xsk", path.toString()};
    Process duCommand = new ProcessBuilder().command(command).start();
    if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
        duCommand.destroy();
        throw new RuntimeException("Disk usage command timedout, aborting.");
    }
    String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
    String error = IOUtils.readAll(new InputStreamReader(duCommand.getErrorStream()));
    if (! error.isEmpty()) {
        throw new RuntimeException("Disk usage wrote to error log: " + error);
    }
    // du prints "<kilobytes>\t<path>\n"
    String[] results = output.split("\t");
    if (results.length != 2) {
        throw new RuntimeException("Result from disk usage command not as expected: " + output);
    }
    // parseLong avoids the pointless boxing of Long.valueOf
    long diskUsageKB = Long.parseLong(results[0]);
    return diskUsageKB * 1024;
}
/** Reads the contents of /proc/meminfo; empty if the file cannot be read (logged as a warning). */
Optional<String> readMeminfo() {
    Path meminfoPath = Paths.get("/proc/meminfo");
    try {
        byte[] contents = Files.readAllBytes(meminfoPath);
        return Optional.of(new String(contents));
    } catch (IOException e) {
        logger.log(LogLevel.WARNING, "Failed to read meminfo", e);
        return Optional.empty();
    }
}
/**
 * Returns the host's total memory in GB, parsed from /proc/meminfo and cached.
 * Returns 0 if meminfo cannot be read or parsed (the cache sentinel), in which
 * case the read is retried on the next call.
 */
public double getHostTotalMemoryGb() {
if (hostTotalMemoryGb == 0) {
readMeminfo().ifPresent(memInfo -> {
Matcher matcher = TOTAL_MEMORY_PATTERN.matcher(memInfo);
if (matcher.find()) {
// MemTotal is reported in kB; convert to GB
hostTotalMemoryGb = Integer.valueOf(matcher.group("totalMem")) / 1024d / 1024;
} else {
logger.log(LogLevel.WARNING, "Failed to parse total memory from meminfo: " + memInfo);
}
});
}
return hostTotalMemoryGb;
}
/**
 * Deletes old log files for vespa, nginx, logstash, etc.
 * Throttled to at most once per hour per container; queues delete jobs for
 * rotated logs (3 days), the log archive (31 days) and filedistribution
 * files (31 days), then runs them in one node-maintainer invocation.
 */
public void removeOldFilesFromNode(ContainerName containerName) {
if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
String[] pathsToClean = {
getDefaults().underVespaHome("logs/elasticsearch2"),
getDefaults().underVespaHome("logs/logstash2"),
getDefaults().underVespaHome("logs/daemontools_y"),
getDefaults().underVespaHome("logs/nginx"),
getDefaults().underVespaHome("logs/vespa")
};
for (String pathToClean : pathsToClean) {
Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
if (Files.exists(path)) {
// Rotated logs (*.log.*) older than 3 days
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", path)
.withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
.withArgument("fileNameRegex", ".*\\.log\\..+")
.withArgument("recursive", false);
// Query access logs older than 3 days
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", path)
.withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
.withArgument("fileNameRegex", ".*QueryAccessLog.*")
.withArgument("recursive", false);
}
}
Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(containerName,
getDefaults().underVespaHome("logs/vespa/logarchive"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", logArchiveDir)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", false);
Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(containerName,
getDefaults().underVespaHome("var/db/vespa/filedistribution"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", fileDistrDir)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", true);
maintainerExecutor.execute();
// Only update the throttle after a successful run, so failures retry promptly
getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
}
/**
 * Checks if container has any new coredumps, reports and archives them if so.
 * Throttled to at most once per hour per container. The attribute map is attached
 * to the coredump report fed to the coredump endpoint.
 */
public void handleCoreDumpsForContainer(ContainerName containerName, ContainerNodeSpec nodeSpec, Environment environment) {
if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow()) return;
Map<String, Object> attributes = new HashMap<>();
attributes.put("hostname", nodeSpec.hostname);
attributes.put("parent_hostname", HostName.getLocalhost());
attributes.put("region", environment.getRegion());
attributes.put("environment", environment.getEnvironment());
attributes.put("flavor", nodeSpec.nodeFlavor);
try {
attributes.put("kernel_version", getKernelVersion());
} catch (Throwable ignored) {
// Kernel version is informational only; never let it block coredump handling
attributes.put("kernel_version", "unknown");
}
nodeSpec.wantedDockerImage.ifPresent(image -> attributes.put("docker_image", image.asString()));
nodeSpec.vespaVersion.ifPresent(version -> attributes.put("vespa_version", version));
nodeSpec.owner.ifPresent(owner -> {
attributes.put("tenant", owner.tenant);
attributes.put("application", owner.application);
attributes.put("instance", owner.instance);
});
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
maintainerExecutor.addJob("handle-core-dumps")
.withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
.withArgument("coredumpsPath",
environment.pathInNodeAdminFromPathInNode(containerName,
getDefaults().underVespaHome("var/crash")))
.withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint())
.withArgument("attributes", attributes);
maintainerExecutor.execute();
getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
}
/**
 * Deletes old
 * * archived app data
 * * Vespa logs
 * * Filedistribution files
 * for the node-admin container itself. Throttled to at most once per hour.
 */
public void cleanNodeAdmin() {
if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
// Archived application storage directories older than 7 days
maintainerExecutor.addJob("delete-directories")
.withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
.withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
.withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));
Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, getDefaults().underVespaHome("logs/vespa/"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", nodeAdminJDiskLogsPath)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", false);
Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, getDefaults().underVespaHome("var/db/vespa/filedistribution"));
maintainerExecutor.addJob("delete-files")
.withArgument("basePath", fileDistrDir)
.withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
.withArgument("recursive", true);
maintainerExecutor.execute();
getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
}
/**
 * Archives container data, runs when container enters state "dirty".
 * Deletes the container's var/ directory, then moves the remaining container
 * filesystem into the node-cleanup archive area. Resets the throttler so a
 * reused container starts maintenance from scratch.
 */
public void archiveNodeData(ContainerName containerName) {
MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
maintainerExecutor.addJob("recursive-delete")
.withArgument("path", environment.pathInNodeAdminFromPathInNode(containerName, getDefaults().underVespaHome("var")));
maintainerExecutor.addJob("move-files")
.withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, "/"))
.withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
maintainerExecutor.execute();
getMaintenanceThrottlerFor(containerName).reset();
}
// Returns the host kernel version ("uname -r"), cached in a static Optional for
// the lifetime of the JVM.
// NOTE(review): the static cache is written without synchronization — concurrent
// first calls may both exec uname; appears harmless but confirm single-threaded use.
private String getKernelVersion() throws IOException, InterruptedException {
if (! kernelVersion.isPresent()) {
Pair<Integer, String> result = new ProcessExecuter().exec(new String[]{"uname", "-r"});
if (result.getFirst() == 0) {
kernelVersion = Optional.of(result.getSecond().trim());
} else {
throw new RuntimeException("Failed to get kernel version\n" + result);
}
}
// "unknown" is effectively unreachable here: the cache is either set or we threw
return kernelVersion.orElse("unknown");
}
/**
 * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM.
 * The queued jobs are serialized to JSON and passed as a single argument to the
 * Maintainer main class, executed as root inside the target container.
 */
private class MaintainerExecutor {
private final List<MaintainerExecutorJob> jobs = new ArrayList<>();
// Container in which the maintenance JVM is launched
private final ContainerName executeIn;
MaintainerExecutor(ContainerName executeIn) {
this.executeIn = executeIn;
}
// By default jobs run in the node-admin container itself
MaintainerExecutor() {
this(NODE_ADMIN);
}
// Queues a new job of the given type; arguments are added on the returned job
MaintainerExecutorJob addJob(String jobName) {
MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
jobs.add(job);
return job;
}
// Serializes all queued jobs to JSON and runs them in one maintenance JVM invocation.
// Bumps the failure counter and throws if the process exits unsuccessfully.
void execute() {
String args;
try {
args = objectMapper.writeValueAsString(jobs);
} catch (JsonProcessingException e) {
throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
}
String[] command = {"java",
"-cp", getDefaults().underVespaHome("lib/jars/node-maintainer-jar-with-dependencies.jar"),
"-Dvespa.log.target=file:" + getDefaults().underVespaHome("logs/vespa/maintainer.log"),
"com.yahoo.vespa.hosted.node.maintainer.Maintainer", args};
ProcessResult result = docker.executeInContainerAsRoot(executeIn, command);
if (! result.isSuccess()) {
numberOfNodeAdminMaintenanceFails.add();
throw new RuntimeException("Failed to run maintenance jobs: " + args + result);
}
}
}
/**
 * A single maintenance job description, serialized to JSON and handed to the
 * node-maintainer JVM: a job type plus a map of named arguments.
 */
private class MaintainerExecutorJob {
    @JsonProperty(value="type")
    private final String type;
    @JsonProperty(value="arguments")
    private final Map<String, Object> arguments = new HashMap<>();

    MaintainerExecutorJob(String type) {
        this.type = type;
    }

    /** Adds a named argument; Path values become strings so Jackson can serialize them. */
    MaintainerExecutorJob withArgument(String argument, Object value) {
        if (value instanceof Path) {
            arguments.put(argument, value.toString());
        } else {
            arguments.put(argument, value);
        }
        return this;
    }
}
/**
 * Returns the maintenance throttler for the given container, creating one on first use.
 * computeIfAbsent makes the check-and-insert atomic on the ConcurrentHashMap; the
 * previous containsKey/put/get sequence could race with a concurrent caller and
 * create two throttlers for the same container.
 */
private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
    return maintenanceThrottlerByContainerName.computeIfAbsent(containerName, ignored -> new MaintenanceThrottler());
}
/**
 * Per-container rate limiter for maintenance jobs: records the earliest instant
 * each job (old-file removal, coredump handling) may run again.
 */
private class MaintenanceThrottler {
    private Instant nextRemoveOldFilesAt;
    private Instant nextHandleOldCoredumpsAt;

    MaintenanceThrottler() {
        reset();
    }

    void updateNextRemoveOldFilesTime() {
        nextRemoveOldFilesAt = oneHourFromNow();
    }

    boolean shouldRemoveOldFilesNow() {
        return isDue(nextRemoveOldFilesAt);
    }

    void updateNextHandleCoredumpsTime() {
        nextHandleOldCoredumpsAt = oneHourFromNow();
    }

    boolean shouldHandleCoredumpsNow() {
        return isDue(nextHandleOldCoredumpsAt);
    }

    /** Makes both jobs due immediately (EPOCH is always in the past). */
    void reset() {
        nextRemoveOldFilesAt = Instant.EPOCH;
        nextHandleOldCoredumpsAt = Instant.EPOCH;
    }

    private Instant oneHourFromNow() {
        return clock.instant().plus(Duration.ofHours(1));
    }

    private boolean isDue(Instant deadline) {
        return !deadline.isAfter(clock.instant());
    }
}
} |
I think the intended meaning is "don't just add other checks here *without thinking*" | protected boolean summaryNeedsQuery(Query query) {
// With the query cache enabled the backend already has the query; no need to resend
if (query.getRanking().getQueryCache()) return false;
DocumentDatabase documentDb = getDocumentDatabase(query);
// Unknown summary class: be conservative and send the query
DocsumDefinition docsumDefinition = documentDb.getDocsumDefinitionSet().getDocsumDefinition(query.getPresentation().getSummary());
if (docsumDefinition == null) return true;
// Dynamic snippeting needs the query terms
if (docsumDefinition.isDynamic()) return true;
RankProfile rankProfile = documentDb.rankProfiles().get(query.getRanking().getProfile());
if (rankProfile == null) return true;
// Summary features / listed rank features are computed per query
if (rankProfile.hasSummaryFeatures()) return true;
if (query.getRanking().getListFeatures()) return true;
return false;
}
if (query.getRanking().getQueryCache()) return false;
DocumentDatabase documentDb = getDocumentDatabase(query);
DocsumDefinition docsumDefinition = documentDb.getDocsumDefinitionSet().getDocsumDefinition(query.getPresentation().getSummary());
if (docsumDefinition == null) return true;
if (docsumDefinition.isDynamic()) return true;
RankProfile rankProfile = documentDb.rankProfiles().get(query.getRanking().getProfile());
if (rankProfile == null) return true;
if (rankProfile.hasSummaryFeatures()) return true;
if (query.getRanking().getListFeatures()) return true;
return false;
} | class VespaBackEndSearcher extends PingableSearcher {
// Query property names consulted by this searcher
private static final CompoundName grouping=new CompoundName("grouping");
private static final CompoundName combinerows=new CompoundName("combinerows");
/** If this is turned on this will fill summaries by dispatching directly to search nodes over RPC */
private final static CompoundName dispatchSummaries = new CompoundName("dispatch.summaries");
protected static final CompoundName PACKET_COMPRESSION_LIMIT = new CompoundName("packetcompressionlimit");
protected static final CompoundName PACKET_COMPRESSION_TYPE = new CompoundName("packetcompressiontype");
protected static final CompoundName TRACE_DISABLE = new CompoundName("trace.disable");
/** The set of all document databases available in the backend handled by this searcher */
private Map<String, DocumentDatabase> documentDbs = new LinkedHashMap<>();
// The first database registered in init(); fallback when restrict does not pick one
private DocumentDatabase defaultDocumentDb = null;
/** Default docsum class. null means "unset" and is the default value */
private String defaultDocsumClass = null;
/** Returns an iterator which returns all hits below this result **/
protected Iterator<Hit> hitIterator(Result result) {
return result.hits().unorderedDeepIterator();
}
private boolean localDispatching = true;
/** The name of this source */
private String name;
/** Cache wrapper */
protected CacheControl cacheControl = null;
/**
 * The number of last significant bits in the partId which specifies the
 * row number in this backend,
 * the rest specifies the column. 0 if not known.
 */
private int rowBits = 0;
/** Searchcluster number */
private int sourceNumber;
protected final String getName() { return name; }
protected final String getDefaultDocsumClass() { return defaultDocsumClass; }
/** Sets default document summary class. Default is null */
private void setDefaultDocsumClass(String docsumClass) { defaultDocsumClass = docsumClass; }
/** Returns the packet cache controller of this */
public final CacheControl getCacheControl() { return cacheControl; }
/**
 * Searches a search cluster
 * This is an endpoint - searchers will never propagate the search to any nested searcher.
 *
 * @param query the query to search
 * @param queryPacket the serialized query representation to pass to the search cluster
 * @param cacheKey the cache key created from the query packet, or null if caching is not used
 * @param execution the query execution context
 */
protected abstract Result doSearch2(Query query, QueryPacket queryPacket, CacheKey cacheKey, Execution execution);
/** Fills the given summary class into the unfilled hits of the result. */
protected abstract void doPartialFill(Result result, String summaryClass);
// Whether summaries should be filled via direct RPC to search nodes.
// (Name has a historical typo — "SummarFill" — kept for compatibility with callers.)
protected static boolean wantsRPCSummarFill(Query query) {
return query.properties().getBoolean(dispatchSummaries);
}
/**
 * Looks up the result window [offset, offset+hits) in the packet cache.
 * Returns null on a cache miss (no cached entry, or the window is not fully covered).
 * Otherwise builds a result from the cached first-phase packet: hits are filled
 * from cached docsum packets when present, or added unfilled when only the
 * first-phase packet is cached.
 */
private Result cacheLookupFirstPhase(CacheKey key, QueryPacketData queryPacketData, Query query, int offset, int hits, String summaryClass) throws IOException {
PacketWrapper packetWrapper = cacheControl.lookup(key, query);
if (packetWrapper == null) return null;
List<DocumentInfo> documents = packetWrapper.getDocuments(offset, hits);
if (documents == null) return null;
if (query.getPresentation().getSummary() == null)
query.getPresentation().setSummary(getDefaultDocsumClass());
Result result = new Result(query);
QueryResultPacket resultPacket = packetWrapper.getFirstResultPacket();
addMetaInfo(query, queryPacketData, resultPacket, result, true);
// No docsum packets cached: hits must be added unfilled for a later fill pass
if (packetWrapper.getNumPackets() == 0)
addUnfilledHits(result, documents, true, queryPacketData, key);
else
addCachedHits(result, packetWrapper, summaryClass, documents);
return result;
}
/**
 * Returns the document database to use for this query: when the query restricts
 * to exactly one known document type, that type's database; otherwise the default.
 */
protected DocumentDatabase getDocumentDatabase(Query query) {
    if (query.getModel().getRestrict().size() == 1) {
        String restrictedType = (String) query.getModel().getRestrict().iterator().next();
        DocumentDatabase restrictedDb = documentDbs.get(restrictedType);
        if (restrictedDb != null) return restrictedDb;
    }
    return defaultDocumentDb;
}
/** Records the resolved document database name on the query model, if one was found. */
private void resolveDocumentDatabase(Query query) {
    DocumentDatabase database = getDocumentDatabase(query);
    if (database == null) return;
    query.getModel().setDocumentDb(database.getName());
}
/**
 * Initializes this searcher from cluster configuration: name, cluster number,
 * row bits, default docsum class, the set of document databases (the first
 * configured one becomes the default), and the packet cache controller.
 */
public final void init(SummaryParameters docSumParams, ClusterParams clusterParams, CacheParams cacheParams,
DocumentdbInfoConfig documentdbInfoConfig) {
this.name = clusterParams.searcherName;
this.sourceNumber = clusterParams.clusterNumber;
this.rowBits = clusterParams.rowBits;
Validator.ensureNotNull("Name of Vespa backend integration", getName());
setDefaultDocsumClass(docSumParams.defaultClass);
if (documentdbInfoConfig != null) {
for (DocumentdbInfoConfig.Documentdb docDb : documentdbInfoConfig.documentdb()) {
DocumentDatabase db = new DocumentDatabase(docDb, clusterParams.emulation);
// First configured database acts as the default
if (documentDbs.isEmpty()) {
defaultDocumentDb = db;
}
documentDbs.put(docDb.name(), db);
}
}
// A pre-built cache controller from params wins over constructing our own
if (cacheParams.cacheControl == null) {
this.cacheControl = new CacheControl(cacheParams.cacheMegaBytes, cacheParams.cacheTimeOutSeconds);
} else {
this.cacheControl = cacheParams.cacheControl;
}
}
/** Hook for subclasses to transform the query before dispatch; default does nothing. */
protected void transformQuery(Query query) { }
/**
 * Searches the backend: validates and rewrites the query, builds the query packet,
 * consults the packet cache, and falls back to doSearch2 on a cache miss.
 * This is an endpoint searcher — the search is never propagated further down a chain.
 */
public Result search(Query query, Execution execution) {
Item root = query.getModel().getQueryTree().getRoot();
if (root == null || root instanceof NullItem) {
return new Result(query, ErrorMessage.createNullQuery(query.getHttpRequest().getUri().toString()));
}
// RPC summary fill cannot resend the query, so the query cache must be on
if (wantsRPCSummarFill(query) && summaryNeedsQuery(query)) {
return new Result(query, ErrorMessage.createInvalidQueryParameter(
"When using dispatch.summaries and your summary/rankprofile require the query, " +
" you need to enable ranking.queryCache."));
}
QueryRewrite.optimizeByRestrict(query);
QueryRewrite.optimizeAndNot(query);
QueryRewrite.collapseSingleComposites(query);
// Rewrites may have emptied the query tree entirely
root = query.getModel().getQueryTree().getRoot();
if (root == null || root instanceof NullItem)
return new Result(query);
resolveDocumentDatabase(query);
transformQuery(query);
traceQuery(name, "search", query, query.getOffset(), query.getHits(), 1, Optional.<String>empty());
// transformQuery may also have emptied the tree
root = query.getModel().getQueryTree().getRoot();
if (root == null || root instanceof NullItem)
return new Result(query);
QueryPacket queryPacket = QueryPacket.create(query);
int compressionLimit = query.properties().getInteger(PACKET_COMPRESSION_LIMIT, 0);
queryPacket.setCompressionLimit(compressionLimit);
if (compressionLimit != 0)
queryPacket.setCompressionType(query.properties().getString(PACKET_COMPRESSION_TYPE, "lz4"));
if (isLoggingFine())
getLogger().fine("made QueryPacket: " + queryPacket);
Result result = null;
CacheKey cacheKey = null;
if (cacheControl.useCache(query)) {
cacheKey = new CacheKey(queryPacket);
result = getCached(cacheKey, queryPacket.getQueryPacketData(), query);
}
// Cache miss (or caching disabled): go to the backend
if (result == null) {
result = doSearch2(query, queryPacket, cacheKey, execution);
if (isLoggingFine())
getLogger().fine("Result NOT retrieved from cache");
if (query.getTraceLevel() >= 1)
query.trace(getName() + " dispatch response: " + result, false, 1);
result.trace(getName());
}
return result;
}
/**
 * Returns a cached result, or null if no result was cached for this key
 *
 * @param cacheKey the cache key created from the query packet
 * @param queryPacketData a serialization of the query, to avoid having to recompute this, or null if not available
 * @param query the query, used for tracing, lookup of result window and result creation
 */
private Result getCached(CacheKey cacheKey, QueryPacketData queryPacketData, Query query) {
if (query.getTraceLevel() >= 6) {
query.trace("Cache key hash: " + cacheKey.hashCode(), 6);
if (query.getTraceLevel() >= 8) {
query.trace("Cache key: " + HexDump.toHexString(cacheKey.getCopyOfFullKey()), 8);
}
}
try {
Result result = cacheLookupFirstPhase(cacheKey, queryPacketData, query, query.getOffset(), query.getHits(), query.getPresentation().getSummary());
if (result == null) return null;
if (isLoggingFine()) {
getLogger().fine("Result retrieved from cache: " + result);
}
if (query.getTraceLevel() >= 1) {
query.trace(getName() + " cached response: " + result, false, 1);
}
result.trace(getName());
return result;
}
catch (IOException e) {
// An IO error during cache lookup is reported as a backend error, not rethrown
Result result = new Result(query);
if (result.hits().getErrorHit() == null) {
result.hits().setError(ErrorMessage.createBackendCommunicationError(
"Fast Search (" + getName() + ") failed: " + e.getMessage()));
}
if (query.getTraceLevel() >= 1) {
query.trace(getName() + " error response: " + result, false, 1);
}
return result;
}
}
/**
 * Partitions the unfilled FastHits of the result into one sub-result per distinct
 * query instance (hits may carry their own query differing from the result's),
 * so each partition can be fill-dispatched with the right query.
 */
private List<Result> partitionHits(Result result, String summaryClass) {
List<Result> parts = new ArrayList<>();
// Identity set: partitions are keyed on query object identity, not equality
TinyIdentitySet<Query> queryMap = new TinyIdentitySet<>(4);
for (Iterator<Hit> i = hitIterator(result); i.hasNext(); ) {
Hit hit = i.next();
if (hit instanceof FastHit) {
FastHit fastHit = (FastHit) hit;
if ( ! fastHit.isFilled(summaryClass)) {
Query q = fastHit.getQuery();
if (q == null) {
q = result.hits().getQuery();
}
int idx = queryMap.indexOf(q);
if (idx < 0) {
// First hit for this query: open a new partition
idx = queryMap.size();
Result r = new Result(q);
parts.add(r);
queryMap.add(q);
}
parts.get(idx).hits().add(fastHit);
}
}
}
return parts;
}
/**
 * Fills the given summary class into the result's unfilled hits, dispatching one
 * partial fill per distinct query among the hits and merging any errors back in.
 */
@Override
public void fill(Result result, String summaryClass, Execution execution) {
if (result.isFilled(summaryClass)) return;
List<Result> parts= partitionHits(result, summaryClass);
if (parts.size() > 0) {
for (Result r : parts) {
doPartialFill(r, summaryClass);
mergeErrorsInto(result, r);
}
// Filling mutates hits in place; force re-sort and re-analysis
result.hits().setSorted(false);
result.analyzeHits();
}
}
/** Copies any error messages from the source result's error hit into the destination. */
private void mergeErrorsInto(Result destination, Result source) {
    ErrorHit errorHit = source.hits().getErrorHit();
    if (errorHit == null) return;
    for (ErrorMessage message : errorHit.errors()) {
        destination.hits().addError(message);
    }
}
/**
 * Traces the query as it is about to be dispatched to the backend: query tree,
 * timeout, window, rank profile and all relevant ranking/grouping settings.
 * No-op if the query's trace level is below the given level or tracing is disabled.
 */
static void traceQuery(String sourceName, String type, Query query, int offset, int hits, int level, Optional<String> quotedSummaryClass) {
if ((query.getTraceLevel()<level) || query.properties().getBoolean(TRACE_DISABLE)) return;
StringBuilder s = new StringBuilder();
s.append(sourceName).append(" " + type + " to dispatch: ")
.append("query=[")
.append(query.getModel().getQueryTree().getRoot().toString())
.append("]");
s.append(" timeout=").append(query.getTimeout()).append("ms");
s.append(" offset=")
.append(offset)
.append(" hits=")
.append(hits);
if (query.getRanking().hasRankProfile()) {
s.append(" rankprofile[")
.append(query.getRanking().getProfile())
.append("]");
}
if (query.getRanking().getFreshness() != null) {
s.append(" freshness=")
.append(query.getRanking().getFreshness().getRefTime());
}
if (query.getRanking().getSorting() != null) {
s.append(" sortspec=")
.append(query.getRanking().getSorting().fieldOrders().toString());
}
if (query.getRanking().getLocation() != null) {
s.append(" location=")
.append(query.getRanking().getLocation().toString());
}
List<Grouping> grouping = GroupingExecutor.getGroupingList(query);
s.append(" grouping=").append(grouping.size()).append(" : ");
for(Grouping g : grouping) {
s.append(g.toString());
}
if ( ! query.getRanking().getProperties().isEmpty()) {
s.append(" rankproperties=")
.append(query.getRanking().getProperties().toString());
}
if ( ! query.getRanking().getFeatures().isEmpty()) {
s.append(" rankfeatures=")
.append(query.getRanking().getFeatures().toString());
}
if (query.getModel().getRestrict() != null) {
s.append(" restrict=").append(query.getModel().getRestrict().toString());
}
if (quotedSummaryClass.isPresent()) {
s.append(" summary=").append(quotedSummaryClass.get());
}
query.trace(s.toString(), false, level);
// Progressively more detail at higher trace levels
if (query.isTraceable(level + 1)) {
query.trace("Current state of query tree: "
+ new TextualQueryRepresentation(query.getModel().getQueryTree().getRoot()),
false, level+1);
}
if (query.isTraceable(level + 2)) {
query.trace("YQL+ representation: " + query.yqlRepresentation(), level+2);
}
}
/**
 * Copies result metadata from the backend result packet onto the result:
 * total hit count, deserialized grouping data (as a GroupingListHit), and
 * coverage information when present.
 */
protected void addMetaInfo(Query query, QueryPacketData queryPacketData, QueryResultPacket resultPacket, Result result, boolean fromCache) {
result.setTotalHitCount(resultPacket.getTotalDocumentCount());
if (resultPacket.getGroupData() != null) {
byte[] data = resultPacket.getGroupData();
ArrayList<Grouping> list = new ArrayList<>();
BufferSerializer buf = new BufferSerializer(new GrowableByteBuffer(ByteBuffer.wrap(data)));
// Wire format: count followed by that many serialized Grouping objects
int cnt = buf.getInt(null);
for (int i = 0; i < cnt; i++) {
Grouping g = new Grouping();
g.deserialize(buf);
list.add(g);
}
GroupingListHit hit = new GroupingListHit(list, getDocsumDefinitionSet(query));
hit.setQuery(result.getQuery());
hit.setSource(getName());
hit.setSourceNumber(sourceNumber);
hit.setQueryPacketData(queryPacketData);
result.hits().add(hit);
}
if (resultPacket.getCoverageFeature()) {
result.setCoverage(new Coverage(resultPacket.getCoverageDocs(), resultPacket.getActiveDocs())
.setSoonActive(resultPacket.getSoonActiveDocs())
.setDegradedReason(resultPacket.getDegradedReason()));
}
}
/**
 * Decodes the docsum packet into the hit for the given summary class.
 * Returns false when there is nothing to fill (null packet or empty docsum data).
 */
private boolean fillHit(FastHit hit, DocsumPacket packet, String summaryClass) {
    if (packet == null) return false;
    byte[] docsumData = packet.getData();
    if (docsumData.length == 0) return false;
    decodeSummary(summaryClass, hit, docsumData);
    return true;
}
/**
 * Fills the hits.
 *
 * Consumes one docsum packet (starting at packetIndex) per unfilled FastHit,
 * in iteration order.
 *
 * @return the number of hits that we did not return data for, i.e
 * when things are working normally we return 0.
 */
protected int fillHits(Result result, int packetIndex, Packet[] packets, String summaryClass) throws IOException {
int skippedHits=0;
for (Iterator<Hit> i = hitIterator(result); i.hasNext();) {
Hit hit = i.next();
if (hit instanceof FastHit && !hit.isFilled(summaryClass)) {
FastHit fastHit = (FastHit) hit;
ensureInstanceOf(DocsumPacket.class, packets[packetIndex], getName());
DocsumPacket docsum = (DocsumPacket) packets[packetIndex];
packetIndex++;
if ( ! fillHit(fastHit, docsum, summaryClass))
skippedHits++;
}
}
// Filling mutates hits; the result must be re-sorted
result.hits().setSorted(false);
return skippedHits;
}
/**
 * Throws an IOException if the packet is not of the expected type.
 * Backend error packets are translated: error code 8 becomes a TimeoutException,
 * anything else an IOException carrying the packet.
 */
protected static void ensureInstanceOf(Class<? extends BasicPacket> type, BasicPacket packet, String name) throws IOException {
    if (type.isAssignableFrom(packet.getClass())) return;
    if ( ! (packet instanceof ErrorPacket))
        throw new IOException("Received " + packet + " when expecting " + type);
    ErrorPacket errorPacket = (ErrorPacket) packet;
    if (errorPacket.getErrorCode() == 8)  // backend error code 8: query timed out
        throw new TimeoutException("Query timed out in " + name);
    throw new IOException("Received error from backend in " + name + ": " + packet);
}
/**
 * Adds hits built from cached document info to the result, filling each from its
 * cached docsum packet when available.
 *
 * @return true only if every hit could be filled from the cache
 */
private boolean addCachedHits(Result result,
PacketWrapper packetWrapper,
String summaryClass,
List<DocumentInfo> documents) {
boolean filledAllOfEm = true;
Query myQuery = result.getQuery();
for (DocumentInfo document : documents) {
FastHit hit = new FastHit();
hit.setQuery(myQuery);
hit.setUseRowInIndexUri(useRowInIndexUri(result));
hit.setFillable();
hit.setCached(true);
extractDocumentInfo(hit, document);
DocsumPacket docsum = (DocsumPacket) packetWrapper.getPacket(document.getGlobalId(), document.getPartId(), summaryClass);
if (docsum != null) {
byte[] docsumdata = docsum.getData();
if (docsumdata.length > 0) {
decodeSummary(summaryClass, hit, docsumdata);
} else {
// Empty docsum blob: hit stays unfilled
filledAllOfEm = false;
}
} else {
filledAllOfEm = false;
}
result.hits().add(hit);
}
return filledAllOfEm;
}
/** Row is included in index URIs unless grouping or row combining is requested on the query. */
private boolean useRowInIndexUri(Result result) {
    Query query = result.getQuery();
    boolean groupingOrCombining = (query.properties().getString(grouping) != null)
                                  || query.properties().getBoolean(combinerows);
    return ! groupingOrCombining;
}
// Copies per-document backend metadata (relevance, distribution key, global id,
// partition) plus this searcher's identity onto the hit.
private void extractDocumentInfo(FastHit hit, DocumentInfo document) {
hit.setSourceNumber(sourceNumber);
hit.setSource(getName());
// The metric is the backend rank score
Number rank = document.getMetric();
hit.setRelevance(new Relevance(rank.doubleValue()));
hit.setDistributionKey(document.getDistributionKey());
hit.setGlobalId(document.getGlobalId());
hit.setPartId(document.getPartId(), rowBits);
}
/**
 * Second-phase (fill) cache lookup: fills as many of the result's FastHits as
 * possible from cached docsum packets for the given summary class.
 * Returns the packet wrapper for the key, or null on a cache miss.
 */
protected PacketWrapper cacheLookupTwoPhase(CacheKey cacheKey, Result result, String summaryClass) {
Query query = result.getQuery();
PacketWrapper packetWrapper = cacheControl.lookup(cacheKey, query);
if (packetWrapper == null) {
return null;
}
if (packetWrapper.getNumPackets() != 0) {
for (Iterator<Hit> i = hitIterator(result); i.hasNext();) {
Hit hit = i.next();
if (hit instanceof FastHit) {
FastHit fastHit = (FastHit) hit;
DocsumPacketKey key = new DocsumPacketKey(fastHit.getGlobalId(), fastHit.getPartId(), summaryClass);
if (fillHit(fastHit,
(DocsumPacket) packetWrapper.getPacket(key),
summaryClass)) {
fastHit.setCached(true);
}
}
}
// Filling mutates hits; force re-sort and re-analysis
result.hits().setSorted(false);
result.analyzeHits();
}
return packetWrapper;
}
/** Returns the docsum definitions of the document database resolved for this query. */
protected DocsumDefinitionSet getDocsumDefinitionSet(Query query) {
    return getDocumentDatabase(query).getDocsumDefinitionSet();
}
// Decodes docsum data into the hit using the query's document database, also
// tagging the hit with the database (sd document) name.
private void decodeSummary(String summaryClass, FastHit hit, byte[] docsumdata) {
DocumentDatabase db = getDocumentDatabase(hit.getQuery());
hit.setField(Hit.SDDOCNAME_FIELD, db.getName());
decodeSummary(summaryClass, hit, docsumdata, db.getDocsumDefinitionSet());
}
// Lazily decodes the docsum blob into hit fields and marks the summary class filled.
private void decodeSummary(String summaryClass, FastHit hit, byte[] docsumdata, DocsumDefinitionSet docsumSet) {
docsumSet.lazyDecode(summaryClass, docsumdata, hit);
hit.setFilled(summaryClass);
}
/**
 * Creates unfilled hits from a List of DocumentInfo instances. Do note
 * cacheKey should be available if a cache is active, even if the hit is not
 * created from a cache in the current call path.
 *
 * @param result the result to add the new hits to
 * @param documents the first-phase document descriptors to create hits from
 * @param fromCache whether these documents came from the packet cache
 * @param queryPacketData binary data from first phase of search, or null
 * @param cacheKey the key this hit should match in the packet cache, or null
 * @return true if every document produced a hit, false if any were skipped
 */
protected boolean addUnfilledHits(Result result, List<DocumentInfo> documents, boolean fromCache, QueryPacketData queryPacketData, CacheKey cacheKey) {
boolean allHitsOK = true;
Query myQuery = result.getQuery();
for (DocumentInfo document : documents) {
try {
FastHit hit = new FastHit();
hit.setQuery(myQuery);
if (queryPacketData != null)
hit.setQueryPacketData(queryPacketData);
hit.setCacheKey(cacheKey);
hit.setUseRowInIndexUri(useRowInIndexUri(result));
hit.setFillable();
hit.setCached(fromCache);
extractDocumentInfo(hit, document);
result.hits().add(hit);
// A bad hit is skipped rather than failing the whole result;
// configuration problems are logged at a lower severity than malformed data
} catch (ConfigurationException e) {
allHitsOK = false;
getLogger().log(LogLevel.WARNING, "Skipping hit", e);
} catch (Exception e) {
allHitsOK = false;
getLogger().log(LogLevel.ERROR, "Skipping malformed hit", e);
}
}
return allHitsOK;
}
/**
 * Instantiates a VespaBackEndSearcher subclass by fully qualified class name,
 * using its public no-argument constructor.
 *
 * @param s fully qualified name of a VespaBackEndSearcher subclass
 * @return a new instance of the named searcher
 * @throws RuntimeException if the class cannot be loaded, is not a
 *         VespaBackEndSearcher, or cannot be instantiated
 */
public static VespaBackEndSearcher getSearcher(String s) {
    try {
        Class<?> c = Class.forName(s);
        if (VespaBackEndSearcher.class.isAssignableFrom(c)) {
            for (Constructor<?> constructor : c.getConstructors()) {
                if (constructor.getParameterTypes().length == 0) {
                    return (VespaBackEndSearcher) constructor.newInstance();
                }
            }
            throw new RuntimeException("Failed initializing " + s);
        } else {
            throw new RuntimeException(s + " is not com.yahoo.prelude.fastsearch.VespaBackEndSearcher");
        }
    } catch (Exception e) {
        // Pass e as the cause so the original stack trace is preserved
        // (previously only its toString was kept)
        throw new RuntimeException("Failure loading class " + s + ", exception :" + e, e);
    }
}
/** Returns whether FINE level logging is enabled, to avoid building expensive log messages needlessly. */
protected boolean isLoggingFine() {
return getLogger().isLoggable(Level.FINE);
}
// Whether dispatching is local to this host; defaults to true
public boolean isLocalDispatching() { return localDispatching; }
public void setLocalDispatching(boolean localDispatching) {
this.localDispatching = localDispatching;
}
} | class VespaBackEndSearcher extends PingableSearcher {
private static final CompoundName grouping=new CompoundName("grouping");
private static final CompoundName combinerows=new CompoundName("combinerows");
/** If this is turned on this will fill summaries by dispatching directly to search nodes over RPC */
private final static CompoundName dispatchSummaries = new CompoundName("dispatch.summaries");
protected static final CompoundName PACKET_COMPRESSION_LIMIT = new CompoundName("packetcompressionlimit");
protected static final CompoundName PACKET_COMPRESSION_TYPE = new CompoundName("packetcompressiontype");
protected static final CompoundName TRACE_DISABLE = new CompoundName("trace.disable");
/** The set of all document databases available in the backend handled by this searcher */
private Map<String, DocumentDatabase> documentDbs = new LinkedHashMap<>();
private DocumentDatabase defaultDocumentDb = null;
/** Default docsum class. null means "unset" and is the default value */
private String defaultDocsumClass = null;
/** Returns an iterator which returns all hits below this result **/
protected Iterator<Hit> hitIterator(Result result) {
return result.hits().unorderedDeepIterator();
}
private boolean localDispatching = true;
/** The name of this source */
private String name;
/** Cache wrapper */
protected CacheControl cacheControl = null;
/**
* The number of last significant bits in the partId which specifies the
* row number in this backend,
* the rest specifies the column. 0 if not known.
*/
private int rowBits = 0;
/** Searchcluster number */
private int sourceNumber;
protected final String getName() { return name; }
protected final String getDefaultDocsumClass() { return defaultDocsumClass; }
/** Sets default document summary class. Default is null */
private void setDefaultDocsumClass(String docsumClass) { defaultDocsumClass = docsumClass; }
/** Returns the packet cache controller of this */
public final CacheControl getCacheControl() { return cacheControl; }
/**
* Searches a search cluster
* This is an endpoint - searchers will never propagate the search to any nested searcher.
*
* @param query the query to search
* @param queryPacket the serialized query representation to pass to the search cluster
* @param cacheKey the cache key created from the query packet, or null if caching is not used
* @param execution the query execution context
*/
protected abstract Result doSearch2(Query query, QueryPacket queryPacket, CacheKey cacheKey, Execution execution);
protected abstract void doPartialFill(Result result, String summaryClass);
/** Returns whether the query asks for summaries to be filled directly over RPC (dispatch.summaries). */
protected static boolean wantsRPCSummarFill(Query query) {
return query.properties().getBoolean(dispatchSummaries);
}
/**
 * Looks up the first-phase (document list) cache and builds a result from it.
 * NOTE(review): the comment previously here ("Returns whether we need to send the
 * query when fetching summaries...") described a different method and was misplaced.
 *
 * @return a result created from cached packets covering the requested window,
 *         or null if the window is not (fully) cached
 */
private Result cacheLookupFirstPhase(CacheKey key, QueryPacketData queryPacketData, Query query, int offset, int hits, String summaryClass) throws IOException {
PacketWrapper packetWrapper = cacheControl.lookup(key, query);
if (packetWrapper == null) return null;
List<DocumentInfo> documents = packetWrapper.getDocuments(offset, hits);
if (documents == null) return null;
if (query.getPresentation().getSummary() == null)
query.getPresentation().setSummary(getDefaultDocsumClass());
Result result = new Result(query);
QueryResultPacket resultPacket = packetWrapper.getFirstResultPacket();
addMetaInfo(query, queryPacketData, resultPacket, result, true);
// With no docsum packets cached, create unfilled hits; otherwise fill from cache
if (packetWrapper.getNumPackets() == 0)
addUnfilledHits(result, documents, true, queryPacketData, key);
else
addCachedHits(result, packetWrapper, summaryClass, documents);
return result;
}
/**
 * Returns the document database to use for this query: the database of the single
 * restricted document type when exactly one is given, otherwise the default database.
 */
protected DocumentDatabase getDocumentDatabase(Query query) {
    if (query.getModel().getRestrict().size() != 1) return defaultDocumentDb;
    String docTypeName = (String) query.getModel().getRestrict().iterator().next();
    DocumentDatabase database = documentDbs.get(docTypeName);
    return database != null ? database : defaultDocumentDb;
}
/** Records the name of the resolved document database on the query model, if one resolves. */
private void resolveDocumentDatabase(Query query) {
    DocumentDatabase database = getDocumentDatabase(query);
    if (database == null) return;
    query.getModel().setDocumentDb(database.getName());
}
/**
 * Initializes this searcher with cluster, summary, cache and document database configuration.
 * The first database listed in the config becomes the default database.
 */
public final void init(SummaryParameters docSumParams, ClusterParams clusterParams, CacheParams cacheParams,
DocumentdbInfoConfig documentdbInfoConfig) {
this.name = clusterParams.searcherName;
this.sourceNumber = clusterParams.clusterNumber;
this.rowBits = clusterParams.rowBits;
Validator.ensureNotNull("Name of Vespa backend integration", getName());
setDefaultDocsumClass(docSumParams.defaultClass);
if (documentdbInfoConfig != null) {
for (DocumentdbInfoConfig.Documentdb docDb : documentdbInfoConfig.documentdb()) {
DocumentDatabase db = new DocumentDatabase(docDb, clusterParams.emulation);
// The first configured database becomes the default
if (documentDbs.isEmpty()) {
defaultDocumentDb = db;
}
documentDbs.put(docDb.name(), db);
}
}
// Use the supplied cache controller if given, otherwise create one from the cache parameters
if (cacheParams.cacheControl == null) {
this.cacheControl = new CacheControl(cacheParams.cacheMegaBytes, cacheParams.cacheTimeOutSeconds);
} else {
this.cacheControl = cacheParams.cacheControl;
}
}
protected void transformQuery(Query query) { }
/**
 * Searches this backend: rewrites and transforms the query, serializes it to a query
 * packet, consults the packet cache, and falls back to dispatching to the backend.
 */
public Result search(Query query, Execution execution) {
Item root = query.getModel().getQueryTree().getRoot();
if (root == null || root instanceof NullItem) {
return new Result(query, ErrorMessage.createNullQuery(query.getHttpRequest().getUri().toString()));
}
// RPC summary fill cannot work when the summary needs the query unless the query cache is on
if (wantsRPCSummarFill(query) && summaryNeedsQuery(query)) {
return new Result(query, ErrorMessage.createInvalidQueryParameter(
"When using dispatch.summaries and your summary/rankprofile require the query, " +
" you need to enable ranking.queryCache."));
}
QueryRewrite.optimizeByRestrict(query);
QueryRewrite.optimizeAndNot(query);
QueryRewrite.collapseSingleComposites(query);
// Rewrites may have reduced the tree to nothing
root = query.getModel().getQueryTree().getRoot();
if (root == null || root instanceof NullItem)
return new Result(query);
resolveDocumentDatabase(query);
transformQuery(query);
traceQuery(name, "search", query, query.getOffset(), query.getHits(), 1, Optional.<String>empty());
// transformQuery may also have emptied the tree
root = query.getModel().getQueryTree().getRoot();
if (root == null || root instanceof NullItem)
return new Result(query);
QueryPacket queryPacket = QueryPacket.create(query);
int compressionLimit = query.properties().getInteger(PACKET_COMPRESSION_LIMIT, 0);
queryPacket.setCompressionLimit(compressionLimit);
if (compressionLimit != 0)
queryPacket.setCompressionType(query.properties().getString(PACKET_COMPRESSION_TYPE, "lz4"));
if (isLoggingFine())
getLogger().fine("made QueryPacket: " + queryPacket);
Result result = null;
CacheKey cacheKey = null;
if (cacheControl.useCache(query)) {
cacheKey = new CacheKey(queryPacket);
result = getCached(cacheKey, queryPacket.getQueryPacketData(), query);
}
// Cache miss (or caching disabled): dispatch to the backend
if (result == null) {
result = doSearch2(query, queryPacket, cacheKey, execution);
if (isLoggingFine())
getLogger().fine("Result NOT retrieved from cache");
if (query.getTraceLevel() >= 1)
query.trace(getName() + " dispatch response: " + result, false, 1);
result.trace(getName());
}
return result;
}
/**
* Returns a cached result, or null if no result was cached for this key
*
* @param cacheKey the cache key created from the query packet
* @param queryPacketData a serialization of the query, to avoid having to recompute this, or null if not available
* @param query the query, used for tracing, lookup of result window and result creation
*/
private Result getCached(CacheKey cacheKey, QueryPacketData queryPacketData, Query query) {
if (query.getTraceLevel() >= 6) {
query.trace("Cache key hash: " + cacheKey.hashCode(), 6);
if (query.getTraceLevel() >= 8) {
query.trace("Cache key: " + HexDump.toHexString(cacheKey.getCopyOfFullKey()), 8);
}
}
try {
Result result = cacheLookupFirstPhase(cacheKey, queryPacketData, query, query.getOffset(), query.getHits(), query.getPresentation().getSummary());
if (result == null) return null;
if (isLoggingFine()) {
getLogger().fine("Result retrieved from cache: " + result);
}
if (query.getTraceLevel() >= 1) {
query.trace(getName() + " cached response: " + result, false, 1);
}
result.trace(getName());
return result;
}
// A failed cache decode is reported as a backend communication error on an otherwise empty result
catch (IOException e) {
Result result = new Result(query);
if (result.hits().getErrorHit() == null) {
result.hits().setError(ErrorMessage.createBackendCommunicationError(
"Fast Search (" + getName() + ") failed: " + e.getMessage()));
}
if (query.getTraceLevel() >= 1) {
query.trace(getName() + " error response: " + result, false, 1);
}
return result;
}
}
/**
 * Partitions the unfilled FastHits of a result into one sub-result per distinct query
 * instance (compared by identity), so each partition can be filled separately.
 */
private List<Result> partitionHits(Result result, String summaryClass) {
List<Result> parts = new ArrayList<>();
TinyIdentitySet<Query> queryMap = new TinyIdentitySet<>(4);
for (Iterator<Hit> i = hitIterator(result); i.hasNext(); ) {
Hit hit = i.next();
if (hit instanceof FastHit) {
FastHit fastHit = (FastHit) hit;
if ( ! fastHit.isFilled(summaryClass)) {
Query q = fastHit.getQuery();
// Hits without their own query belong to the result's query
if (q == null) {
q = result.hits().getQuery();
}
int idx = queryMap.indexOf(q);
// First hit seen for this query: open a new partition
if (idx < 0) {
idx = queryMap.size();
Result r = new Result(q);
parts.add(r);
queryMap.add(q);
}
parts.get(idx).hits().add(fastHit);
}
}
}
return parts;
}
/** Fills the summary fields of the result's hits, one partial fill per distinct query. */
@Override
public void fill(Result result, String summaryClass, Execution execution) {
if (result.isFilled(summaryClass)) return;
List<Result> parts= partitionHits(result, summaryClass);
if (parts.size() > 0) {
for (Result r : parts) {
doPartialFill(r, summaryClass);
// Errors from the partial fill must surface on the original result
mergeErrorsInto(result, r);
}
result.hits().setSorted(false);
result.analyzeHits();
}
}
/** Copies all error messages of the source result into the destination result. */
private void mergeErrorsInto(Result destination, Result source) {
    ErrorHit errorHit = source.hits().getErrorHit();
    if (errorHit == null) return;
    for (ErrorMessage error : errorHit.errors()) {
        destination.hits().addError(error);
    }
}
/**
 * Traces a one-line summary of the query being dispatched (plus, at higher trace
 * levels, the query tree and its YQL+ form). No-op below the given trace level or
 * when tracing is disabled by the trace.disable property.
 */
static void traceQuery(String sourceName, String type, Query query, int offset, int hits, int level, Optional<String> quotedSummaryClass) {
if ((query.getTraceLevel()<level) || query.properties().getBoolean(TRACE_DISABLE)) return;
StringBuilder s = new StringBuilder();
s.append(sourceName).append(" " + type + " to dispatch: ")
.append("query=[")
.append(query.getModel().getQueryTree().getRoot().toString())
.append("]");
s.append(" timeout=").append(query.getTimeout()).append("ms");
s.append(" offset=")
.append(offset)
.append(" hits=")
.append(hits);
if (query.getRanking().hasRankProfile()) {
s.append(" rankprofile[")
.append(query.getRanking().getProfile())
.append("]");
}
if (query.getRanking().getFreshness() != null) {
s.append(" freshness=")
.append(query.getRanking().getFreshness().getRefTime());
}
if (query.getRanking().getSorting() != null) {
s.append(" sortspec=")
.append(query.getRanking().getSorting().fieldOrders().toString());
}
if (query.getRanking().getLocation() != null) {
s.append(" location=")
.append(query.getRanking().getLocation().toString());
}
List<Grouping> grouping = GroupingExecutor.getGroupingList(query);
s.append(" grouping=").append(grouping.size()).append(" : ");
for(Grouping g : grouping) {
s.append(g.toString());
}
if ( ! query.getRanking().getProperties().isEmpty()) {
s.append(" rankproperties=")
.append(query.getRanking().getProperties().toString());
}
if ( ! query.getRanking().getFeatures().isEmpty()) {
s.append(" rankfeatures=")
.append(query.getRanking().getFeatures().toString());
}
if (query.getModel().getRestrict() != null) {
s.append(" restrict=").append(query.getModel().getRestrict().toString());
}
if (quotedSummaryClass.isPresent()) {
s.append(" summary=").append(quotedSummaryClass.get());
}
query.trace(s.toString(), false, level);
// Deeper trace levels get progressively more expensive representations
if (query.isTraceable(level + 1)) {
query.trace("Current state of query tree: "
+ new TextualQueryRepresentation(query.getModel().getQueryTree().getRoot()),
false, level+1);
}
if (query.isTraceable(level + 2)) {
query.trace("YQL+ representation: " + query.yqlRepresentation(), level+2);
}
}
/**
 * Transfers total hit count, grouping data and coverage information from a backend
 * result packet into the result.
 * NOTE(review): the fromCache parameter is unused in this body — confirm whether it can be dropped.
 */
protected void addMetaInfo(Query query, QueryPacketData queryPacketData, QueryResultPacket resultPacket, Result result, boolean fromCache) {
result.setTotalHitCount(resultPacket.getTotalDocumentCount());
if (resultPacket.getGroupData() != null) {
byte[] data = resultPacket.getGroupData();
ArrayList<Grouping> list = new ArrayList<>();
BufferSerializer buf = new BufferSerializer(new GrowableByteBuffer(ByteBuffer.wrap(data)));
// The serialized form is a count followed by that many Grouping objects
int cnt = buf.getInt(null);
for (int i = 0; i < cnt; i++) {
Grouping g = new Grouping();
g.deserialize(buf);
list.add(g);
}
GroupingListHit hit = new GroupingListHit(list, getDocsumDefinitionSet(query));
hit.setQuery(result.getQuery());
hit.setSource(getName());
hit.setSourceNumber(sourceNumber);
hit.setQueryPacketData(queryPacketData);
result.hits().add(hit);
}
if (resultPacket.getCoverageFeature()) {
result.setCoverage(new Coverage(resultPacket.getCoverageDocs(), resultPacket.getActiveDocs())
.setSoonActive(resultPacket.getSoonActiveDocs())
.setDegradedReason(resultPacket.getDegradedReason()));
}
}
/**
 * Decodes the given docsum packet into the hit.
 *
 * @return true if the hit was filled, false if the packet was null or empty
 */
private boolean fillHit(FastHit hit, DocsumPacket packet, String summaryClass) {
    if (packet == null) return false;
    byte[] summaryData = packet.getData();
    if (summaryData.length == 0) return false;
    decodeSummary(summaryClass, hit, summaryData);
    return true;
}
/**
* Fills the hits.
*
* @return the number of hits that we did not return data for, i.e
* when things are working normally we return 0.
*/
protected int fillHits(Result result, int packetIndex, Packet[] packets, String summaryClass) throws IOException {
int skippedHits=0;
// Packets arrive in the same order as the result's unfilled FastHits
for (Iterator<Hit> i = hitIterator(result); i.hasNext();) {
Hit hit = i.next();
if (hit instanceof FastHit && !hit.isFilled(summaryClass)) {
FastHit fastHit = (FastHit) hit;
// Throws IOException (or TimeoutException) if the backend sent an error packet
ensureInstanceOf(DocsumPacket.class, packets[packetIndex], getName());
DocsumPacket docsum = (DocsumPacket) packets[packetIndex];
packetIndex++;
if ( ! fillHit(fastHit, docsum, summaryClass))
skippedHits++;
}
}
result.hits().setSorted(false);
return skippedHits;
}
/**
 * Throws an IOException if the packet is not of the expected type.
 * Error packets are translated: a timeout error becomes a TimeoutException,
 * any other error an IOException carrying the packet's description.
 */
protected static void ensureInstanceOf(Class<? extends BasicPacket> type, BasicPacket packet, String name) throws IOException {
if ((type.isAssignableFrom(packet.getClass()))) return;
if (packet instanceof ErrorPacket) {
ErrorPacket errorPacket=(ErrorPacket)packet;
// NOTE(review): 8 is presumably the backend's timeout error code — confirm against the packet protocol
if (errorPacket.getErrorCode() == 8)
throw new TimeoutException("Query timed out in " + name);
else
throw new IOException("Received error from backend in " + name + ": " + packet);
} else {
throw new IOException("Received " + packet + " when expecting " + type);
}
}
/**
 * Creates hits from cached documents, filling each from its cached docsum packet when present.
 *
 * @return true if every hit could be filled from the cache, false otherwise
 */
private boolean addCachedHits(Result result,
PacketWrapper packetWrapper,
String summaryClass,
List<DocumentInfo> documents) {
boolean filledAllOfEm = true;
Query myQuery = result.getQuery();
for (DocumentInfo document : documents) {
FastHit hit = new FastHit();
hit.setQuery(myQuery);
hit.setUseRowInIndexUri(useRowInIndexUri(result));
hit.setFillable();
hit.setCached(true);
extractDocumentInfo(hit, document);
DocsumPacket docsum = (DocsumPacket) packetWrapper.getPacket(document.getGlobalId(), document.getPartId(), summaryClass);
if (docsum != null) {
byte[] docsumdata = docsum.getData();
if (docsumdata.length > 0) {
decodeSummary(summaryClass, hit, docsumdata);
} else {
// Empty packet: the hit is added but remains unfilled
filledAllOfEm = false;
}
} else {
filledAllOfEm = false;
}
result.hits().add(hit);
}
return filledAllOfEm;
}
/**
 * Returns whether the row number should be included in index URIs.
 * The row is used only when neither grouping nor row combining is requested for the query.
 */
private boolean useRowInIndexUri(Result result) {
    Query query = result.getQuery();
    boolean usesGrouping = query.properties().getString(grouping) != null;
    boolean combinesRows = query.properties().getBoolean(combinerows);
    return ! usesGrouping && ! combinesRows;
}
/** Copies identifying and ranking information from a backend DocumentInfo into the hit. */
private void extractDocumentInfo(FastHit hit, DocumentInfo document) {
// Tag the hit with this searcher's cluster number and name as its source
hit.setSourceNumber(sourceNumber);
hit.setSource(getName());
Number rank = document.getMetric();
hit.setRelevance(new Relevance(rank.doubleValue()));
hit.setDistributionKey(document.getDistributionKey());
hit.setGlobalId(document.getGlobalId());
// rowBits tells how many low bits of the part id encode the row number
hit.setPartId(document.getPartId(), rowBits);
}
/**
 * Looks up cached docsum packets for the hits of an already-searched result (second phase),
 * filling each FastHit whose packet is present and non-empty.
 *
 * @return the cached packet wrapper for this key, or null if nothing is cached
 */
protected PacketWrapper cacheLookupTwoPhase(CacheKey cacheKey, Result result, String summaryClass) {
Query query = result.getQuery();
PacketWrapper packetWrapper = cacheControl.lookup(cacheKey, query);
if (packetWrapper == null) {
return null;
}
if (packetWrapper.getNumPackets() != 0) {
for (Iterator<Hit> i = hitIterator(result); i.hasNext();) {
Hit hit = i.next();
if (hit instanceof FastHit) {
FastHit fastHit = (FastHit) hit;
DocsumPacketKey key = new DocsumPacketKey(fastHit.getGlobalId(), fastHit.getPartId(), summaryClass);
// fillHit tolerates a null packet and returns false in that case
if (fillHit(fastHit,
(DocsumPacket) packetWrapper.getPacket(key),
summaryClass)) {
fastHit.setCached(true);
}
}
}
// Filling may have changed hit fields, so re-sort and re-analyze
result.hits().setSorted(false);
result.analyzeHits();
}
return packetWrapper;
}
/** Returns the docsum definitions of the document database which handles the given query. */
protected DocsumDefinitionSet getDocsumDefinitionSet(Query query) {
    return getDocumentDatabase(query).getDocsumDefinitionSet();
}
/** Decodes summary data into the hit, resolving the docsum definitions from the hit's query. */
private void decodeSummary(String summaryClass, FastHit hit, byte[] docsumdata) {
DocumentDatabase db = getDocumentDatabase(hit.getQuery());
// Record which document type produced this hit
hit.setField(Hit.SDDOCNAME_FIELD, db.getName());
decodeSummary(summaryClass, hit, docsumdata, db.getDocsumDefinitionSet());
}
/** Decodes summary data into the hit using the given docsum definitions, then marks the hit filled. */
private void decodeSummary(String summaryClass, FastHit hit, byte[] docsumdata, DocsumDefinitionSet docsumSet) {
docsumSet.lazyDecode(summaryClass, docsumdata, hit);
hit.setFilled(summaryClass);
}
/**
* Creates unfilled hits from a List of DocumentInfo instances. Do note
* cacheKey should be available if a cache is active, even if the hit is not
* created from a cache in the current call path.
*
* @param queryPacketData binary data from first phase of search, or null
* @param cacheKey the key this hit should match in the packet cache, or null
*/
protected boolean addUnfilledHits(Result result, List<DocumentInfo> documents, boolean fromCache, QueryPacketData queryPacketData, CacheKey cacheKey) {
boolean allHitsOK = true;
Query myQuery = result.getQuery();
for (DocumentInfo document : documents) {
try {
FastHit hit = new FastHit();
hit.setQuery(myQuery);
if (queryPacketData != null)
hit.setQueryPacketData(queryPacketData);
hit.setCacheKey(cacheKey);
hit.setUseRowInIndexUri(useRowInIndexUri(result));
hit.setFillable();
hit.setCached(fromCache);
extractDocumentInfo(hit, document);
result.hits().add(hit);
// A bad hit is skipped rather than failing the whole result;
// configuration problems are logged at a lower severity than malformed data
} catch (ConfigurationException e) {
allHitsOK = false;
getLogger().log(LogLevel.WARNING, "Skipping hit", e);
} catch (Exception e) {
allHitsOK = false;
getLogger().log(LogLevel.ERROR, "Skipping malformed hit", e);
}
}
return allHitsOK;
}
/**
 * Instantiates a VespaBackEndSearcher subclass by fully qualified class name,
 * using its public no-argument constructor.
 *
 * @param s fully qualified name of a VespaBackEndSearcher subclass
 * @return a new instance of the named searcher
 * @throws RuntimeException if the class cannot be loaded, is not a
 *         VespaBackEndSearcher, or cannot be instantiated
 */
public static VespaBackEndSearcher getSearcher(String s) {
    try {
        Class<?> c = Class.forName(s);
        if (VespaBackEndSearcher.class.isAssignableFrom(c)) {
            for (Constructor<?> constructor : c.getConstructors()) {
                if (constructor.getParameterTypes().length == 0) {
                    return (VespaBackEndSearcher) constructor.newInstance();
                }
            }
            throw new RuntimeException("Failed initializing " + s);
        } else {
            throw new RuntimeException(s + " is not com.yahoo.prelude.fastsearch.VespaBackEndSearcher");
        }
    } catch (Exception e) {
        // Pass e as the cause so the original stack trace is preserved
        // (previously only its toString was kept)
        throw new RuntimeException("Failure loading class " + s + ", exception :" + e, e);
    }
}
/** Returns whether FINE level logging is enabled, to avoid building expensive log messages needlessly. */
protected boolean isLoggingFine() {
return getLogger().isLoggable(Level.FINE);
}
// Whether dispatching is local to this host; defaults to true
public boolean isLocalDispatching() { return localDispatching; }
public void setLocalDispatching(boolean localDispatching) {
this.localDispatching = localDispatching;
}
} | |
It is not immediately obvious whether this is correct (though it probably is). In the provision controller we do this: https://git.corp.yahoo.com/vespa/hosted/blob/master/controller/provision-controller/src/main/java/com/yahoo/vespa/hosted/provision/controller/ProvisionController.java#L160 | public String getIpv4Address() {
// Dotted-quad IPv4 with each octet 0-255; String.matches() tests the entire string
String ipv4Regex = "^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\\.){3}(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$";
// Return the first configured address that parses as IPv4; null when none do
for (String ipAddress : ipAddresses) {
if (ipAddress.matches(ipv4Regex)) {
return ipAddress;
}
}
return null;
} | String ipv4Regex = "^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\\.){3}(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$"; | public String getIpv4Address() {
// Dotted-quad IPv4 with each octet 0-255; String.matches() tests the entire string
String ipv4Regex = "^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\\.){3}(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$";
// Return the first configured address that parses as IPv4; null when none do
for (String ipAddress : ipAddresses) {
if (ipAddress.matches(ipv4Regex)) {
return ipAddress;
}
}
return null;
} | class NodeRepoJsonModel {
@JsonProperty("minDiskAvailableGb")
private double minDiskAvailableGb;
@JsonProperty("minMainMemoryAvailableGb")
private double minMainMemoryAvailableGb;
@JsonProperty("minCpuCores")
private double minCpuCores;
@JsonProperty("fastDisk")
private boolean fastDisk;
@JsonProperty("ipAddresses")
private String[] ipAddresses;
@JsonProperty("additionalIpAddresses")
private String[] additionalIpAddresses;
@JsonProperty
private String hostname;
@JsonProperty
private String environment;
public String[] getAdditionalIpAddresses() {
return additionalIpAddresses;
}
/** Copies this node's JSON-mapped resource fields into a new HardwareInfo. */
public HardwareInfo copyToHardwareInfo() {
    HardwareInfo info = new HardwareInfo();
    info.setMinDiskAvailableGb(minDiskAvailableGb);
    info.setMinMainMemoryAvailableGb(minMainMemoryAvailableGb);
    // CPU cores arrive as a double from the node repo; round to nearest whole core
    info.setMinCpuCores((int) Math.round(minCpuCores));
    if (fastDisk) {
        info.setDiskType(DiskType.FAST);
    } else {
        info.setDiskType(DiskType.SLOW);
    }
    info.setIpv6Connection(getIpv6Address() != null);
    return info;
}
/** Returns the first configured address that looks like an IPv6 address, or null if none match. */
public String getIpv6Address() {
    // NOTE(review): String.matches() already tests the whole string, so the anchors are redundant here
    String ipv6Pattern = "^((?:[0-9A-Fa-f]{1,4}))((?::[0-9A-Fa-f]{1,4}))*::((?:[0-9A-Fa-f]{1,4}))((?::[0-9A-Fa-f]{1,4}))*|((?:[0-9A-Fa-f]{1,4}))((?::[0-9A-Fa-f]{1,4})){7}$";
    for (int i = 0; i < ipAddresses.length; i++) {
        String candidate = ipAddresses[i];
        if (candidate.matches(ipv6Pattern)) return candidate;
    }
    return null;
}
// Plain accessors for the JSON-mapped fields.
// NOTE(review): getIpAddresses exposes the internal array without copying — callers can mutate it; confirm intended.
public double getMinDiskAvailableGb() {
return minDiskAvailableGb;
}
public double getMinMainMemoryAvailableGb() {
return minMainMemoryAvailableGb;
}
public double getMinCpuCores() {
return minCpuCores;
}
public boolean isFastDisk() {
return fastDisk;
}
public String[] getIpAddresses() {
return ipAddresses;
}
public String getHostname() {
return hostname;
}
public String getEnvironment() {
return environment;
}
} | class NodeRepoJsonModel {
@JsonProperty("minDiskAvailableGb")
private double minDiskAvailableGb;
@JsonProperty("minMainMemoryAvailableGb")
private double minMainMemoryAvailableGb;
@JsonProperty("minCpuCores")
private double minCpuCores;
@JsonProperty("fastDisk")
private boolean fastDisk;
@JsonProperty("ipAddresses")
private String[] ipAddresses;
@JsonProperty("additionalIpAddresses")
private String[] additionalIpAddresses;
@JsonProperty
private String hostname;
@JsonProperty
private String environment;
public String[] getAdditionalIpAddresses() {
return additionalIpAddresses;
}
/** Copies this node's JSON-mapped resource fields into a new HardwareInfo. */
public HardwareInfo copyToHardwareInfo() {
    HardwareInfo info = new HardwareInfo();
    info.setMinDiskAvailableGb(minDiskAvailableGb);
    info.setMinMainMemoryAvailableGb(minMainMemoryAvailableGb);
    // CPU cores arrive as a double from the node repo; round to nearest whole core
    info.setMinCpuCores((int) Math.round(minCpuCores));
    if (fastDisk) {
        info.setDiskType(DiskType.FAST);
    } else {
        info.setDiskType(DiskType.SLOW);
    }
    info.setIpv6Connection(getIpv6Address() != null);
    return info;
}
/** Returns the first configured address that looks like an IPv6 address, or null if none match. */
public String getIpv6Address() {
    // NOTE(review): String.matches() already tests the whole string, so the anchors are redundant here
    String ipv6Pattern = "^((?:[0-9A-Fa-f]{1,4}))((?::[0-9A-Fa-f]{1,4}))*::((?:[0-9A-Fa-f]{1,4}))((?::[0-9A-Fa-f]{1,4}))*|((?:[0-9A-Fa-f]{1,4}))((?::[0-9A-Fa-f]{1,4})){7}$";
    for (int i = 0; i < ipAddresses.length; i++) {
        String candidate = ipAddresses[i];
        if (candidate.matches(ipv6Pattern)) return candidate;
    }
    return null;
}
public double getMinDiskAvailableGb() {
return minDiskAvailableGb;
}
public double getMinMainMemoryAvailableGb() {
return minMainMemoryAvailableGb;
}
public double getMinCpuCores() {
return minCpuCores;
}
public boolean isFastDisk() {
return fastDisk;
}
public String[] getIpAddresses() {
return ipAddresses;
}
public String getHostname() {
return hostname;
}
public String getEnvironment() {
return environment;
}
} |
Same as above. | public void getConfig(ProviderConfig.Builder builder) {
// Copy only the explicitly configured optional settings into the builder
if (providerSpec.path != null)
builder.path(providerSpec.path);
if (providerSpec.connectionParameters.readTimeout != null)
builder.readTimeout(providerSpec.connectionParameters.readTimeout );
if (providerSpec.connectionParameters.connectionTimeout != null)
builder.connectionTimeout(providerSpec.connectionParameters.connectionTimeout);
if (providerSpec.connectionParameters.connectionPoolTimeout != null)
builder.connectionPoolTimeout(providerSpec.connectionParameters.connectionPoolTimeout);
if (providerSpec.connectionParameters.retries != null)
builder.retries(providerSpec.connectionParameters.retries);
builder.node(getNodes(providerSpec.nodes));
// YCA certificate config is only emitted when an application id is configured
if (providerSpec.ycaApplicationId != null) {
builder.yca(getCertificate(providerSpec));
}
} | if (providerSpec.ycaApplicationId != null) { | public void getConfig(ProviderConfig.Builder builder) {
// Copy only the explicitly configured optional settings into the builder
if (providerSpec.path != null)
builder.path(providerSpec.path);
if (providerSpec.connectionParameters.readTimeout != null)
builder.readTimeout(providerSpec.connectionParameters.readTimeout );
if (providerSpec.connectionParameters.connectionTimeout != null)
builder.connectionTimeout(providerSpec.connectionParameters.connectionTimeout);
if (providerSpec.connectionParameters.connectionPoolTimeout != null)
builder.connectionPoolTimeout(providerSpec.connectionParameters.connectionPoolTimeout);
if (providerSpec.connectionParameters.retries != null)
builder.retries(providerSpec.connectionParameters.retries);
builder.node(getNodes(providerSpec.nodes));
// YCA certificate config is only emitted when an application id is configured
if (providerSpec.ycaApplicationId != null) {
builder.yca(getCertificate(providerSpec));
}
} | class HttpProvider extends Provider implements ProviderConfig.Producer,
QrBinaryCacheConfig.Producer,
QrBinaryCacheRegionConfig.Producer {
private final HttpProviderSpec providerSpec;
private BinaryScaledAmount cacheSize;
public double getCacheWeight() {
return providerSpec.cacheWeight;
}
/**
* TODO: remove, for backward compatibility only.
*/
public void setCacheSize(BinaryScaledAmount cacheSize) {
this.cacheSize = cacheSize;
}
/**
 * Creates an http provider chain.
 *
 * @param specWithoutInnerSearchers the chain specification, without its inner searchers
 * @param federationOptions the federation options for this provider
 * @param providerSpec connection and certificate settings for the backend
 */
public HttpProvider(ChainSpecification specWithoutInnerSearchers, FederationOptions federationOptions, HttpProviderSpec providerSpec) {
super(specWithoutInnerSearchers, federationOptions);
this.providerSpec = providerSpec;
}
/**
 * Builds the YCA certificate config from the provider spec.
 * NOTE(review): this method carried an {@code @Override} annotation, which is invalid on a
 * private static method (it overrides nothing) and does not compile; removed.
 */
private static Yca.Builder getCertificate(HttpProviderSpec providerSpec) {
    Yca.Builder certificate = new Yca.Builder()
            .applicationId(providerSpec.ycaApplicationId);
    if (providerSpec.ycaProxy != null) {
        certificate.useProxy(true);
        if (providerSpec.ycaProxy.host != null) {
            certificate.host(providerSpec.ycaProxy.host)
                       .port(providerSpec.ycaProxy.port);
        }
    }
    if (providerSpec.ycaCertificateTtl != null) certificate.ttl(providerSpec.ycaCertificateTtl);
    // FIXME: this overwrites the ttl set above with the retry-wait value;
    // it almost certainly should call the builder's retry-wait setter instead.
    if (providerSpec.ycaRetryWait != null) certificate.ttl(providerSpec.ycaRetryWait);
    return certificate;
}
/** Converts node specs into config node builders carrying host and port. */
private static List<Node.Builder> getNodes(List<HttpProviderSpec.Node> nodeSpecs) {
    List<Node.Builder> result = new ArrayList<>();
    for (HttpProviderSpec.Node nodeSpec : nodeSpecs) {
        Node.Builder node = new Node.Builder();
        node.host(nodeSpec.host);
        node.port(nodeSpec.port);
        result.add(node);
    }
    return result;
}
/**
 * Returns the configured cache size in megabytes, falling back to the
 * deprecated cacheSize amount when the spec does not set one.
 * NOTE(review): throws NullPointerException if neither is set — confirm callers guarantee one.
 */
public int cacheSizeMB() {
return providerSpec.cacheSizeMB != null ?
providerSpec.cacheSizeMB :
(int) cacheSize.as(BinaryPrefix.mega);
}
// Both binary cache configs are sized from the same cacheSizeMB() value
@Override
public void getConfig(QrBinaryCacheConfig.Builder builder) {
builder.cache_size(cacheSizeMB());
}
@Override
public void getConfig(QrBinaryCacheRegionConfig.Builder builder) {
builder.region_size(cacheSizeMB());
}
} | class HttpProvider extends Provider implements ProviderConfig.Producer,
QrBinaryCacheConfig.Producer,
QrBinaryCacheRegionConfig.Producer {
private final HttpProviderSpec providerSpec;
private BinaryScaledAmount cacheSize;
public double getCacheWeight() {
return providerSpec.cacheWeight;
}
/**
* TODO: remove, for backward compatibility only.
*/
public void setCacheSize(BinaryScaledAmount cacheSize) {
this.cacheSize = cacheSize;
}
/*
* Config producer for the contained http searcher..
*/
public HttpProvider(ChainSpecification specWithoutInnerSearchers, FederationOptions federationOptions, HttpProviderSpec providerSpec) {
super(specWithoutInnerSearchers, federationOptions);
this.providerSpec = providerSpec;
}
/**
 * Builds the YCA certificate config from the provider spec.
 * NOTE(review): this method carried an {@code @Override} annotation, which is invalid on a
 * private static method (it overrides nothing) and does not compile; removed.
 */
private static Yca.Builder getCertificate(HttpProviderSpec providerSpec) {
    Yca.Builder certificate = new Yca.Builder()
            .applicationId(providerSpec.ycaApplicationId);
    if (providerSpec.ycaProxy != null) {
        certificate.useProxy(true);
        if (providerSpec.ycaProxy.host != null) {
            certificate.host(providerSpec.ycaProxy.host)
                       .port(providerSpec.ycaProxy.port);
        }
    }
    if (providerSpec.ycaCertificateTtl != null) certificate.ttl(providerSpec.ycaCertificateTtl);
    // FIXME: this overwrites the ttl set above with the retry-wait value;
    // it almost certainly should call the builder's retry-wait setter instead.
    if (providerSpec.ycaRetryWait != null) certificate.ttl(providerSpec.ycaRetryWait);
    return certificate;
}
/** Converts node specs into config node builders carrying host and port. */
private static List<Node.Builder> getNodes(List<HttpProviderSpec.Node> nodeSpecs) {
    List<Node.Builder> result = new ArrayList<>();
    for (HttpProviderSpec.Node nodeSpec : nodeSpecs) {
        Node.Builder node = new Node.Builder();
        node.host(nodeSpec.host);
        node.port(nodeSpec.port);
        result.add(node);
    }
    return result;
}
public int cacheSizeMB() {
return providerSpec.cacheSizeMB != null ?
providerSpec.cacheSizeMB :
(int) cacheSize.as(BinaryPrefix.mega);
}
@Override
public void getConfig(QrBinaryCacheConfig.Builder builder) {
builder.cache_size(cacheSizeMB());
}
/** Fills in the binary cache region size (in MB) on the given config builder. */
@Override
public void getConfig(QrBinaryCacheRegionConfig.Builder builder) {
    final int megabytes = cacheSizeMB();
    builder.region_size(megabytes);
}
} |
To be entirely correct we may want to take into account the 10 cpu shares reserved for the node admin. Discussed with Yngve - let's just note this complication as a comment here for later, if applicable. | public void updateContainerNodeMetrics() {
// Interior of updateContainerNodeMetrics() (its signature sits on the mangled line above):
// samples docker stats for this container and pushes CPU/memory/disk/network metrics.
// NOTE(review): per the note above this method, CPU shares reserved for the node admin are
// not accounted for here — confirm whether that matters before relying on cpu.util.
final ContainerNodeSpec nodeSpec = lastNodeSpec;
// Nothing to report before the first node spec has been loaded, or once the container is gone.
if (nodeSpec == null || containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if (!containerStats.isPresent()) return;
// Dimensions common to every metric reported for this container.
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("state", nodeSpec.nodeState.toString())
.add("parentHostname", environment.getParentHostHostname());
Dimensions dimensions = dimensionsBuilder.build();
Docker.ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final long bytesInGB = 1 << 30;
// The docker stats API returns nested untyped maps; the casts below follow its JSON layout.
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
// Disk figures are only available when the node spec / storage maintainer provide them.
final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
.getDiskUsageFor(containerName));
// Container CPU usage relative to the host since the previous sample (stateful reporter).
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
// Scale the host percentage to the cores allocated to this container, when known.
Optional<Double> cpuPercentageOfAllocated = nodeSpec.minCpuCores.map(containerNumCpuCores ->
totalNumCpuCores * cpuPercentageOfHost / containerNumCpuCores);
// The page-cache portion is subtracted from the raw usage figure.
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", memoryPercentUsed);
// Optional metrics are attached only when their inputs were available.
cpuPercentageOfAllocated.ifPresent(cpuUtil -> systemMetricsBuilder.withMetric("cpu.util", cpuUtil));
diskTotalBytes.ifPresent(diskLimit -> systemMetricsBuilder.withMetric("disk.limit", diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskPercentUsed.ifPresent(diskUtil -> systemMetricsBuilder.withMetric("disk.util", diskUtil));
metrics.add(systemMetricsBuilder.build());
// One metrics entry per network interface, dimensioned by interface name.
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(metrics);
} | Optional<Double> cpuPercentageOfAllocated = nodeSpec.minCpuCores.map(containerNumCpuCores -> | public void updateContainerNodeMetrics() {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null || containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("state", nodeSpec.nodeState.toString())
.add("parentHostname", environment.getParentHostHostname());
Dimensions dimensions = dimensionsBuilder.build();
Docker.ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final long bytesInGB = 1 << 30;
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
.getDiskUsageFor(containerName));
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
Optional<Double> cpuPercentageOfAllocated = nodeSpec.minCpuCores.map(containerNumCpuCores ->
totalNumCpuCores * cpuPercentageOfHost / containerNumCpuCores);
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", memoryPercentUsed);
cpuPercentageOfAllocated.ifPresent(cpuUtil -> systemMetricsBuilder.withMetric("cpu.util", cpuUtil));
diskTotalBytes.ifPresent(diskLimit -> systemMetricsBuilder.withMetric("disk.limit", diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskPercentUsed.ifPresent(diskUtil -> systemMetricsBuilder.withMetric("disk.util", diskUtil));
metrics.add(systemMetricsBuilder.build());
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(metrics);
} | class NodeAgentImpl implements NodeAgent {
// --- Loop-control state; mutable fields below are updated under 'monitor' (see setFrozen()/tick()). ---
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
// Image currently being downloaded, or null when no download is in flight.
private DockerImage imageBeingDownloaded = null;
// --- Collaborators and identity, fixed at construction time. ---
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
// SimpleDateFormat is not thread-safe; it is only used while holding the debugMessages lock.
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
// Rolling debug history, capped in addDebugMessage(); synchronize on it before access.
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
// Single-threaded scheduler used for the periodic filebeat service restart.
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Future<?> currentFilebeatRestarter;
private boolean resumeScriptRun = false;
/**
 * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
 * NodeAgent explicitly starting it.
 * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
 * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
 * to get updated state of the container.
 */
enum ContainerState {
ABSENT,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
// Last published/observed values, used to skip redundant work; null until first publish/converge.
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/**
 * Creates a node agent for the given host.
 *
 * @param hostName hostname of the node this agent manages (also determines the container name)
 * @param nodeRepository client used to read node specs and publish node attributes
 * @param orchestrator used to suspend/resume the node around disruptive operations
 * @param dockerOperations docker abstraction used to run/inspect/remove the container
 * @param storageMaintainer optional maintainer for node storage (config writing, cleanup, archiving)
 * @param environment static environment information (e.g. parent host hostname)
 * @param clock clock used for converge scheduling (injectable for tests)
 * @param aclMaintainer optional ACL maintainer, run before starting the container
 */
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
// Restarts a named service inside the container via "service <name> restart";
// failures are logged but never rethrown (used e.g. for the daily filebeat restart).
this.serviceRestarter = service -> {
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
containerName, "service", service, "restart");
if (!processResult.isSuccess()) {
logger.error("Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
logger.error("Failed to restart service " + service, e);
}
};
}
/**
 * Requests that this agent become frozen/unfrozen and reports whether it has already
 * reached the requested state.
 */
@Override
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        boolean requestChanged = (wantFrozen != frozen);
        if (requestChanged) {
            wantFrozen = frozen;
            addDebugMessage(frozen ? "Freezing" : "Unfreezing");
            // Wake the loop so the state transition is picked up promptly.
            signalWorkToBeDone();
        }
        return frozen == isFrozen;
    }
}
/** Logs the message at debug level and appends it (timestamped) to the capped history. */
private void addDebugMessage(String message) {
    synchronized (debugMessages) {
        // Keep the history bounded; drop the oldest entries first.
        while (debugMessages.size() > 1000) debugMessages.pop();
        logger.debug(message);
        final String timestamped = "[" + sdf.format(new Date()) + "] " + message;
        debugMessages.add(timestamped);
    }
}
/**
 * Returns a snapshot of this agent's internal state for debugging/inspection.
 * Safe to call at any time, including before the first converge.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // Fix: lastNodeSpec is null until the first converge; previously this threw NPE when
    // debugInfo() was called before a node spec had been loaded.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
/**
 * Starts the agent's converge loop in a dedicated thread. May only be called once.
 *
 * @param intervalMillis delay between converge attempts
 */
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
// The loop thread keeps ticking until stop() flips 'terminated'.
loopThread = new Thread(() -> {
while (!terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
/**
 * Stops the agent: shuts down the filebeat restarter, marks the loop terminated, wakes it,
 * then waits (bounded) for both the loop thread and the restarter to finish.
 * May only be called once.
 */
@Override
public void stop() {
addDebugMessage("Stopping");
filebeatRestarter.shutdown();
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
// Wake the loop thread so it observes 'terminated' promptly instead of sleeping out its interval.
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop host thread " + hostname);
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop host thread " + hostname);
}
try {
filebeatRestarter.awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
logger.error("Interrupted; Could not stop filebeatrestarter thread");
}
}
/** Runs the optional node resume command once per container start (no-op afterwards). */
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
    if (resumeScriptRun) return;
    addDebugMessage("Starting optional node program resume command");
    dockerOperations.resumeNode(containerName);
    resumeScriptRun = true;
}
/**
 * Builds the node attributes this agent should currently report and publishes them to the
 * node repository if they differ from what was last published.
 */
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
// When the container is absent, report empty image/version rather than the wanted ones.
.withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
.withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
/** Publishes the given attributes to the node repo, but only when they changed. */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (currentAttributes.equals(lastAttributesSet)) return;
    logger.info("Publishing new set of attributes to node repo: "
            + lastAttributesSet + " -> " + currentAttributes);
    addDebugMessage("Publishing new set of attributes to node repo: {" +
            lastAttributesSet + "} -> {" + currentAttributes + "}");
    nodeRepository.updateNodeAttributes(hostname, currentAttributes);
    // Remember what was published so identical updates are skipped next time.
    lastAttributesSet = currentAttributes;
}
/**
 * Starts the container for the given node spec: runs the ACL maintainer first, starts the
 * container, resets CPU accounting, schedules the daily filebeat restart and writes
 * metrics/filebeat config via the storage maintainer.
 */
private void startContainer(ContainerNodeSpec nodeSpec) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
// Fresh container => CPU usage accounting starts over.
lastCpuMetric = new CpuUsageReporter();
currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
storageMaintainer.ifPresent(maintainer -> {
maintainer.writeMetricsConfig(containerName, nodeSpec);
maintainer.writeFilebeatConfig(containerName, nodeSpec);
});
// The resume script must run again for the new container.
resumeScriptRun = false;
containerState = UNKNOWN;
logger.info("Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the container if the node spec requires it; otherwise restarts its services when
 * the restart generation has been bumped. Returns the still-existing container, or empty
 * if it was removed or never existed.
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
.map(container -> {
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + container + ": " + restartReason);
restartServices(nodeSpec, container);
});
return container;
});
}
/**
 * Returns a restart reason when the wanted restart generation is ahead of the current one
 * (or no current generation is known), empty otherwise.
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (!nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        // Fix: previously called currentRestartGeneration.get() unconditionally while building
        // the message, which threw NoSuchElementException when the current generation was absent.
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.orElse(null) + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
/** Restarts vespa services inside a running container on an active node, suspending the node first. */
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final boolean runningAndActive =
            existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active;
    if (!runningAndActive) return;
    ContainerName containerName = existingContainer.name;
    logger.info("Restarting services for " + containerName);
    // Ask the orchestrator for permission before disrupting services.
    orchestratorSuspendNode();
    dockerOperations.restartVespaOnNode(containerName);
}
/** Attempts to suspend the node, then stops all services running in the container. */
@Override
public void stopServices() {
    logger.info("Stopping services for " + containerName);
    // Best-effort suspend before stopping (trySuspendNode, not suspend).
    dockerOperations.trySuspendNode(containerName);
    dockerOperations.stopServicesOnNode(containerName);
}
/**
 * Returns a removal reason when the container should not be kept: the node is dirty or
 * provisioned, a different docker image is wanted, or the container is no longer running.
 */
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final Node.State nodeState = nodeSpec.nodeState;
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    final boolean wantsDifferentImage = nodeSpec.wantedDockerImage.isPresent()
            && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image);
    if (wantsDifferentImage) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    return Optional.empty();
}
/**
 * Removes the given container if shouldRemoveContainer() provides a reason: suspends the
 * node via the orchestrator (only when active), stops services best-effort, cancels the
 * filebeat restarter and removes the container. Returns the container if it was kept,
 * empty if it was removed.
 */
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
if (existingContainer.state.isRunning()) {
// Only ask the orchestrator for permission when the node is active.
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
// Best effort - removal proceeds even if services could not be stopped cleanly.
logger.info("Failed stopping services, ignoring", e);
}
}
if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
dockerOperations.removeContainer(existingContainer);
containerState = ABSENT;
logger.info("Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
/**
 * Schedules a download of the wanted docker image when it differs from the current image
 * and is not already available locally or being downloaded.
 */
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
    if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
    // Fix: wantedDockerImage.get() was previously called without a presence check; when only
    // currentDockerImage was set this threw NoSuchElementException. Nothing to download then.
    if (!nodeSpec.wantedDockerImage.isPresent()) return;
    if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
        // Already downloading this exact image - nothing more to do.
        if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
            return;
        }
        imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
    } else if (imageBeingDownloaded != null) {
        // Image is now available locally; clear the in-flight marker.
        imageBeingDownloaded = null;
    }
}
/** Marks that work should be done immediately and wakes the converge loop. */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (workToDoNow) return;
        workToDoNow = true;
        addDebugMessage("Signaling work to be done");
        monitor.notifyAll();
    }
}
/**
 * One iteration of the agent loop: waits out the remainder of the converge interval (or
 * until work is signaled), synchronizes the frozen state under the monitor, then converges
 * unless frozen. Expected exception types are handled here so the loop never dies; an
 * unknown Throwable takes the whole process down.
 */
void tick() {
    boolean isFrozenCopy;
    synchronized (monitor) {
        // Sleep until the converge interval has elapsed, unless work is signaled earlier.
        while (!workToDoNow) {
            long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
            if (remainder > 0) {
                try {
                    monitor.wait(remainder);
                } catch (InterruptedException e) {
                    logger.error("Interrupted, but ignoring this: " + hostname);
                }
            } else break;
        }
        lastConverge = clock.instant();
        workToDoNow = false;
        if (isFrozen != wantFrozen) {
            isFrozen = wantFrozen;
            logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
        }
        isFrozenCopy = isFrozen;
    }
    if (isFrozenCopy) {
        addDebugMessage("tick: isFrozen");
    } else {
        try {
            converge();
        } catch (OrchestratorException e) {
            // Orchestrator denials are routine: log them and retry on the next tick.
            logger.info(e.getMessage());
            addDebugMessage(e.getMessage());
        } catch (DockerException e) {
            numberOfUnhandledException++;
            // Docker state is now uncertain; force a fresh look at the container next tick.
            containerState = UNKNOWN;
            // Fix: log message previously misspelled "DockerException" as "DockerExecption".
            logger.error("Caught a DockerException, resetting containerState to " + containerState, e);
        } catch (Exception e) {
            numberOfUnhandledException++;
            logger.error("Unhandled exception, ignoring.", e);
            addDebugMessage(e.getMessage());
        } catch (Throwable t) {
            logger.error("Unhandled throwable, taking down system.", t);
            System.exit(234);
        }
    }
}
/**
 * One converge pass: fetches the node spec from the node repository and drives the local
 * container towards the state the spec demands.
 *
 * @throws IllegalStateException if the node is missing from the node repository
 */
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
Optional<Container> container = getContainer();
// When the spec changed, remember it and refresh metrics config for an existing container.
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
if (container.isPresent()) {
storageMaintainer.ifPresent(maintainer -> {
maintainer.writeMetricsConfig(containerName, nodeSpec);
});
}
}
switch (nodeSpec.nodeState) {
// In these states no container should run; remove it if needed and report attributes.
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
// Defer the rest of this converge until the wanted image has finished downloading.
if (isDownloadingImage()) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
if (! container.isPresent()) {
startContainer(nodeSpec);
}
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoWithCurrentAttributes(nodeSpec);
nodeRepository.markNodeAvailableForNewAllocation(hostname);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
/**
 * Pushes the given metrics into the container by invoking setExtraMetrics over vespa-rpc.
 * Exec timeouts and serialization failures are logged and swallowed.
 */
@SuppressWarnings("unchecked")
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
// "s:" prefix presumably marks a string-typed argument for vespa-rpc-invoke - TODO confirm.
String wrappedMetrics = "s:" + params.toString();
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
logger.warning("Unable to push metrics to container: " + containerName, e);
}
}
/**
 * Returns the container from the docker daemon, caching definite absence: once the
 * container is known ABSENT, the daemon is not asked again until it is restarted.
 */
private Optional<Container> getContainer() {
    if (containerState == ABSENT) return Optional.empty();
    Optional<Container> container = dockerOperations.getContainer(containerName);
    if (!container.isPresent()) {
        containerState = ABSENT;
    }
    return container;
}
/** Returns the hostname of the node this agent manages. */
@Override
public String getHostname() {
    return hostname;
}
/** Returns whether a docker image download is currently in flight. */
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}
/** Returns the number of unhandled exceptions seen since the last call, then resets it. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int count = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return count;
}
/**
 * Tracks cumulative container and system CPU usage between samples and reports the
 * container's CPU usage as a percentage of total system usage over the last interval.
 * The first sample (and any zero system-time delta) reports 0, since there is no
 * previous interval to compare against.
 */
class CpuUsageReporter {
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
        final long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
        final long deltaContainerUsage = currentContainerUsage - totalContainerUsage;
        double cpuUsagePct = 0;
        if (deltaSystemUsage != 0 && totalSystemUsage != 0) {
            cpuUsagePct = 100.0 * deltaContainerUsage / deltaSystemUsage;
        }
        // Remember this sample as the baseline for the next interval.
        totalContainerUsage = currentContainerUsage;
        totalSystemUsage = currentSystemUsage;
        return cpuUsagePct;
    }
}
/** Asks the Orchestrator for permission to suspend this node. */
private void orchestratorSuspendNode() {
    logger.info("Ask Orchestrator for permission to suspend node " + hostname);
    orchestrator.suspend(hostname);
}
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Future<?> currentFilebeatRestarter;
private boolean resumeScriptRun = false;
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
this.serviceRestarter = service -> {
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
containerName, "service", service, "restart");
if (!processResult.isSuccess()) {
logger.error("Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
logger.error("Failed to restart service " + service, e);
}
};
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
@Override
public Map<String, Object> debugInfo() {
Map<String, Object> debug = new LinkedHashMap<>();
debug.put("Hostname", hostname);
debug.put("isFrozen", isFrozen);
debug.put("wantFrozen", wantFrozen);
debug.put("terminated", terminated);
debug.put("workToDoNow", workToDoNow);
synchronized (debugMessages) {
debug.put("History", new LinkedList<>(debugMessages));
}
debug.put("Node repo state", lastNodeSpec.nodeState.name());
return debug;
}
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (!terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
@Override
public void stop() {
addDebugMessage("Stopping");
filebeatRestarter.shutdown();
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop host thread " + hostname);
}
} catch (InterruptedException e1) {
logger.error("Interrupted; Could not stop host thread " + hostname);
}
try {
filebeatRestarter.awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
logger.error("Interrupted; Could not stop filebeatrestarter thread");
}
}
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (! resumeScriptRun) {
addDebugMessage("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
resumeScriptRun = true;
}
}
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
.withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
lastAttributesSet = currentAttributes;
}
}
private void startContainer(ContainerNodeSpec nodeSpec) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
lastCpuMetric = new CpuUsageReporter();
currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
storageMaintainer.ifPresent(maintainer -> {
maintainer.writeMetricsConfig(containerName, nodeSpec);
maintainer.writeFilebeatConfig(containerName, nodeSpec);
});
resumeScriptRun = false;
containerState = UNKNOWN;
logger.info("Container successfully started, new containerState is " + containerState);
}
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
.map(container -> {
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + container + ": " + restartReason);
restartServices(nodeSpec, container);
});
return container;
});
}
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
if (!nodeSpec.currentRestartGeneration.isPresent() ||
nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
}
return Optional.empty();
}
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
// Suspends the node via docker operations (try-variant) and stops all services
// running inside the container.
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
// Decides whether the existing container must be removed. Returns the
// human-readable reason for removal, or empty when the container should be kept.
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
// A wanted image different from the running one forces a container replacement.
boolean wantsDifferentImage = nodeSpec.wantedDockerImage
.map(wanted -> !wanted.equals(existingContainer.image))
.orElse(false);
if (wantsDifferentImage) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
return Optional.empty();
}
// Removes the container when shouldRemoveContainer() gives a reason: asks the
// Orchestrator for permission first (active nodes only), stops services
// best-effort, cancels the filebeat restart task and marks the container ABSENT.
// Returns the container if it was kept, empty if it was removed.
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
if (existingContainer.state.isRunning()) {
// Orchestrator suspend is only required while the node is active.
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
// Deliberately best-effort: the container is being removed anyway.
logger.info("Failed stopping services, ignoring", e);
}
}
// currentFilebeatRestarter is only scheduled by startContainer(), so it may be null here.
if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
dockerOperations.removeContainer(existingContainer);
containerState = ABSENT;
logger.info("Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
// Schedules an asynchronous download of the wanted Docker image when it differs
// from the current image. Tracks the in-flight download in imageBeingDownloaded
// so converge() can wait for it; signalWorkToBeDone runs when the download completes.
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
// Robustness fix: if no wanted image is set while a current image is, the
// unconditional wantedDockerImage.get() below used to throw NoSuchElementException.
if (!nodeSpec.wantedDockerImage.isPresent()) return;
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
// Download of exactly this image is already in progress.
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
// Image is now locally available; clear the in-flight marker.
imageBeingDownloaded = null;
}
}
// Wakes the tick loop immediately by setting the workToDoNow flag and
// notifying waiters on the monitor; a no-op when work is already pending.
private void signalWorkToBeDone() {
synchronized (monitor) {
if (workToDoNow) return;
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
// One iteration of the agent's main loop: sleeps until either the converge
// interval has elapsed or signalWorkToBeDone() fires, synchronizes the frozen
// flag under the monitor, then runs converge() unless frozen.
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
// Wait out the remainder of the converge interval, unless work was signaled.
while (!workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
// NOTE(review): interrupt status is swallowed without re-interrupting the
// thread (Thread.currentThread().interrupt()) — confirm this is intended.
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
// Copy so the flag can be read outside the synchronized block.
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
// Treated as benign: logged at info level and retried on the next tick.
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (DockerException e) {
numberOfUnhandledException++;
// Docker state is now uncertain; force a re-query of the container next tick.
containerState = UNKNOWN;
logger.error("Caught a DockerExecption, resetting containerState to " + containerState, e);
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
// A Throwable (e.g. an Error) leaves the agent in an unknown state: kill the JVM.
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
// Drives the local container toward the state the node repository wants for
// this node: fetches the node spec, then acts on nodeSpec.nodeState — removing,
// starting or restarting the container as needed and publishing current
// attributes back to the node repository.
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
Optional<Container> container = getContainer();
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
// Spec changed while a container exists: refresh its metrics config.
if (container.isPresent()) {
storageMaintainer.ifPresent(maintainer -> {
maintainer.writeMetricsConfig(containerName, nodeSpec);
});
}
}
switch (nodeSpec.nodeState) {
// Non-running states: make sure no container runs, then report attributes.
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
// Defer all container work until the wanted image has finished downloading.
if (isDownloadingImage()) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
if (! container.isPresent()) {
startContainer(nodeSpec);
}
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
// A provisioned node is marked dirty in the node repo; the dirty branch
// performs the actual cleanup on a later tick.
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoWithCurrentAttributes(nodeSpec);
nodeRepository.markNodeAvailableForNewAllocation(hostname);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
// Serializes the given metrics as a secret-agent report and pushes them into
// the container via vespa-rpc-invoke (setExtraMetrics on port 19091).
// Failures to serialize or a command timeout are logged and otherwise ignored.
@SuppressWarnings("unchecked")
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
try {
StringBuilder report = new StringBuilder();
for (DimensionMetrics metric : metrics) {
report.append(metric.toSecretAgentReport());
}
String wrappedMetrics = "s:" + report;
String[] rpcCommand = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, rpcCommand);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
logger.warning("Unable to push metrics to container: " + containerName, e);
}
}
// Returns the container from the docker daemon, short-circuiting to empty when
// we already know it is absent; updates containerState to ABSENT when the
// daemon reports no container.
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
Optional<Container> fromDaemon = dockerOperations.getContainer(containerName);
if (!fromDaemon.isPresent()) {
containerState = ABSENT;
}
return fromDaemon;
}
// Hostname of the node this agent manages (set at construction).
@Override
public String getHostname() {
return hostname;
}
// True while an image download scheduled by scheduleDownLoadIfNeeded() is still in flight.
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
// Returns the number of unhandled exceptions accumulated since the last call
// and resets the counter to zero.
// NOTE(review): read-then-reset is not atomic; assumes a single reader thread — confirm.
@Override
public int getAndResetNumberOfUnhandledExceptions() {
final int unhandledSoFar = numberOfUnhandledException;
numberOfUnhandledException = 0;
return unhandledSoFar;
}
// Computes the container's CPU usage as a percentage of total system CPU time
// between two consecutive samples of cumulative usage counters.
class CpuUsageReporter {
// Cumulative counters from the previous sample; 0 until the first call.
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
// Returns 0 on the first sample (no baseline yet) or when system time made no
// progress; otherwise 100 * (container delta) / (system delta).
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long systemDelta = currentSystemUsage - totalSystemUsage;
long containerDelta = currentContainerUsage - totalContainerUsage;
double usagePercent;
if (systemDelta == 0 || totalSystemUsage == 0) {
usagePercent = 0;
} else {
usagePercent = 100.0 * containerDelta / systemDelta;
}
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return usagePercent;
}
}
// Asks the Orchestrator for permission to suspend this node; may throw
// (an OrchestratorException is handled by the caller loop in tick()).
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} |
We allocate 10 shares per CPU core, and `node-admin` is allocated exactly 10 shares, so this could actually be easily taken into account by scaling `totalNumCpuCores` with `totalNumCpuCores`/(`totalNumCpuCores` + 1). EDIT: Math is hard. | public void updateContainerNodeMetrics() {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null || containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("state", nodeSpec.nodeState.toString())
.add("parentHostname", environment.getParentHostHostname());
Dimensions dimensions = dimensionsBuilder.build();
Docker.ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final long bytesInGB = 1 << 30;
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
.getDiskUsageFor(containerName));
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
Optional<Double> cpuPercentageOfAllocated = nodeSpec.minCpuCores.map(containerNumCpuCores ->
totalNumCpuCores * cpuPercentageOfHost / containerNumCpuCores);
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", memoryPercentUsed);
cpuPercentageOfAllocated.ifPresent(cpuUtil -> systemMetricsBuilder.withMetric("cpu.util", cpuUtil));
diskTotalBytes.ifPresent(diskLimit -> systemMetricsBuilder.withMetric("disk.limit", diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskPercentUsed.ifPresent(diskUtil -> systemMetricsBuilder.withMetric("disk.util", diskUtil));
metrics.add(systemMetricsBuilder.build());
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(metrics);
} | Optional<Double> cpuPercentageOfAllocated = nodeSpec.minCpuCores.map(containerNumCpuCores -> | public void updateContainerNodeMetrics() {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null || containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("state", nodeSpec.nodeState.toString())
.add("parentHostname", environment.getParentHostHostname());
Dimensions dimensions = dimensionsBuilder.build();
Docker.ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final long bytesInGB = 1 << 30;
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
.getDiskUsageFor(containerName));
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
Optional<Double> cpuPercentageOfAllocated = nodeSpec.minCpuCores.map(containerNumCpuCores ->
totalNumCpuCores * cpuPercentageOfHost / containerNumCpuCores);
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", memoryPercentUsed);
cpuPercentageOfAllocated.ifPresent(cpuUtil -> systemMetricsBuilder.withMetric("cpu.util", cpuUtil));
diskTotalBytes.ifPresent(diskLimit -> systemMetricsBuilder.withMetric("disk.limit", diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskPercentUsed.ifPresent(diskUtil -> systemMetricsBuilder.withMetric("disk.util", diskUtil));
metrics.add(systemMetricsBuilder.build());
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(metrics);
} | class NodeAgentImpl implements NodeAgent {
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Future<?> currentFilebeatRestarter;
private boolean resumeScriptRun = false;
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final Optional<StorageMaintainer> storageMaintainer,
final Environment environment,
final Clock clock,
final Optional<AclMaintainer> aclMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.containerName = ContainerName.fromHostname(hostName);
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.environment = environment;
this.clock = clock;
this.aclMaintainer = aclMaintainer;
this.lastConverge = clock.instant();
this.serviceRestarter = service -> {
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
containerName, "service", service, "restart");
if (!processResult.isSuccess()) {
logger.error("Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
logger.error("Failed to restart service " + service, e);
}
};
}
// Records the desired frozen state and wakes the tick loop; the actual
// transition happens asynchronously in tick(). Returns whether the agent is
// already in the requested state.
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
// Returns a snapshot of the agent's internal state for debugging. The debug
// message history is copied while holding the debugMessages lock, since
// addDebugMessage() mutates it concurrently.
@Override
public Map<String, Object> debugInfo() {
Map<String, Object> debug = new LinkedHashMap<>();
debug.put("Hostname", hostname);
debug.put("isFrozen", isFrozen);
debug.put("wantFrozen", wantFrozen);
debug.put("terminated", terminated);
debug.put("workToDoNow", workToDoNow);
synchronized (debugMessages) {
debug.put("History", new LinkedList<>(debugMessages));
}
// Bug fix: lastNodeSpec is null until the first successful converge(); the
// previous code dereferenced it unconditionally and threw NPE in that window.
debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
return debug;
}
@Override
public void start(int intervalMillis) {
addDebugMessage("Starting with interval " + intervalMillis + "ms");
delaysBetweenEachConvergeMillis = intervalMillis;
if (loopThread != null) {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(() -> {
while (!terminated.get()) tick();
});
loopThread.setName("tick-" + hostname);
loopThread.start();
}
// Stops the agent: shuts down the filebeat restarter executor, flags
// termination (throwing if already stopped), wakes the loop thread and waits —
// with bounded timeouts — for both the loop thread and the executor to finish.
@Override
public void stop() {
addDebugMessage("Stopping");
filebeatRestarter.shutdown();
// compareAndSet guards against double-stop; also makes the loop thread exit.
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
try {
loopThread.join(10000);
if (loopThread.isAlive()) {
logger.error("Could not stop host thread " + hostname);
}
} catch (InterruptedException e1) {
// NOTE(review): interrupt status is not restored here — confirm intended.
logger.error("Interrupted; Could not stop host thread " + hostname);
}
try {
filebeatRestarter.awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// NOTE(review): same as above — interrupt status is swallowed.
logger.error("Interrupted; Could not stop filebeatrestarter thread");
}
}
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
if (! resumeScriptRun) {
addDebugMessage("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
resumeScriptRun = true;
}
}
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
.withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
// Pushes node attributes to the node repository, but only when they differ
// from the last attributes we published (lastAttributesSet acts as a cache to
// avoid redundant node-repo writes).
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
// Only cache after a successful update so a failed write is retried.
lastAttributesSet = currentAttributes;
}
}
private void startContainer(ContainerNodeSpec nodeSpec) {
aclMaintainer.ifPresent(AclMaintainer::run);
dockerOperations.startContainer(containerName, nodeSpec);
lastCpuMetric = new CpuUsageReporter();
currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
storageMaintainer.ifPresent(maintainer -> {
maintainer.writeMetricsConfig(containerName, nodeSpec);
maintainer.writeFilebeatConfig(containerName, nodeSpec);
});
resumeScriptRun = false;
containerState = UNKNOWN;
logger.info("Container successfully started, new containerState is " + containerState);
}
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
.map(container -> {
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + container + ": " + restartReason);
restartServices(nodeSpec, container);
});
return container;
});
}
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
if (!nodeSpec.currentRestartGeneration.isPresent() ||
nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
}
return Optional.empty();
}
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
return Optional.empty();
}
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
if (existingContainer.state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
logger.info("Failed stopping services, ignoring", e);
}
}
if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
dockerOperations.removeContainer(existingContainer);
containerState = ABSENT;
logger.info("Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
return;
}
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
private void signalWorkToBeDone() {
synchronized (monitor) {
if (!workToDoNow) {
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
}
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (!workToDoNow) {
long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (DockerException e) {
numberOfUnhandledException++;
containerState = UNKNOWN;
logger.error("Caught a DockerExecption, resetting containerState to " + containerState, e);
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
void converge() {
final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
.orElseThrow(() ->
new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
Optional<Container> container = getContainer();
if (!nodeSpec.equals(lastNodeSpec)) {
addDebugMessage("Loading new node spec: " + nodeSpec.toString());
lastNodeSpec = nodeSpec;
if (container.isPresent()) {
storageMaintainer.ifPresent(maintainer -> {
maintainer.writeMetricsConfig(containerName, nodeSpec);
});
}
}
switch (nodeSpec.nodeState) {
case ready:
case reserved:
case parked:
case failed:
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case active:
storageMaintainer.ifPresent(maintainer -> {
maintainer.removeOldFilesFromNode(containerName);
maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
});
scheduleDownLoadIfNeeded(nodeSpec);
if (isDownloadingImage()) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
if (! container.isPresent()) {
startContainer(nodeSpec);
}
runLocalResumeScriptIfNeeded(nodeSpec);
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(hostname);
break;
case inactive:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
updateNodeRepoWithCurrentAttributes(nodeSpec);
break;
case provisioned:
nodeRepository.markAsDirty(hostname);
break;
case dirty:
storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
removeContainerIfNeededUpdateContainerState(nodeSpec, container);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
updateNodeRepoWithCurrentAttributes(nodeSpec);
nodeRepository.markNodeAvailableForNewAllocation(hostname);
break;
default:
throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
}
}
@SuppressWarnings("unchecked")
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
String wrappedMetrics = "s:" + params.toString();
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
logger.warning("Unable to push metrics to container: " + containerName, e);
}
}
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
Optional<Container> container = dockerOperations.getContainer(containerName);
if (! container.isPresent()) containerState = ABSENT;
return container;
}
@Override
public String getHostname() {
return hostname;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
class CpuUsageReporter {
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
long deltaSystemUsage = currentSystemUsage - totalSystemUsage;
double cpuUsagePct = (deltaSystemUsage == 0 || totalSystemUsage == 0) ?
0 : 100.0 * (currentContainerUsage - totalContainerUsage) / deltaSystemUsage;
totalContainerUsage = currentContainerUsage;
totalSystemUsage = currentSystemUsage;
return cpuUsagePct;
}
}
private void orchestratorSuspendNode() {
logger.info("Ask Orchestrator for permission to suspend node " + hostname);
orchestrator.suspend(hostname);
}
} | class NodeAgentImpl implements NodeAgent {
// ---- Loop/lifecycle control ----
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;             // actual frozen state; only updated in tick() under 'monitor'
private boolean wantFrozen = false;          // requested frozen state, set via setFrozen()
private boolean workToDoNow = true;          // tells tick() to converge now instead of waiting out the interval
private final Object monitor = new Object(); // guards isFrozen/wantFrozen/workToDoNow and tick()'s wait
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null; // non-null while an image pull is in flight

// ---- Identity and injected collaborators ----
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;

// ---- Bounded debug history; sdf is only touched in addDebugMessage() under the debugMessages lock ----
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();

private long delaysBetweenEachConvergeMillis = 30_000; // overwritten by start(intervalMillis)
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;

// Scheduler that restarts filebeat inside the container daily (scheduled in startContainer());
// the task handle is kept so it can be cancelled when the container is removed.
private final ScheduledExecutorService filebeatRestarter =
        Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Future<?> currentFilebeatRestarter;
private boolean resumeScriptRun = false;
/**
 * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
 * NodeAgent explicitly starting it.
 * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
 * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
 * to get updated state of the container.
 */
enum ContainerState {
    ABSENT,
    UNKNOWN
}
private ContainerState containerState = UNKNOWN;

// ---- Last observed/published state, used to skip redundant work ----
private NodeAttributes lastAttributesSet = null; // last attributes pushed to the node repo
private ContainerNodeSpec lastNodeSpec = null;   // null until the first successful converge()
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/**
 * Creates a node agent for the given host. All collaborators are injected; the constructor
 * performs no external calls beyond reading the injected clock.
 */
public NodeAgentImpl(
        final String hostName,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final Optional<StorageMaintainer> storageMaintainer,
        final Environment environment,
        final Clock clock,
        final Optional<AclMaintainer> aclMaintainer) {
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.hostname = hostName;
    this.containerName = ContainerName.fromHostname(hostName);
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
    this.environment = environment;
    this.clock = clock;
    this.aclMaintainer = aclMaintainer;
    this.lastConverge = clock.instant();
    // Restarts a named service inside the container as root. Failures are logged and swallowed
    // because this runs on the filebeatRestarter scheduler thread, where a throw would kill the task.
    this.serviceRestarter = service -> {
        try {
            ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                    containerName, "service", service, "restart");
            if (!processResult.isSuccess()) {
                logger.error("Failed to restart service " + service + ": " + processResult);
            }
        } catch (Exception e) {
            logger.error("Failed to restart service " + service, e);
        }
    };
}
/**
 * Requests freezing or unfreezing of this agent. The change is applied asynchronously by
 * tick(); the return value reports whether the agent has already reached the requested state.
 */
@Override
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        if (wantFrozen != frozen) {
            wantFrozen = frozen;
            addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
            // Wake tick() so the frozen state converges promptly.
            signalWorkToBeDone();
        }
        return isFrozen == frozen;
    }
}
/** Logs at debug level and appends a timestamped entry to the bounded in-memory history. */
private void addDebugMessage(String message) {
    synchronized (debugMessages) {
        // Keep the history bounded by evicting the oldest entries first.
        while (debugMessages.size() > 1000) {
            debugMessages.pop();
        }
        logger.debug(message);
        final String timestamped = "[" + sdf.format(new Date()) + "] " + message;
        debugMessages.add(timestamped);
    }
}
/**
 * Returns a snapshot of this agent's internal state for debugging/inspection.
 * Safe to call at any time, including before the first converge.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // Bug fix: lastNodeSpec is null until the first successful converge(); dereferencing it
    // unconditionally threw NullPointerException when debugInfo() was called early.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
/**
 * Starts this agent's tick loop on a dedicated thread.
 *
 * @param intervalMillis delay between converge attempts
 * @throws IllegalStateException if the agent was already started (restart is not supported)
 */
@Override
public void start(int intervalMillis) {
    addDebugMessage("Starting with interval " + intervalMillis + "ms");
    delaysBetweenEachConvergeMillis = intervalMillis;
    if (loopThread != null) {
        // IllegalStateException is the idiomatic type for a violated lifecycle precondition;
        // as a RuntimeException subclass it stays compatible with existing catch clauses.
        throw new IllegalStateException("Can not restart a node agent.");
    }
    loopThread = new Thread(() -> {
        while (!terminated.get()) tick();
    });
    loopThread.setName("tick-" + hostname);
    loopThread.start();
}
/**
 * Stops this agent: shuts down the filebeat scheduler, terminates the tick loop and waits
 * (bounded) for both to finish.
 *
 * @throws IllegalStateException if the agent was already stopped
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    filebeatRestarter.shutdown();
    if (!terminated.compareAndSet(false, true)) {
        throw new IllegalStateException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        // Restore the interrupt flag so callers further up can observe the interruption.
        Thread.currentThread().interrupt();
        logger.error("Interrupted; Could not stop host thread " + hostname);
    }
    try {
        filebeatRestarter.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        logger.error("Interrupted; Could not stop filebeatrestarter thread");
    }
}
/**
 * Runs the optional node resume command exactly once per container start
 * (the flag is reset by startContainer()).
 */
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
    if (resumeScriptRun) return; // already done for the current container
    addDebugMessage("Starting optional node program resume command");
    dockerOperations.resumeNode(containerName);
    resumeScriptRun = true;
}
/**
 * Builds the node attributes implied by the given spec and pushes them to the node repo
 * if they differ from what was last published. Docker image and Vespa version are blanked
 * out when the container is known to be absent.
 */
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
    final NodeAttributes nodeAttributes = new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
            .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
    publishStateToNodeRepoIfChanged(nodeAttributes);
}
/** Pushes the attributes to the node repo, but only when they differ from the last published set. */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (currentAttributes.equals(lastAttributesSet)) return; // nothing changed, avoid the round-trip
    logger.info("Publishing new set of attributes to node repo: "
            + lastAttributesSet + " -> " + currentAttributes);
    addDebugMessage("Publishing new set of attributes to node repo: {" +
            lastAttributesSet + "} -> {" + currentAttributes + "}");
    nodeRepository.updateNodeAttributes(hostname, currentAttributes);
    lastAttributesSet = currentAttributes;
}
/**
 * Starts the container for the given spec: refreshes ACLs, launches the container, resets
 * CPU accounting, schedules the daily filebeat restart and writes configuration files.
 */
private void startContainer(ContainerNodeSpec nodeSpec) {
    aclMaintainer.ifPresent(AclMaintainer::run);
    dockerOperations.startContainer(containerName, nodeSpec);
    // CPU usage deltas must not span container restarts.
    lastCpuMetric = new CpuUsageReporter();
    currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
    storageMaintainer.ifPresent(maintainer -> {
        maintainer.writeMetricsConfig(containerName, nodeSpec);
        maintainer.writeFilebeatConfig(containerName, nodeSpec);
    });
    // Let runLocalResumeScriptIfNeeded() run again for the new container.
    resumeScriptRun = false;
    containerState = UNKNOWN;
    logger.info("Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the existing container if the spec requires it; when the container is kept,
 * restarts its services if the restart generation has been bumped.
 *
 * @return the container if it still exists after this call, otherwise empty
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
    return existingContainer
            .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
            .map(container -> {
                shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                    logger.info("Will restart services for container " + container + ": " + restartReason);
                    restartServices(nodeSpec, container);
                });
                return container;
            });
}
/**
 * Decides whether services must be restarted: yes when a restart generation is wanted and the
 * current generation is either absent or behind it.
 *
 * @return a human-readable restart reason, or empty when no restart is needed
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (!nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        // Bug fix: this branch is also taken when currentRestartGeneration is absent, so the
        // previous unconditional get() threw NoSuchElementException while building the message.
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.orElse(null) + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
/** Restarts Vespa services in the container, but only for a running container on an active node. */
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final boolean runningAndActive =
            existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active;
    if (!runningAndActive) return;
    ContainerName containerName = existingContainer.name;
    logger.info("Restarting services for " + containerName);
    // Active nodes must be suspended in the Orchestrator before services are restarted.
    orchestratorSuspendNode();
    dockerOperations.restartVespaOnNode(containerName);
}
/** Suspends the node (best effort) and stops all Vespa services inside the container. */
@Override
public void stopServices() {
    logger.info("Stopping services for " + containerName);
    dockerOperations.trySuspendNode(containerName);
    dockerOperations.stopServicesOnNode(containerName);
}
/**
 * Decides whether the existing container has to be removed.
 *
 * @return a human-readable removal reason, or empty when the container can be kept
 */
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final Node.State nodeState = nodeSpec.nodeState;
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    final boolean imageChanged = nodeSpec.wantedDockerImage.isPresent()
            && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image);
    if (imageChanged) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    return Optional.empty();
}
/**
 * Removes the container when shouldRemoveContainer() gives a reason: suspends via the
 * Orchestrator for active nodes, stops services (best effort), cancels the filebeat
 * restarter and removes the container, marking containerState ABSENT.
 *
 * @return the container if it was kept, otherwise empty
 */
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
    if (removeReason.isPresent()) {
        logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
        if (existingContainer.state.isRunning()) {
            // Only active nodes need Orchestrator permission before going down.
            if (nodeSpec.nodeState == Node.State.active) {
                orchestratorSuspendNode();
            }
            try {
                stopServices();
            } catch (Exception e) {
                // Best effort - the container is being removed regardless.
                logger.info("Failed stopping services, ignoring", e);
            }
        }
        if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
        dockerOperations.removeContainer(existingContainer);
        containerState = ABSENT;
        logger.info("Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
/**
 * Schedules an asynchronous pull of the wanted Docker image when it differs from the current
 * one. The download callback invokes signalWorkToBeDone() so converge re-runs on completion.
 */
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
    if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
    // Bug fix: wantedDockerImage may be absent while still differing from currentDockerImage;
    // the unconditional get() below then threw NoSuchElementException.
    if (!nodeSpec.wantedDockerImage.isPresent()) return;
    if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
        if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
            return; // this image is already being pulled
        }
        imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
    } else if (imageBeingDownloaded != null) {
        imageBeingDownloaded = null; // image is now available locally
    }
}
/** Wakes the tick loop so it converges immediately instead of waiting out the interval. */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (!workToDoNow) {
            workToDoNow = true;
            addDebugMessage("Signaling work to be done");
            // tick() waits on 'monitor'; notify so it re-checks workToDoNow.
            monitor.notifyAll();
        }
    }
}
/**
 * One iteration of the agent loop: waits until the converge interval has elapsed (or work is
 * signaled), applies any pending freeze request, then converges unless frozen. All error
 * handling for converge() lives here so the loop thread never dies silently.
 */
void tick() {
    boolean isFrozenCopy;
    synchronized (monitor) {
        while (!workToDoNow) {
            long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
            if (remainder > 0) {
                try {
                    // Woken early by signalWorkToBeDone().
                    monitor.wait(remainder);
                } catch (InterruptedException e) {
                    logger.error("Interrupted, but ignoring this: " + hostname);
                }
            } else break;
        }
        lastConverge = clock.instant();
        workToDoNow = false;
        if (isFrozen != wantFrozen) {
            isFrozen = wantFrozen;
            logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
        }
        isFrozenCopy = isFrozen; // read under the lock, used outside it
    }
    if (isFrozenCopy) {
        addDebugMessage("tick: isFrozen");
    } else {
        try {
            converge();
        } catch (OrchestratorException e) {
            // Expected when the Orchestrator denies suspend/resume; not counted as unhandled.
            logger.info(e.getMessage());
            addDebugMessage(e.getMessage());
        } catch (DockerException e) {
            numberOfUnhandledException++;
            // The daemon's view may now be stale; force a re-query next converge.
            containerState = UNKNOWN;
            // Typo fixed in log message: "DockerExecption" -> "DockerException".
            logger.error("Caught a DockerException, resetting containerState to " + containerState, e);
        } catch (Exception e) {
            numberOfUnhandledException++;
            logger.error("Unhandled exception, ignoring.", e);
            addDebugMessage(e.getMessage());
        } catch (Throwable t) {
            // An Error leaves the agent in an unknown state; fail fast.
            logger.error("Unhandled throwable, taking down system.", t);
            System.exit(234);
        }
    }
}
/**
 * Reads the node's spec from the node repo and drives the container towards it based on the
 * node's state. Called from tick(); exceptions thrown here are handled by tick().
 */
void converge() {
    final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
            .orElseThrow(() ->
                    new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
    Optional<Container> container = getContainer();
    if (!nodeSpec.equals(lastNodeSpec)) {
        addDebugMessage("Loading new node spec: " + nodeSpec.toString());
        lastNodeSpec = nodeSpec;
        // Spec changed: refresh the metrics config inside the running container, if any.
        if (container.isPresent()) {
            storageMaintainer.ifPresent(maintainer -> {
                maintainer.writeMetricsConfig(containerName, nodeSpec);
            });
        }
    }
    switch (nodeSpec.nodeState) {
        // No container should run in these states; just make sure it is gone and report attributes.
        case ready:
        case reserved:
        case parked:
        case failed:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case active:
            storageMaintainer.ifPresent(maintainer -> {
                maintainer.removeOldFilesFromNode(containerName);
                maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
            });
            scheduleDownLoadIfNeeded(nodeSpec);
            if (isDownloadingImage()) {
                // Bail out while the image pull is in flight; its callback signals work,
                // which makes tick() call converge() again.
                addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            if (! container.isPresent()) {
                startContainer(nodeSpec);
            }
            runLocalResumeScriptIfNeeded(nodeSpec);
            // Publish attributes before resuming the node in the Orchestrator.
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            logger.info("Call resume against Orchestrator");
            orchestrator.resume(hostname);
            break;
        case inactive:
            storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case provisioned:
            nodeRepository.markAsDirty(hostname);
            break;
        case dirty:
            storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
            storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            nodeRepository.markNodeAvailableForNewAllocation(hostname);
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
    }
}
/**
 * Pushes the given metrics into the container's local metrics endpoint via vespa-rpc-invoke.
 * Failures are logged and swallowed - metrics delivery is best effort.
 */
@SuppressWarnings("unchecked")
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
    StringBuilder params = new StringBuilder();
    try {
        for (DimensionMetrics dimensionMetrics : metrics) {
            params.append(dimensionMetrics.toSecretAgentReport());
        }
        // NOTE(review): the "s:" prefix presumably marks the payload type for setExtraMetrics - confirm.
        String wrappedMetrics = "s:" + params.toString();
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        logger.warning("Unable to push metrics to container: " + containerName, e);
    }
}
/**
 * Fetches the container from the docker daemon, caching definite absence in containerState
 * so subsequent calls can skip the daemon round-trip.
 */
private Optional<Container> getContainer() {
    if (containerState != ABSENT) {
        final Optional<Container> found = dockerOperations.getContainer(containerName);
        if (!found.isPresent()) {
            containerState = ABSENT;
        }
        return found;
    }
    // An absent container cannot reappear without this agent starting it (see ContainerState docs).
    return Optional.empty();
}
/** Returns the hostname of the node this agent manages. */
@Override
public String getHostname() {
    return hostname;
}
/** Returns true while a Docker image pull scheduled by this agent is still in flight. */
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}
/** Returns the number of unhandled exceptions seen since the previous call, then resets the counter. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    final int unhandled = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return unhandled;
}
/**
 * Converts cumulative CPU-time counters into a usage percentage per sample interval.
 * Each call computes the delta against the previous call's counters, then remembers the
 * current counters for the next call. The very first sample (and any sample where the
 * system counter did not advance) yields 0 since no meaningful delta exists.
 */
class CpuUsageReporter {
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
        final long systemDelta = currentSystemUsage - totalSystemUsage;
        final long containerDelta = currentContainerUsage - totalContainerUsage;
        // No baseline yet, or system clock did not advance: report 0 rather than divide by zero.
        final boolean noMeaningfulDelta = systemDelta == 0 || totalSystemUsage == 0;
        final double percentage = noMeaningfulDelta ? 0 : 100.0 * containerDelta / systemDelta;
        totalContainerUsage = currentContainerUsage;
        totalSystemUsage = currentSystemUsage;
        return percentage;
    }
}
/**
 * Asks the Orchestrator for permission to suspend this node.
 * May throw (e.g. an OrchestratorException, handled in tick()) when permission is denied.
 */
private void orchestratorSuspendNode() {
    logger.info("Ask Orchestrator for permission to suspend node " + hostname);
    orchestrator.suspend(hostname);
}
} |
Yes — the "complexity" is e.g. interpreting a CPU core we promise as part of a flavor: knowing that if the flavor advertises 48 cores and you see ~100% CPU utilization, you will actually get about one core less. Perhaps we shouldn't promise a 48-CPU flavor for C-77E, and so on. | public void updateContainerNodeMetrics() {
// Snapshot the last spec; no metrics can be produced before the first converge
// or when the container is known to be absent.
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null || containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if (!containerStats.isPresent()) return;
// Dimensions shared by every metric emitted for this container.
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
        .add("host", hostname)
        .add("role", "tenants")
        .add("state", nodeSpec.nodeState.toString())
        .add("parentHostname", environment.getParentHostHostname());
Dimensions dimensions = dimensionsBuilder.build();
Docker.ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final long bytesInGB = 1 << 30;
// Raw-Map navigation mirrors the docker stats payload (assumes the Docker stats API
// layout - confirm against the Docker Engine API docs if this breaks).
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
        .getDiskUsageFor(containerName));
// CPU usage is a delta since the previous sample (see CpuUsageReporter), scaled from
// host-wide percentage to percentage of the cores allocated to this node.
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
Optional<Double> cpuPercentageOfAllocated = nodeSpec.minCpuCores.map(containerNumCpuCores ->
        totalNumCpuCores * cpuPercentageOfHost / containerNumCpuCores);
// "Used" memory excludes the cache value reported in the stats.
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
        .withMetric("mem.limit", memoryTotalBytes)
        .withMetric("mem.used", memoryTotalBytesUsed)
        .withMetric("mem.util", memoryPercentUsed);
// Optional metrics are only emitted when their inputs are known.
cpuPercentageOfAllocated.ifPresent(cpuUtil -> systemMetricsBuilder.withMetric("cpu.util", cpuUtil));
diskTotalBytes.ifPresent(diskLimit -> systemMetricsBuilder.withMetric("disk.limit", diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskPercentUsed.ifPresent(diskUtil -> systemMetricsBuilder.withMetric("disk.util", diskUtil));
metrics.add(systemMetricsBuilder.build());
// One metric set per network interface, tagged with the interface name.
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
    Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
    Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
    DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
            .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
            .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
            .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
            .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
            .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
            .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
            .build();
    metrics.add(networkMetrics);
});
pushMetricsToContainer(metrics);
} | Optional<Double> cpuPercentageOfAllocated = nodeSpec.minCpuCores.map(containerNumCpuCores -> | public void updateContainerNodeMetrics() {
// Bail out before the first converge or when the container is known to be absent.
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null || containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if (!containerStats.isPresent()) return;
// Dimensions shared by every metric emitted for this container.
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
        .add("host", hostname)
        .add("role", "tenants")
        .add("state", nodeSpec.nodeState.toString())
        .add("parentHostname", environment.getParentHostHostname());
Dimensions dimensions = dimensionsBuilder.build();
Docker.ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final long bytesInGB = 1 << 30;
// Raw-Map casts follow the docker stats payload layout (assumption - verify if this breaks).
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final Optional<Long> diskTotalBytes = nodeSpec.minDiskAvailableGb.map(size -> (long) (size * bytesInGB));
final Optional<Long> diskTotalBytesUsed = storageMaintainer.flatMap(maintainer -> maintainer
        .getDiskUsageFor(containerName));
// Delta-based CPU percentage (see CpuUsageReporter), scaled to the node's allocated cores.
double cpuPercentageOfHost = lastCpuMetric.getCpuUsagePercentage(cpuContainerTotalTime, cpuSystemTotalTime);
Optional<Double> cpuPercentageOfAllocated = nodeSpec.minCpuCores.map(containerNumCpuCores ->
        totalNumCpuCores * cpuPercentageOfHost / containerNumCpuCores);
// "Used" memory excludes the cache value reported in the stats.
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryPercentUsed = 100.0 * memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskPercentUsed = diskTotalBytes.flatMap(total -> diskTotalBytesUsed.map(used -> 100.0 * used / total));
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
        .withMetric("mem.limit", memoryTotalBytes)
        .withMetric("mem.used", memoryTotalBytesUsed)
        .withMetric("mem.util", memoryPercentUsed);
// Optional metrics are only emitted when their inputs are known.
cpuPercentageOfAllocated.ifPresent(cpuUtil -> systemMetricsBuilder.withMetric("cpu.util", cpuUtil));
diskTotalBytes.ifPresent(diskLimit -> systemMetricsBuilder.withMetric("disk.limit", diskLimit));
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskPercentUsed.ifPresent(diskUtil -> systemMetricsBuilder.withMetric("disk.util", diskUtil));
metrics.add(systemMetricsBuilder.build());
// One metric set per network interface, tagged with the interface name.
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
    Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
    Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
    DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
            .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
            .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
            .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
            .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
            .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
            .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
            .build();
    metrics.add(networkMetrics);
});
} | class NodeAgentImpl implements NodeAgent {
// Loop/lifecycle control; isFrozen/wantFrozen/workToDoNow are guarded by 'monitor'.
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null; // non-null while an image pull is in flight
// Identity and injected collaborators.
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;
// Bounded debug history; sdf is only used under the debugMessages lock.
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private long delaysBetweenEachConvergeMillis = 30_000; // overwritten by start(intervalMillis)
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;
// Scheduler for the daily in-container filebeat restart; handle kept for cancellation.
private final ScheduledExecutorService filebeatRestarter =
        Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Future<?> currentFilebeatRestarter;
private boolean resumeScriptRun = false;
/**
 * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
 * NodeAgent explicitly starting it.
 * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
 * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
 * to get updated state of the container.
 */
enum ContainerState {
    ABSENT,
    UNKNOWN
}
private ContainerState containerState = UNKNOWN;
// Last observed/published state, used to skip redundant work.
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null; // null until the first converge()
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/** Creates a node agent for the given host; collaborators are injected, no external calls made here. */
public NodeAgentImpl(
        final String hostName,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final Optional<StorageMaintainer> storageMaintainer,
        final Environment environment,
        final Clock clock,
        final Optional<AclMaintainer> aclMaintainer) {
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.hostname = hostName;
    this.containerName = ContainerName.fromHostname(hostName);
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
    this.environment = environment;
    this.clock = clock;
    this.aclMaintainer = aclMaintainer;
    this.lastConverge = clock.instant();
    // Restarts a named service inside the container as root; failures are logged, never thrown,
    // since this lambda runs on the filebeatRestarter scheduler thread.
    this.serviceRestarter = service -> {
        try {
            ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                    containerName, "service", service, "restart");
            if (!processResult.isSuccess()) {
                logger.error("Failed to restart service " + service + ": " + processResult);
            }
        } catch (Exception e) {
            logger.error("Failed to restart service " + service, e);
        }
    };
}
/**
 * Requests freezing or unfreezing; applied asynchronously by tick(). Returns whether the
 * agent has already reached the requested state.
 */
@Override
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        if (wantFrozen != frozen) {
            wantFrozen = frozen;
            addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
            signalWorkToBeDone();
        }
        return isFrozen == frozen;
    }
}
/** Logs at debug level and appends a timestamped entry to the bounded (1000-entry) history. */
private void addDebugMessage(String message) {
    synchronized (debugMessages) {
        // Evict oldest entries to keep the history bounded.
        while (debugMessages.size() > 1000) {
            debugMessages.pop();
        }
        logger.debug(message);
        debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
    }
}
/**
 * Returns a snapshot of this agent's internal state for debugging/inspection.
 * Safe to call at any time, including before the first converge.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // Bug fix: lastNodeSpec is null until the first successful converge(); dereferencing it
    // unconditionally threw NullPointerException when debugInfo() was called early.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
/**
 * Starts this agent's tick loop on a dedicated thread.
 *
 * @param intervalMillis delay between converge attempts
 * @throws IllegalStateException if the agent was already started (restart is not supported)
 */
@Override
public void start(int intervalMillis) {
    addDebugMessage("Starting with interval " + intervalMillis + "ms");
    delaysBetweenEachConvergeMillis = intervalMillis;
    if (loopThread != null) {
        // IllegalStateException describes this lifecycle violation precisely and remains
        // compatible with callers catching RuntimeException.
        throw new IllegalStateException("Can not restart a node agent.");
    }
    loopThread = new Thread(() -> {
        while (!terminated.get()) tick();
    });
    loopThread.setName("tick-" + hostname);
    loopThread.start();
}
/**
 * Stops this agent: shuts down the filebeat scheduler, terminates the tick loop and waits
 * (bounded) for both to finish.
 *
 * @throws IllegalStateException if the agent was already stopped
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    filebeatRestarter.shutdown();
    if (!terminated.compareAndSet(false, true)) {
        throw new IllegalStateException("Can not re-stop a node agent.");
    }
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        // Restore the interrupt flag so callers further up can observe the interruption.
        Thread.currentThread().interrupt();
        logger.error("Interrupted; Could not stop host thread " + hostname);
    }
    try {
        filebeatRestarter.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        logger.error("Interrupted; Could not stop filebeatrestarter thread");
    }
}
/** Runs the optional node resume command once per container start (flag reset by startContainer()). */
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
    if (! resumeScriptRun) {
        addDebugMessage("Starting optional node program resume command");
        dockerOperations.resumeNode(containerName);
        resumeScriptRun = true;
    }
}
/**
 * Builds the node attributes implied by the spec and publishes them if changed; image and
 * Vespa version are blanked when the container is known to be absent.
 */
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
    final NodeAttributes nodeAttributes = new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
            .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
    publishStateToNodeRepoIfChanged(nodeAttributes);
}
/** Pushes the attributes to the node repo only when they differ from the last published set. */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (!currentAttributes.equals(lastAttributesSet)) {
        logger.info("Publishing new set of attributes to node repo: "
                + lastAttributesSet + " -> " + currentAttributes);
        addDebugMessage("Publishing new set of attributes to node repo: {" +
                lastAttributesSet + "} -> {" + currentAttributes + "}");
        nodeRepository.updateNodeAttributes(hostname, currentAttributes);
        lastAttributesSet = currentAttributes;
    }
}
/**
 * Starts the container: refreshes ACLs, launches it, resets CPU accounting, schedules the
 * daily filebeat restart and writes configuration files.
 */
private void startContainer(ContainerNodeSpec nodeSpec) {
    aclMaintainer.ifPresent(AclMaintainer::run);
    dockerOperations.startContainer(containerName, nodeSpec);
    // CPU usage deltas must not span container restarts.
    lastCpuMetric = new CpuUsageReporter();
    currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
    storageMaintainer.ifPresent(maintainer -> {
        maintainer.writeMetricsConfig(containerName, nodeSpec);
        maintainer.writeFilebeatConfig(containerName, nodeSpec);
    });
    // Let runLocalResumeScriptIfNeeded() run again for the new container.
    resumeScriptRun = false;
    containerState = UNKNOWN;
    logger.info("Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the container if the spec requires it; when kept, restarts its services if the
 * restart generation has been bumped. Returns the container if it still exists, else empty.
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
    return existingContainer
            .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
            .map(container -> {
                shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                    logger.info("Will restart services for container " + container + ": " + restartReason);
                    restartServices(nodeSpec, container);
                });
                return container;
            });
}
/**
 * Decides whether services must be restarted: yes when a restart generation is wanted and the
 * current generation is either absent or behind it.
 *
 * @return a human-readable restart reason, or empty when no restart is needed
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (!nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        // Bug fix: this branch is also taken when currentRestartGeneration is absent, so the
        // previous unconditional get() threw NoSuchElementException while building the message.
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.orElse(null) + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
/** Restarts Vespa services in the container, but only for a running container on an active node. */
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
        ContainerName containerName = existingContainer.name;
        logger.info("Restarting services for " + containerName);
        // Active nodes must be suspended in the Orchestrator before services are restarted.
        orchestratorSuspendNode();
        dockerOperations.restartVespaOnNode(containerName);
    }
}
/** Suspends the node (best effort) and stops all Vespa services inside the container. */
@Override
public void stopServices() {
    logger.info("Stopping services for " + containerName);
    dockerOperations.trySuspendNode(containerName);
    dockerOperations.stopServicesOnNode(containerName);
}
/**
 * Decides whether the existing container has to be removed; returns a human-readable reason
 * or empty when the container can be kept.
 */
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final Node.State nodeState = nodeSpec.nodeState;
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    return Optional.empty();
}
/**
 * Removes the container if {@link #shouldRemoveContainer} says so, after first suspending
 * the node (active nodes only) and stopping its services.
 *
 * @return the container if it was kept, empty if it was removed
 */
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
    if (removeReason.isPresent()) {
        logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
        if (existingContainer.state.isRunning()) {
            if (nodeSpec.nodeState == Node.State.active) {
                orchestratorSuspendNode();
            }
            try {
                stopServices();
            } catch (Exception e) {
                // Removal must proceed even if services could not be stopped cleanly.
                logger.info("Failed stopping services, ignoring", e);
            }
        }
        // Cancel the periodic filebeat-restart task tied to this container, if any.
        if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
        dockerOperations.removeContainer(existingContainer);
        containerState = ABSENT;
        logger.info("Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
/**
 * Schedules an asynchronous download of the wanted Docker image if it differs from the
 * current one and is not already present or being downloaded. The download callback invokes
 * {@link #signalWorkToBeDone} so the tick loop reconverges promptly when it completes.
 */
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
    if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
    // Bug fix: guard against an absent wanted image (images differ but no wanted image set).
    // The original called wantedDockerImage.get() unconditionally and would throw here.
    if ( ! nodeSpec.wantedDockerImage.isPresent()) {
        imageBeingDownloaded = null;
        return;
    }
    if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
        if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
            return; // download of this image already in progress
        }
        imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
    } else if (imageBeingDownloaded != null) {
        imageBeingDownloaded = null; // image now available locally; clear the in-progress marker
    }
}
/** Wakes up the tick thread so it converges immediately instead of waiting out the interval. */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (!workToDoNow) {
            workToDoNow = true;
            addDebugMessage("Signaling work to be done");
            monitor.notifyAll();
        }
    }
}

/**
 * One iteration of the agent loop: waits until the converge interval elapses (or work is
 * signaled), reconciles the frozen flag, then runs {@link #converge} unless frozen.
 * All expected exception types are handled here so the loop thread never dies silently.
 */
void tick() {
    boolean isFrozenCopy;
    synchronized (monitor) {
        while (!workToDoNow) {
            long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
            if (remainder > 0) {
                try {
                    monitor.wait(remainder);
                } catch (InterruptedException e) {
                    logger.error("Interrupted, but ignoring this: " + hostname);
                }
            } else break;
        }
        lastConverge = clock.instant();
        workToDoNow = false;
        if (isFrozen != wantFrozen) {
            isFrozen = wantFrozen;
            logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
        }
        // Copy under the lock; converge() below runs outside the monitor.
        isFrozenCopy = isFrozen;
    }
    if (isFrozenCopy) {
        addDebugMessage("tick: isFrozen");
    } else {
        try {
            converge();
        } catch (OrchestratorException e) {
            // Expected when the Orchestrator denies an operation; retried on a later tick.
            logger.info(e.getMessage());
            addDebugMessage(e.getMessage());
        } catch (DockerException e) {
            numberOfUnhandledException++;
            // Docker state is unknown after a failure; force a re-query on the next tick.
            containerState = UNKNOWN;
            logger.error("Caught a DockerExecption, resetting containerState to " + containerState, e);
        } catch (Exception e) {
            numberOfUnhandledException++;
            logger.error("Unhandled exception, ignoring.", e);
            addDebugMessage(e.getMessage());
        } catch (Throwable t) {
            // Errors (e.g. OutOfMemoryError) are treated as unrecoverable for the whole process.
            logger.error("Unhandled throwable, taking down system.", t);
            System.exit(234);
        }
    }
}
/**
 * Fetches this node's spec from the node repository and drives the local container towards
 * it, dispatching on the node's state in the node repo.
 *
 * @throws IllegalStateException if the node is missing from the node repository
 */
void converge() {
    final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
            .orElseThrow(() ->
                    new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
    Optional<Container> container = getContainer();
    if (!nodeSpec.equals(lastNodeSpec)) {
        addDebugMessage("Loading new node spec: " + nodeSpec.toString());
        lastNodeSpec = nodeSpec;
        // Spec changed: refresh the metrics configuration inside a running container.
        if (container.isPresent()) {
            storageMaintainer.ifPresent(maintainer -> {
                maintainer.writeMetricsConfig(containerName, nodeSpec);
            });
        }
    }
    switch (nodeSpec.nodeState) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should run in these states.
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case active:
            storageMaintainer.ifPresent(maintainer -> {
                maintainer.removeOldFilesFromNode(containerName);
                maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
            });
            scheduleDownLoadIfNeeded(nodeSpec);
            if (isDownloadingImage()) {
                // Come back on a later tick; the download callback signals work to be done.
                addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            if (! container.isPresent()) {
                startContainer(nodeSpec);
            }
            runLocalResumeScriptIfNeeded(nodeSpec);
            // Report current attributes before resuming so the node repo sees a consistent picture.
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            logger.info("Call resume against Orchestrator");
            orchestrator.resume(hostname);
            break;
        case inactive:
            storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case provisioned:
            nodeRepository.markAsDirty(hostname);
            break;
        case dirty:
            storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
            storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            nodeRepository.markNodeAvailableForNewAllocation(hostname);
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
    }
}
/**
 * Pushes the given metrics into the container's metrics proxy via vespa-rpc-invoke,
 * encoded as a secret-agent report. Failures are logged, not propagated.
 */
@SuppressWarnings("unchecked")
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
    StringBuilder params = new StringBuilder();
    try {
        for (DimensionMetrics dimensionMetrics : metrics) {
            params.append(dimensionMetrics.toSecretAgentReport());
        }
        // "s:" prefix marks the payload as a string argument for vespa-rpc-invoke.
        String wrappedMetrics = "s:" + params.toString();
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        logger.warning("Unable to push metrics to container: " + containerName, e);
    }
}

/**
 * Returns the current container, or empty if it is known to be absent.
 * Absence is cached in containerState to avoid asking the Docker daemon needlessly.
 */
private Optional<Container> getContainer() {
    if (containerState == ABSENT) return Optional.empty();
    Optional<Container> container = dockerOperations.getContainer(containerName);
    if (! container.isPresent()) containerState = ABSENT;
    return container;
}

/** Returns the hostname of the node this agent manages. */
@Override
public String getHostname() {
    return hostname;
}

/** Returns whether a Docker image download is currently in progress. */
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}

/** Returns the unhandled-exception count accumulated since the last call and resets it. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int temp = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return temp;
}
/**
 * Computes CPU usage as the percentage of system CPU time consumed by the container
 * between two consecutive samples. The first sample (no baseline yet) and any sample
 * with zero elapsed system time yield 0.
 */
class CpuUsageReporter {
    private long lastContainerUsage = 0;
    private long lastSystemUsage = 0;

    double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
        final long systemDelta = currentSystemUsage - lastSystemUsage;
        final long containerDelta = currentContainerUsage - lastContainerUsage;

        double percentage = 0;
        if (systemDelta != 0 && lastSystemUsage != 0) {
            percentage = 100.0 * containerDelta / systemDelta;
        }

        // Remember this sample as the baseline for the next call.
        lastContainerUsage = currentContainerUsage;
        lastSystemUsage = currentSystemUsage;
        return percentage;
    }
}
/**
 * Asks the Orchestrator for permission to suspend this node.
 * NOTE(review): presumably throws if permission is denied (tick() handles
 * OrchestratorException) — confirm against the Orchestrator interface.
 */
private void orchestratorSuspendNode() {
    logger.info("Ask Orchestrator for permission to suspend node " + hostname);
    orchestrator.suspend(hostname);
}
} | class NodeAgentImpl implements NodeAgent {
// Set once by stop(); the tick loop thread exits when it observes this flag.
private final AtomicBoolean terminated = new AtomicBoolean(false);

// Actual frozen state vs. the state the controller wants; reconciled in tick().
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;

// Guards the frozen/workToDoNow state and is used to wake up the tick thread.
private final Object monitor = new Object();

private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final String hostname;
private final ContainerName containerName;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final Optional<StorageMaintainer> storageMaintainer;
private final Environment environment;
private final Clock clock;
private final Optional<AclMaintainer> aclMaintainer;

// Timestamps debug messages. SimpleDateFormat is not thread-safe, but it is only used
// while holding the debugMessages lock in addDebugMessage().
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
// Bounded in-memory log of recent agent events, exposed through debugInfo().
private final LinkedList<String> debugMessages = new LinkedList<>();

private long delaysBetweenEachConvergeMillis = 30_000;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Thread loopThread;

// Restarts filebeat once per day while the container runs; see startContainer().
private final ScheduledExecutorService filebeatRestarter =
        Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private final Consumer<String> serviceRestarter;
private Future<?> currentFilebeatRestarter;
private boolean resumeScriptRun = false;

/**
 * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
 * NodeAgent explicitly starting it.
 * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
 * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
 * to get updated state of the container.
 */
enum ContainerState {
    ABSENT,
    UNKNOWN
}
private ContainerState containerState = UNKNOWN;

// Caches of the last published/observed state, used to avoid redundant node-repo writes
// and to detect spec changes in converge().
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/**
 * Creates a node agent for the given host.
 * The service restarter built here restarts a single named service (e.g. filebeat)
 * inside the container as root; failures are logged but never propagated.
 */
public NodeAgentImpl(
        final String hostName,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final Optional<StorageMaintainer> storageMaintainer,
        final Environment environment,
        final Clock clock,
        final Optional<AclMaintainer> aclMaintainer) {
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.hostname = hostName;
    this.containerName = ContainerName.fromHostname(hostName);
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
    this.environment = environment;
    this.clock = clock;
    this.aclMaintainer = aclMaintainer;
    this.lastConverge = clock.instant();
    this.serviceRestarter = service -> {
        try {
            ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                    containerName, "service", service, "restart");
            if (!processResult.isSuccess()) {
                logger.error("Failed to restart service " + service + ": " + processResult);
            }
        } catch (Exception e) {
            logger.error("Failed to restart service " + service, e);
        }
    };
}
/**
 * Requests the agent to freeze (stop converging) or unfreeze.
 *
 * @return true if the agent has already reached the requested frozen state
 */
@Override
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        if (wantFrozen != frozen) {
            wantFrozen = frozen;
            addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
            // Wake the tick thread so the state change takes effect promptly.
            signalWorkToBeDone();
        }
        return isFrozen == frozen;
    }
}

/** Appends a timestamped message to the bounded debug history (and the debug log). */
private void addDebugMessage(String message) {
    synchronized (debugMessages) {
        // Keep the history bounded; drop the oldest entries first.
        while (debugMessages.size() > 1000) {
            debugMessages.pop();
        }
        logger.debug(message);
        debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
    }
}

/**
 * Returns a snapshot of the agent's internal state for debugging/inspection.
 * NOTE(review): lastNodeSpec is dereferenced without a null check — if called before the
 * first converge() this would throw; confirm callers only use this after startup.
 */
@Override
public Map<String, Object> debugInfo() {
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        debug.put("History", new LinkedList<>(debugMessages));
    }
    debug.put("Node repo state", lastNodeSpec.nodeState.name());
    return debug;
}

/**
 * Starts the agent's tick loop thread with the given converge interval.
 *
 * @throws RuntimeException if the agent has already been started
 */
@Override
public void start(int intervalMillis) {
    addDebugMessage("Starting with interval " + intervalMillis + "ms");
    delaysBetweenEachConvergeMillis = intervalMillis;
    if (loopThread != null) {
        throw new RuntimeException("Can not restart a node agent.");
    }
    loopThread = new Thread(() -> {
        while (!terminated.get()) tick();
    });
    loopThread.setName("tick-" + hostname);
    loopThread.start();
}
/**
 * Stops the agent: shuts down the filebeat-restarter executor, signals the tick loop to
 * terminate and waits (bounded) for both to finish.
 *
 * @throws RuntimeException if the agent was already stopped
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    filebeatRestarter.shutdown();
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    // Wake the tick thread so it observes the terminated flag immediately.
    signalWorkToBeDone();
    try {
        loopThread.join(10000);
        if (loopThread.isAlive()) {
            logger.error("Could not stop host thread " + hostname);
        }
    } catch (InterruptedException e1) {
        logger.error("Interrupted; Could not stop host thread " + hostname);
    }
    try {
        filebeatRestarter.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        logger.error("Interrupted; Could not stop filebeatrestarter thread");
    }
}

/**
 * Runs the optional node "resume" program once per container lifetime.
 * The flag is reset when a new container is started.
 */
private void runLocalResumeScriptIfNeeded(final ContainerNodeSpec nodeSpec) {
    if (! resumeScriptRun) {
        addDebugMessage("Starting optional node program resume command");
        dockerOperations.resumeNode(containerName);
        resumeScriptRun = true;
    }
}
/**
 * Builds the node attributes reflecting this agent's current view and publishes them to the
 * node repository if they changed. Image/version are reported empty when no container exists.
 */
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
    final NodeAttributes nodeAttributes = new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
            .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
    publishStateToNodeRepoIfChanged(nodeAttributes);
}

/** Writes the attributes to the node repo only when they differ from the last published set. */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (!currentAttributes.equals(lastAttributesSet)) {
        logger.info("Publishing new set of attributes to node repo: "
                + lastAttributesSet + " -> " + currentAttributes);
        addDebugMessage("Publishing new set of attributes to node repo: {" +
                lastAttributesSet + "} -> {" + currentAttributes + "}");
        nodeRepository.updateNodeAttributes(hostname, currentAttributes);
        lastAttributesSet = currentAttributes;
    }
}

/**
 * Starts a new container for the node: applies ACLs, starts the container, resets the CPU
 * usage baseline, schedules the daily filebeat restart and writes metrics/filebeat config.
 */
private void startContainer(ContainerNodeSpec nodeSpec) {
    aclMaintainer.ifPresent(AclMaintainer::run);
    dockerOperations.startContainer(containerName, nodeSpec);
    lastCpuMetric = new CpuUsageReporter();
    currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
    storageMaintainer.ifPresent(maintainer -> {
        maintainer.writeMetricsConfig(containerName, nodeSpec);
        maintainer.writeFilebeatConfig(containerName, nodeSpec);
    });
    // The resume script must run again for the new container.
    resumeScriptRun = false;
    containerState = UNKNOWN;
    logger.info("Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the existing container if required; if it survives, restarts its services when
 * the restart generation has been bumped.
 *
 * @return the surviving container, or empty if there was none or it was removed
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
    if ( ! existingContainer.isPresent()) return Optional.empty();

    Optional<Container> survivor = removeContainerIfNeeded(nodeSpec, existingContainer.get());
    if ( ! survivor.isPresent()) return Optional.empty();

    Container container = survivor.get();
    Optional<String> restartReason = shouldRestartServices(nodeSpec);
    if (restartReason.isPresent()) {
        logger.info("Will restart services for container " + container + ": " + restartReason.get());
        restartServices(nodeSpec, container);
    }
    return survivor;
}
/**
 * Decides whether services inside the container must be restarted.
 *
 * @return a human-readable restart reason if the wanted restart generation is ahead of the
 *         current one (or current is unknown), otherwise empty
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
    if (!nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        // Bug fix: the message used currentRestartGeneration.get() unconditionally, which
        // throws NoSuchElementException exactly in the "current generation absent" branch.
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.map(String::valueOf).orElse("[absent]")
                + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
/**
 * Restarts the Vespa services inside the container.
 * Only acts when the container is running and the node is active; the node is suspended
 * in the Orchestrator first so the restart does not violate availability constraints.
 */
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
        ContainerName containerName = existingContainer.name;
        logger.info("Restarting services for " + containerName);
        // Ask the Orchestrator for permission before restarting; may throw if denied.
        orchestratorSuspendNode();
        dockerOperations.restartVespaOnNode(containerName);
    }
}

/** Suspends the node (best effort) and stops all Vespa services running in the container. */
@Override
public void stopServices() {
    logger.info("Stopping services for " + containerName);
    // "try" variant: stopping proceeds even if the suspend fails.
    dockerOperations.trySuspendNode(containerName);
    dockerOperations.stopServicesOnNode(containerName);
}

/**
 * Decides whether the existing container must be torn down.
 *
 * @return a human-readable removal reason if the container should be removed, otherwise empty
 */
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    final Node.State nodeState = nodeSpec.nodeState;
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }
    // A changed wanted image requires recreating the container from the new image.
    if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
    }
    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }
    return Optional.empty();
}
/**
 * Removes the container if {@link #shouldRemoveContainer} says so, after first suspending
 * the node (active nodes only) and stopping its services.
 *
 * @return the container if it was kept, empty if it was removed
 */
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
    if (removeReason.isPresent()) {
        logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
        if (existingContainer.state.isRunning()) {
            if (nodeSpec.nodeState == Node.State.active) {
                orchestratorSuspendNode();
            }
            try {
                stopServices();
            } catch (Exception e) {
                // Removal must proceed even if services could not be stopped cleanly.
                logger.info("Failed stopping services, ignoring", e);
            }
        }
        // Cancel the periodic filebeat-restart task tied to this container, if any.
        if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
        dockerOperations.removeContainer(existingContainer);
        containerState = ABSENT;
        logger.info("Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
/**
 * Schedules an asynchronous download of the wanted Docker image if it differs from the
 * current one and is not already present or being downloaded. The download callback invokes
 * {@link #signalWorkToBeDone} so the tick loop reconverges promptly when it completes.
 */
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
    if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
    // Bug fix: guard against an absent wanted image (images differ but no wanted image set).
    // The original called wantedDockerImage.get() unconditionally and would throw here.
    if ( ! nodeSpec.wantedDockerImage.isPresent()) {
        imageBeingDownloaded = null;
        return;
    }
    if (dockerOperations.shouldScheduleDownloadOfImage(nodeSpec.wantedDockerImage.get())) {
        if (nodeSpec.wantedDockerImage.get().equals(imageBeingDownloaded)) {
            return; // download of this image already in progress
        }
        imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        dockerOperations.scheduleDownloadOfImage(containerName, imageBeingDownloaded, this::signalWorkToBeDone);
    } else if (imageBeingDownloaded != null) {
        imageBeingDownloaded = null; // image now available locally; clear the in-progress marker
    }
}
/** Wakes up the tick thread so it converges immediately instead of waiting out the interval. */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (!workToDoNow) {
            workToDoNow = true;
            addDebugMessage("Signaling work to be done");
            monitor.notifyAll();
        }
    }
}

/**
 * One iteration of the agent loop: waits until the converge interval elapses (or work is
 * signaled), reconciles the frozen flag, then runs {@link #converge} unless frozen.
 * All expected exception types are handled here so the loop thread never dies silently.
 */
void tick() {
    boolean isFrozenCopy;
    synchronized (monitor) {
        while (!workToDoNow) {
            long remainder = delaysBetweenEachConvergeMillis - Duration.between(lastConverge, clock.instant()).toMillis();
            if (remainder > 0) {
                try {
                    monitor.wait(remainder);
                } catch (InterruptedException e) {
                    logger.error("Interrupted, but ignoring this: " + hostname);
                }
            } else break;
        }
        lastConverge = clock.instant();
        workToDoNow = false;
        if (isFrozen != wantFrozen) {
            isFrozen = wantFrozen;
            logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
        }
        // Copy under the lock; converge() below runs outside the monitor.
        isFrozenCopy = isFrozen;
    }
    if (isFrozenCopy) {
        addDebugMessage("tick: isFrozen");
    } else {
        try {
            converge();
        } catch (OrchestratorException e) {
            // Expected when the Orchestrator denies an operation; retried on a later tick.
            logger.info(e.getMessage());
            addDebugMessage(e.getMessage());
        } catch (DockerException e) {
            numberOfUnhandledException++;
            // Docker state is unknown after a failure; force a re-query on the next tick.
            containerState = UNKNOWN;
            logger.error("Caught a DockerExecption, resetting containerState to " + containerState, e);
        } catch (Exception e) {
            numberOfUnhandledException++;
            logger.error("Unhandled exception, ignoring.", e);
            addDebugMessage(e.getMessage());
        } catch (Throwable t) {
            // Errors (e.g. OutOfMemoryError) are treated as unrecoverable for the whole process.
            logger.error("Unhandled throwable, taking down system.", t);
            System.exit(234);
        }
    }
}
/**
 * Fetches this node's spec from the node repository and drives the local container towards
 * it, dispatching on the node's state in the node repo.
 *
 * @throws IllegalStateException if the node is missing from the node repository
 */
void converge() {
    final ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(hostname)
            .orElseThrow(() ->
                    new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
    Optional<Container> container = getContainer();
    if (!nodeSpec.equals(lastNodeSpec)) {
        addDebugMessage("Loading new node spec: " + nodeSpec.toString());
        lastNodeSpec = nodeSpec;
        // Spec changed: refresh the metrics configuration inside a running container.
        if (container.isPresent()) {
            storageMaintainer.ifPresent(maintainer -> {
                maintainer.writeMetricsConfig(containerName, nodeSpec);
            });
        }
    }
    switch (nodeSpec.nodeState) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should run in these states.
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case active:
            storageMaintainer.ifPresent(maintainer -> {
                maintainer.removeOldFilesFromNode(containerName);
                maintainer.handleCoreDumpsForContainer(containerName, nodeSpec, environment);
            });
            scheduleDownLoadIfNeeded(nodeSpec);
            if (isDownloadingImage()) {
                // Come back on a later tick; the download callback signals work to be done.
                addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            if (! container.isPresent()) {
                startContainer(nodeSpec);
            }
            runLocalResumeScriptIfNeeded(nodeSpec);
            // Report current attributes before resuming so the node repo sees a consistent picture.
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            logger.info("Call resume against Orchestrator");
            orchestrator.resume(hostname);
            break;
        case inactive:
            storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case provisioned:
            nodeRepository.markAsDirty(hostname);
            break;
        case dirty:
            storageMaintainer.ifPresent(maintainer -> maintainer.removeOldFilesFromNode(containerName));
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
            storageMaintainer.ifPresent(maintainer -> maintainer.archiveNodeData(containerName));
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            nodeRepository.markNodeAvailableForNewAllocation(hostname);
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
    }
}
/**
 * Pushes the given metrics into the container's metrics proxy via vespa-rpc-invoke,
 * encoded as a secret-agent report. Failures are logged, not propagated.
 */
@SuppressWarnings("unchecked")
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
    StringBuilder params = new StringBuilder();
    try {
        for (DimensionMetrics dimensionMetrics : metrics) {
            params.append(dimensionMetrics.toSecretAgentReport());
        }
        // "s:" prefix marks the payload as a string argument for vespa-rpc-invoke.
        String wrappedMetrics = "s:" + params.toString();
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        logger.warning("Unable to push metrics to container: " + containerName, e);
    }
}

/**
 * Returns the current container, or empty if it is known to be absent.
 * Absence is cached in containerState to avoid asking the Docker daemon needlessly.
 */
private Optional<Container> getContainer() {
    if (containerState == ABSENT) return Optional.empty();
    Optional<Container> container = dockerOperations.getContainer(containerName);
    if (! container.isPresent()) containerState = ABSENT;
    return container;
}

/** Returns the hostname of the node this agent manages. */
@Override
public String getHostname() {
    return hostname;
}

/** Returns whether a Docker image download is currently in progress. */
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}

/** Returns the unhandled-exception count accumulated since the last call and resets it. */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int temp = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return temp;
}
/**
 * Stateful CPU usage sampler: each call reports the container's share of system CPU time
 * since the previous call, in percent. Returns 0 for the first sample (no baseline) and
 * whenever no system CPU time has elapsed between samples.
 */
class CpuUsageReporter {
    private long previousContainerUsage = 0;
    private long previousSystemUsage = 0;

    double getCpuUsagePercentage(long currentContainerUsage, long currentSystemUsage) {
        long systemDelta = currentSystemUsage - previousSystemUsage;
        long containerDelta = currentContainerUsage - previousContainerUsage;
        boolean haveBaseline = previousSystemUsage != 0 && systemDelta != 0;

        double result = haveBaseline ? 100.0 * containerDelta / systemDelta : 0;

        // This sample becomes the baseline for the next call.
        previousContainerUsage = currentContainerUsage;
        previousSystemUsage = currentSystemUsage;
        return result;
    }
}
/**
 * Asks the Orchestrator for permission to suspend this node.
 * NOTE(review): presumably throws if permission is denied (tick() handles
 * OrchestratorException) — confirm against the Orchestrator interface.
 */
private void orchestratorSuspendNode() {
    logger.info("Ask Orchestrator for permission to suspend node " + hostname);
    orchestrator.suspend(hostname);
}
} |
Consider adding a method on value returning an Optional.of(asString(this)), or empty if NIX. | private Node applyField(String name, Inspector value) {
// Applies a single patch field to the node; throws for unknown/unmodifiable fields.
switch (name) {
    case "convergedStateVersion" :
        // Accepted but ignored.
        return node;
    case "currentRebootGeneration" :
        return node.withCurrentRebootGeneration(asLong(value), nodeRepository.clock().instant());
    case "currentRestartGeneration" :
        return patchCurrentRestartGeneration(asLong(value));
    case "currentDockerImage" :
        // Derive the Vespa version from the image tag; empty string maps to the empty version.
        Version versionFromImage = Optional.of(asString(value))
                .filter(s -> !s.isEmpty())
                .map(DockerImage::new)
                .map(DockerImage::tagAsVersion)
                .orElse(Version.emptyVersion);
        return node.with(node.status().withVespaVersion(versionFromImage));
    case "currentVespaVersion" :
        return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
    case "currentHostedVersion" :
        // Accepted but ignored.
        return node;
    case "failCount" :
        return node.with(node.status().setFailCount(asLong(value).intValue()));
    case "flavor" :
        return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
    case HARDWARE_FAILURE_TYPE:
        // A JSON null (NIX) clears the hardware failure.
        Optional<String> hardwareFailure = value.type().equals(Type.NIX) ?
                Optional.empty() : Optional.of(asString(value));
        return node.with(node.status().withHardwareFailure(hardwareFailure));
    case "parentHostname" :
        return node.withParentHostname(asString(value));
    case "ipAddresses" :
        return node.withIpAddresses(asStringSet(value));
    case "additionalIpAddresses" :
        return node.withAdditionalIpAddresses(asStringSet(value));
    case "wantToRetire" :
        return node.with(node.status().withWantToRetire(asBoolean(value)));
    case "wantToDeprovision" :
        return node.with(node.status().withWantToDeprovision(asBoolean(value)));
    case "hardwareDivergence" :
        // A JSON null (NIX) clears the divergence report.
        Optional<String> hardwareDivergence = value.type().equals(Type.NIX) ?
                Optional.empty() : Optional.of(asString(value));
        return node.with(node.status().withHardwareDivergence(hardwareDivergence));
    default :
        throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | Optional<String> hardwareFailure = value.type().equals(Type.NIX) ? | private Node applyField(String name, Inspector value) {
// Applies a single patch field to the node; throws for unknown/unmodifiable fields.
switch (name) {
    case "convergedStateVersion" :
        // Accepted but ignored.
        return node;
    case "currentRebootGeneration" :
        return node.withCurrentRebootGeneration(asLong(value), nodeRepository.clock().instant());
    case "currentRestartGeneration" :
        return patchCurrentRestartGeneration(asLong(value));
    case "currentDockerImage" :
        // Derive the Vespa version from the image tag; empty string maps to the empty version.
        Version versionFromImage = Optional.of(asString(value))
                .filter(s -> !s.isEmpty())
                .map(DockerImage::new)
                .map(DockerImage::tagAsVersion)
                .orElse(Version.emptyVersion);
        return node.with(node.status().withVespaVersion(versionFromImage));
    case "currentVespaVersion" :
        return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
    case "currentHostedVersion" :
        // Accepted but ignored.
        return node;
    case "failCount" :
        return node.with(node.status().setFailCount(asLong(value).intValue()));
    case "flavor" :
        return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
    // Both the legacy field name and the new one map to the failure description (fall-through).
    case HARDWARE_FAILURE_DESCRIPTION:
    case HARDWARE_FAILURE_TYPE:
        return node.with(node.status().withHardwareFailureDescription(asOptionalString(value)));
    case "parentHostname" :
        return node.withParentHostname(asString(value));
    case "ipAddresses" :
        return node.withIpAddresses(asStringSet(value));
    case "additionalIpAddresses" :
        return node.withAdditionalIpAddresses(asStringSet(value));
    case "wantToRetire" :
        return node.with(node.status().withWantToRetire(asBoolean(value)));
    case "wantToDeprovision" :
        return node.with(node.status().withWantToDeprovision(asBoolean(value)));
    case "hardwareDivergence" :
        return node.with(node.status().withHardwareDivergence(asOptionalString(value)));
    default :
        throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | class NodePatcher {
// JSON field name for patching a node's hardware failure.
public static final String HARDWARE_FAILURE_TYPE = "hardwareFailureType";

private final NodeFlavors nodeFlavors;
// Root inspector over the parsed JSON patch body.
private final Inspector inspector;
private final NodeRepository nodeRepository;
// The node being patched; rebuilt field by field in apply().
private Node node;

/**
 * Creates a patcher for the given node from a JSON request body.
 *
 * @throws RuntimeException if the body cannot be read
 */
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, NodeRepository nodeRepository) {
    try {
        this.nodeFlavors = nodeFlavors;
        // Cap the body at ~1 MB.
        inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
        this.node = node;
        this.nodeRepository = nodeRepository;
    }
    catch (IOException e) {
        throw new RuntimeException("Error reading request body", e);
    }
}
/**
 * Apply the json to the node and return all nodes affected by the patch.
 * More than 1 node may be affected if e.g. the node is a Docker host, which may have
 * children that must be updated in a consistent manner.
 */
public List<Node> apply() {
    inspector.traverse((String name, Inspector value) -> {
        try {
            node = applyField(name, value);
        }
        catch (IllegalArgumentException e) {
            // Wrap to identify which field failed.
            throw new IllegalArgumentException("Could not set field '" + name + "'", e);
        }
    } );
    List<Node> nodes = new ArrayList<>();
    // Hardware-failure patches on a Docker host propagate to its child nodes.
    if (node.type() == NodeType.host) {
        nodes.addAll(modifiedDockerChildNodes());
    }
    nodes.add(node);
    return nodes;
}
/**
 * Returns this Docker host's child nodes with the patched hardware failure applied,
 * or an empty list when the patch does not touch the hardware failure field.
 */
private List<Node> modifiedDockerChildNodes() {
    Inspector field = inspector.field(HARDWARE_FAILURE_TYPE);
    if ( ! field.valid()) return new ArrayList<>();
    // Bug fix: the NIX (JSON null) check must be made on the field, not on the root patch
    // object — the root is always an OBJECT, so the old check could never yield empty and
    // asString() would throw for an explicit null value.
    Optional<String> hardwareFailure = field.type().equals(Type.NIX) ?
            Optional.empty() : Optional.of(field.asString());
    return nodeRepository.getChildNodes(node.hostname()).stream()
            .map(child -> child.with(child.status().withHardwareFailure(hardwareFailure)))
            .collect(Collectors.toList());
}
/**
 * Converts a JSON array of strings to a sorted set.
 *
 * @throws IllegalArgumentException if the value or any element has the wrong type
 */
private Set<String> asStringSet(Inspector field) {
    if ( ! field.type().equals(Type.ARRAY))
        throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
    TreeSet<String> strings = new TreeSet<>();
    for (int i = 0; i < field.entries(); i++) {
        Inspector entry = field.entry(i);
        if ( ! entry.type().equals(Type.STRING))
            throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
        strings.add(entry.asString());
    }
    return strings;
}

/**
 * Sets the current restart generation on the node's allocation.
 *
 * @throws IllegalArgumentException if the node is not allocated
 */
private Node patchCurrentRestartGeneration(Long value) {
    Optional<Allocation> allocation = node.allocation();
    if (allocation.isPresent())
        return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
    else
        throw new IllegalArgumentException("Node is not allocated");
}

/** Returns the value as a Long, throwing if it is not a JSON long. */
private Long asLong(Inspector field) {
    if ( ! field.type().equals(Type.LONG))
        throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
    return field.asLong();
}

/** Returns the value as a String, throwing if it is not a JSON string. */
private String asString(Inspector field) {
    if ( ! field.type().equals(Type.STRING))
        throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
    return field.asString();
}

/** Returns the value as a boolean, throwing if it is not a JSON bool. */
private boolean asBoolean(Inspector field) {
    if ( ! field.type().equals(Type.BOOL))
        throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
    return field.asBool();
}
} | class NodePatcher {
// Legacy JSON field name for patching a node's hardware failure; still accepted.
public static final String HARDWARE_FAILURE_TYPE = "hardwareFailureType";
// Current JSON field name for patching a node's hardware failure description.
public static final String HARDWARE_FAILURE_DESCRIPTION = "hardwareFailureDescription";

private final NodeFlavors nodeFlavors;
// Root inspector over the parsed JSON patch body.
private final Inspector inspector;
private final NodeRepository nodeRepository;
// The node being patched; rebuilt field by field in apply().
private Node node;

/**
 * Creates a patcher for the given node from a JSON request body.
 *
 * @throws RuntimeException if the body cannot be read
 */
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, NodeRepository nodeRepository) {
    try {
        this.nodeFlavors = nodeFlavors;
        // Cap the body at ~1 MB.
        inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
        this.node = node;
        this.nodeRepository = nodeRepository;
    }
    catch (IOException e) {
        throw new RuntimeException("Error reading request body", e);
    }
}
/**
 * Apply the json to the node and return all nodes affected by the patch.
 * More than 1 node may be affected if e.g. the node is a Docker host, which may have
 * children that must be updated in a consistent manner.
 */
public List<Node> apply() {
    inspector.traverse((String name, Inspector value) -> {
        try {
            node = applyField(name, value);
        }
        catch (IllegalArgumentException e) {
            // Wrap to identify which field failed.
            throw new IllegalArgumentException("Could not set field '" + name + "'", e);
        }
    } );
    List<Node> nodes = new ArrayList<>();
    // Hardware-failure patches on a Docker host propagate to its child nodes.
    if (node.type() == NodeType.host) {
        nodes.addAll(modifiedDockerChildNodes());
    }
    nodes.add(node);
    return nodes;
}
/**
 * Returns this Docker host's child nodes with the patched hardware failure description
 * applied, or an empty list when the patch does not touch either hardware failure field.
 * The legacy field name takes precedence when both are present, as before.
 */
private List<Node> modifiedDockerChildNodes() {
    // Refactor: both branches performed the identical mapping; select the field once instead.
    Inspector field = inspector.field(HARDWARE_FAILURE_TYPE);
    if ( ! field.valid()) field = inspector.field(HARDWARE_FAILURE_DESCRIPTION);
    if ( ! field.valid()) return new ArrayList<>();

    Optional<String> hardwareFailure = asOptionalString(field);
    return nodeRepository.getChildNodes(node.hostname()).stream()
            .map(child -> child.with(child.status().withHardwareFailureDescription(hardwareFailure)))
            .collect(Collectors.toList());
}
/** Reads {@code field} as a JSON array of strings, returning them in natural (sorted) order. */
private Set<String> asStringSet(Inspector field) {
    if ( ! field.type().equals(Type.ARRAY))
        throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
    TreeSet<String> values = new TreeSet<>();
    for (int index = 0; index < field.entries(); index++) {
        Inspector element = field.entry(index);
        if ( ! element.type().equals(Type.STRING))
            throw new IllegalArgumentException("Expected a STRING value, got a " + element.type());
        values.add(element.asString());
    }
    return values;
}
/** Patches the current restart generation; the node must be allocated. */
private Node patchCurrentRestartGeneration(Long value) {
    Allocation allocation = node.allocation()
            .orElseThrow(() -> new IllegalArgumentException("Node is not allocated"));
    return node.with(allocation.withRestart(allocation.restartGeneration().withCurrent(value)));
}
/** Reads {@code field} as a LONG value. */
private Long asLong(Inspector field) {
    if (field.type().equals(Type.LONG)) return field.asLong();
    throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
}
/** Reads {@code field} as a STRING value. */
private String asString(Inspector field) {
    if (field.type().equals(Type.STRING)) return field.asString();
    throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
}
/** Reads {@code field} as a STRING value, mapping NIX (JSON null) to empty. */
private Optional<String> asOptionalString(Inspector field) {
    if (field.type().equals(Type.NIX)) return Optional.empty();
    return Optional.of(asString(field));
}
/** Reads {@code field} as a BOOL value. */
private boolean asBoolean(Inspector field) {
    if (field.type().equals(Type.BOOL)) return field.asBool();
    throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
}
} |
Fixed. | private Node applyField(String name, Inspector value) {
switch (name) {
case "convergedStateVersion" :
return node;
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), nodeRepository.clock().instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(asLong(value));
case "currentDockerImage" :
Version versionFromImage = Optional.of(asString(value))
.filter(s -> !s.isEmpty())
.map(DockerImage::new)
.map(DockerImage::tagAsVersion)
.orElse(Version.emptyVersion);
return node.with(node.status().withVespaVersion(versionFromImage));
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentHostedVersion" :
return node;
case "failCount" :
return node.with(node.status().setFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
case HARDWARE_FAILURE_TYPE:
Optional<String> hardwareFailure = value.type().equals(Type.NIX) ?
Optional.empty() : Optional.of(asString(value));
return node.with(node.status().withHardwareFailure(hardwareFailure));
case "parentHostname" :
return node.withParentHostname(asString(value));
case "ipAddresses" :
return node.withIpAddresses(asStringSet(value));
case "additionalIpAddresses" :
return node.withAdditionalIpAddresses(asStringSet(value));
case "wantToRetire" :
return node.with(node.status().withWantToRetire(asBoolean(value)));
case "wantToDeprovision" :
return node.with(node.status().withWantToDeprovision(asBoolean(value)));
case "hardwareDivergence" :
Optional<String> hardwareDivergence = value.type().equals(Type.NIX) ?
Optional.empty() : Optional.of(asString(value));
return node.with(node.status().withHardwareDivergence(hardwareDivergence));
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | Optional<String> hardwareFailure = value.type().equals(Type.NIX) ? | private Node applyField(String name, Inspector value) {
switch (name) {
case "convergedStateVersion" :
return node;
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), nodeRepository.clock().instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(asLong(value));
case "currentDockerImage" :
Version versionFromImage = Optional.of(asString(value))
.filter(s -> !s.isEmpty())
.map(DockerImage::new)
.map(DockerImage::tagAsVersion)
.orElse(Version.emptyVersion);
return node.with(node.status().withVespaVersion(versionFromImage));
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentHostedVersion" :
return node;
case "failCount" :
return node.with(node.status().setFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
case HARDWARE_FAILURE_DESCRIPTION:
case HARDWARE_FAILURE_TYPE:
return node.with(node.status().withHardwareFailureDescription(asOptionalString(value)));
case "parentHostname" :
return node.withParentHostname(asString(value));
case "ipAddresses" :
return node.withIpAddresses(asStringSet(value));
case "additionalIpAddresses" :
return node.withAdditionalIpAddresses(asStringSet(value));
case "wantToRetire" :
return node.with(node.status().withWantToRetire(asBoolean(value)));
case "wantToDeprovision" :
return node.with(node.status().withWantToDeprovision(asBoolean(value)));
case "hardwareDivergence" :
return node.with(node.status().withHardwareDivergence(asOptionalString(value)));
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | class NodePatcher {
// JSON field name carrying the hardware failure type.
public static final String HARDWARE_FAILURE_TYPE = "hardwareFailureType";
// Known flavors, used to resolve the "flavor" patch field.
private final NodeFlavors nodeFlavors;
// Root inspector of the parsed JSON patch body.
private final Inspector inspector;
private final NodeRepository nodeRepository;
// The node being patched; replaced with an updated copy as each field is applied.
private Node node;
/**
 * Creates a patcher which applies a JSON patch to the given node.
 * The request body is read eagerly (capped at ~1 MB), so the stream is
 * consumed by construction.
 *
 * @throws RuntimeException if the request body cannot be read
 */
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, NodeRepository nodeRepository) {
    this.nodeFlavors = nodeFlavors;
    this.node = node;
    this.nodeRepository = nodeRepository;
    try {
        byte[] requestBody = IOUtils.readBytes(json, 1000 * 1000);
        inspector = SlimeUtils.jsonToSlime(requestBody).get();
    } catch (IOException e) {
        throw new RuntimeException("Error reading request body", e);
    }
}
/**
 * Applies the JSON patch to the node and returns every node affected by it.
 * More than one node may be affected when the node is a Docker host whose
 * children must be updated consistently with it.
 */
public List<Node> apply() {
    inspector.traverse((String name, Inspector value) -> {
        try {
            node = applyField(name, value);
        } catch (IllegalArgumentException e) {
            // Wrap so the failing field name is part of the error reported to the caller.
            throw new IllegalArgumentException("Could not set field '" + name + "'", e);
        }
    });
    List<Node> affectedNodes = new ArrayList<>();
    if (node.type() == NodeType.host)
        affectedNodes.addAll(modifiedDockerChildNodes());
    affectedNodes.add(node);
    return affectedNodes;
}
/**
 * Returns the Docker children of this host that must be rewritten to stay consistent
 * with this patch, or an empty list when no child updates are needed.
 */
private List<Node> modifiedDockerChildNodes() {
    List<Node> children = nodeRepository.getChildNodes(node.hostname());
    boolean modified = false;
    Inspector failureField = inspector.field(HARDWARE_FAILURE_TYPE);
    if (failureField.valid()) {
        // Bug fix: test the *field* for NIX, not the enclosing patch object — the root
        // inspector is an OBJECT and never NIX, so a JSON null value previously tried
        // asString() on a NIX field instead of clearing the hardware failure.
        Optional<String> hardwareFailure = failureField.type().equals(Type.NIX) ?
                Optional.empty() : Optional.of(failureField.asString());
        modified = true;
        children = children.stream()
                .map(child -> child.with(child.status().withHardwareFailure(hardwareFailure)))
                .collect(Collectors.toList());
    }
    return modified ? children : new ArrayList<>();
}
/** Reads {@code field} as a JSON array of strings, returning them in natural (sorted) order. */
private Set<String> asStringSet(Inspector field) {
    if ( ! field.type().equals(Type.ARRAY))
        throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
    TreeSet<String> values = new TreeSet<>();
    for (int index = 0; index < field.entries(); index++) {
        Inspector element = field.entry(index);
        if ( ! element.type().equals(Type.STRING))
            throw new IllegalArgumentException("Expected a STRING value, got a " + element.type());
        values.add(element.asString());
    }
    return values;
}
/** Patches the current restart generation; the node must be allocated. */
private Node patchCurrentRestartGeneration(Long value) {
    Allocation allocation = node.allocation()
            .orElseThrow(() -> new IllegalArgumentException("Node is not allocated"));
    return node.with(allocation.withRestart(allocation.restartGeneration().withCurrent(value)));
}
/** Reads {@code field} as a LONG value. */
private Long asLong(Inspector field) {
    if (field.type().equals(Type.LONG)) return field.asLong();
    throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
}
/** Reads {@code field} as a STRING value. */
private String asString(Inspector field) {
    if (field.type().equals(Type.STRING)) return field.asString();
    throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
}
/** Reads {@code field} as a BOOL value. */
private boolean asBoolean(Inspector field) {
    if (field.type().equals(Type.BOOL)) return field.asBool();
    throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
}
} | class NodePatcher {
// JSON field name carrying the hardware failure type (legacy).
public static final String HARDWARE_FAILURE_TYPE = "hardwareFailureType";
// JSON field name carrying a free-text hardware failure description.
public static final String HARDWARE_FAILURE_DESCRIPTION = "hardwareFailureDescription";
// Known flavors, used to resolve the "flavor" patch field.
private final NodeFlavors nodeFlavors;
// Root inspector of the parsed JSON patch body.
private final Inspector inspector;
private final NodeRepository nodeRepository;
// The node being patched; replaced with an updated copy as each field is applied.
private Node node;
/**
 * Creates a patcher which applies a JSON patch to the given node.
 * The request body is read eagerly (capped at ~1 MB), so the stream is
 * consumed by construction.
 *
 * @throws RuntimeException if the request body cannot be read
 */
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, NodeRepository nodeRepository) {
    this.nodeFlavors = nodeFlavors;
    this.node = node;
    this.nodeRepository = nodeRepository;
    try {
        byte[] requestBody = IOUtils.readBytes(json, 1000 * 1000);
        inspector = SlimeUtils.jsonToSlime(requestBody).get();
    } catch (IOException e) {
        throw new RuntimeException("Error reading request body", e);
    }
}
/**
 * Applies the JSON patch to the node and returns every node affected by it.
 * More than one node may be affected when the node is a Docker host whose
 * children must be updated consistently with it.
 */
public List<Node> apply() {
    inspector.traverse((String name, Inspector value) -> {
        try {
            node = applyField(name, value);
        } catch (IllegalArgumentException e) {
            // Wrap so the failing field name is part of the error reported to the caller.
            throw new IllegalArgumentException("Could not set field '" + name + "'", e);
        }
    });
    List<Node> affectedNodes = new ArrayList<>();
    if (node.type() == NodeType.host)
        affectedNodes.addAll(modifiedDockerChildNodes());
    affectedNodes.add(node);
    return affectedNodes;
}
/**
 * Returns the Docker children of this host that must be rewritten to stay consistent
 * with this patch, or an empty list when no child updates are needed.
 *
 * The legacy 'hardwareFailureType' field and its successor 'hardwareFailureDescription'
 * previously had two identical copy-pasted branches; they are collapsed into one here.
 * The type field still takes precedence, preserving the original branch order.
 */
private List<Node> modifiedDockerChildNodes() {
    Inspector failureField = inspector.field(HARDWARE_FAILURE_TYPE).valid()
            ? inspector.field(HARDWARE_FAILURE_TYPE)
            : inspector.field(HARDWARE_FAILURE_DESCRIPTION);
    if ( ! failureField.valid()) return new ArrayList<>();
    Optional<String> hardwareFailure = asOptionalString(failureField);
    return nodeRepository.getChildNodes(node.hostname()).stream()
            .map(child -> child.with(child.status().withHardwareFailureDescription(hardwareFailure)))
            .collect(Collectors.toList());
}
/** Reads {@code field} as a JSON array of strings, returning them in natural (sorted) order. */
private Set<String> asStringSet(Inspector field) {
    if ( ! field.type().equals(Type.ARRAY))
        throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
    TreeSet<String> values = new TreeSet<>();
    for (int index = 0; index < field.entries(); index++) {
        Inspector element = field.entry(index);
        if ( ! element.type().equals(Type.STRING))
            throw new IllegalArgumentException("Expected a STRING value, got a " + element.type());
        values.add(element.asString());
    }
    return values;
}
/** Patches the current restart generation; the node must be allocated. */
private Node patchCurrentRestartGeneration(Long value) {
    Allocation allocation = node.allocation()
            .orElseThrow(() -> new IllegalArgumentException("Node is not allocated"));
    return node.with(allocation.withRestart(allocation.restartGeneration().withCurrent(value)));
}
/** Reads {@code field} as a LONG value. */
private Long asLong(Inspector field) {
    if (field.type().equals(Type.LONG)) return field.asLong();
    throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
}
/** Reads {@code field} as a STRING value. */
private String asString(Inspector field) {
    if (field.type().equals(Type.STRING)) return field.asString();
    throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
}
/** Reads {@code field} as a STRING value, mapping NIX (JSON null) to empty. */
private Optional<String> asOptionalString(Inspector field) {
    if (field.type().equals(Type.NIX)) return Optional.empty();
    return Optional.of(asString(field));
}
/** Reads {@code field} as a BOOL value. */
private boolean asBoolean(Inspector field) {
    if (field.type().equals(Type.BOOL)) return field.asBool();
    throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
}
} |
This comment makes no sense. If there are more reasons to need the query in the future, where do I put those checks? | protected boolean summaryNeedsQuery(Query query) {
if (query.getRanking().getQueryCache()) return false;
DocumentDatabase documentDb = getDocumentDatabase(query);
DocsumDefinition docsumDefinition = documentDb.getDocsumDefinitionSet().getDocsumDefinition(query.getPresentation().getSummary());
if (docsumDefinition == null) return true;
if (docsumDefinition.isDynamic()) return true;
RankProfile rankProfile = documentDb.rankProfiles().get(query.getRanking().getProfile());
if (rankProfile == null) return true;
if (rankProfile.hasSummaryFeatures()) return true;
if (query.getRanking().getListFeatures()) return true;
return false;
} | protected boolean summaryNeedsQuery(Query query) {
if (query.getRanking().getQueryCache()) return false;
DocumentDatabase documentDb = getDocumentDatabase(query);
DocsumDefinition docsumDefinition = documentDb.getDocsumDefinitionSet().getDocsumDefinition(query.getPresentation().getSummary());
if (docsumDefinition == null) return true;
if (docsumDefinition.isDynamic()) return true;
RankProfile rankProfile = documentDb.rankProfiles().get(query.getRanking().getProfile());
if (rankProfile == null) return true;
if (rankProfile.hasSummaryFeatures()) return true;
if (query.getRanking().getListFeatures()) return true;
return false;
} | class VespaBackEndSearcher extends PingableSearcher {
// Query property names used to detect result grouping / row combining.
private static final CompoundName grouping=new CompoundName("grouping");
private static final CompoundName combinerows=new CompoundName("combinerows");
/** If this is turned on this will fill summaries by dispatching directly to search nodes over RPC */
private final static CompoundName dispatchSummaries = new CompoundName("dispatch.summaries");
// Query properties controlling compression of query packets sent to the backend.
protected static final CompoundName PACKET_COMPRESSION_LIMIT = new CompoundName("packetcompressionlimit");
protected static final CompoundName PACKET_COMPRESSION_TYPE = new CompoundName("packetcompressiontype");
// Query property which suppresses query tracing when set.
protected static final CompoundName TRACE_DISABLE = new CompoundName("trace.disable");
/** The set of all document databases available in the backend handled by this searcher */
private Map<String, DocumentDatabase> documentDbs = new LinkedHashMap<>();
// The first database registered in init() becomes the default.
private DocumentDatabase defaultDocumentDb = null;
/** Default docsum class. null means "unset" and is the default value */
private String defaultDocsumClass = null;
/** Returns an iterator over all hits, at any depth, below the given result. */
protected Iterator<Hit> hitIterator(Result result) {
    return result.hits().unorderedDeepIterator();
}
// NOTE(review): only read through isLocalDispatching(); appears to mean "dispatch is
// handled by a local dispatcher" — confirm semantics at the call sites.
private boolean localDispatching = true;
/** The name of this source */
private String name;
/** Cache wrapper */
protected CacheControl cacheControl = null;
/**
 * The number of last significant bits in the partId which specifies the
 * row number in this backend,
 * the rest specifies the column. 0 if not known.
 */
private int rowBits = 0;
/** Searchcluster number */
private int sourceNumber;

/** Returns the name of this backend integration, set from cluster config in init(). */
protected final String getName() { return name; }

/** Returns the default docsum class, or null if unset. */
protected final String getDefaultDocsumClass() { return defaultDocsumClass; }

/** Sets default document summary class. Default is null */
private void setDefaultDocsumClass(String docsumClass) { defaultDocsumClass = docsumClass; }

/** Returns the packet cache controller of this */
public final CacheControl getCacheControl() { return cacheControl; }
/**
 * Searches a search cluster
 * This is an endpoint - searchers will never propagate the search to any nested searcher.
 *
 * @param query the query to search
 * @param queryPacket the serialized query representation to pass to the search cluster
 * @param cacheKey the cache key created from the query packet, or null if caching is not used
 * @param execution the query execution context
 */
protected abstract Result doSearch2(Query query, QueryPacket queryPacket, CacheKey cacheKey, Execution execution);

/** Fills the hits of this result with the given summary class. Implemented by subclasses. */
protected abstract void doPartialFill(Result result, String summaryClass);
/** Returns whether the 'dispatch.summaries' query property is set, i.e. summaries should be filled over RPC. */
protected static boolean wantsRPCSummarFill(Query query) {
    return query.properties().getBoolean(dispatchSummaries);
}
/**
 * Looks this query up in the packet cache and builds a result from the cached
 * first-phase (document id) data if present.
 * (The javadoc previously attached here described summaryNeedsQuery, not this method.)
 *
 * @return the cached result — hits filled from cached docsums when docsum packets are
 *         cached, otherwise left unfilled — or null on a cache miss
 */
private Result cacheLookupFirstPhase(CacheKey key, QueryPacketData queryPacketData, Query query, int offset, int hits, String summaryClass) throws IOException {
    PacketWrapper packetWrapper = cacheControl.lookup(key, query);
    if (packetWrapper == null) return null;
    List<DocumentInfo> documents = packetWrapper.getDocuments(offset, hits);
    if (documents == null) return null; // cached entry does not cover the requested window
    if (query.getPresentation().getSummary() == null)
        query.getPresentation().setSummary(getDefaultDocsumClass());
    Result result = new Result(query);
    QueryResultPacket resultPacket = packetWrapper.getFirstResultPacket();
    addMetaInfo(query, queryPacketData, resultPacket, result, true);
    if (packetWrapper.getNumPackets() == 0)
        addUnfilledHits(result, documents, true, queryPacketData, key);
    else
        addCachedHits(result, packetWrapper, summaryClass, documents);
    return result;
}
/**
 * Returns the document database to use for this query: the database of the single
 * restricted document type when exactly one is given and known, otherwise the default.
 */
protected DocumentDatabase getDocumentDatabase(Query query) {
    if (query.getModel().getRestrict().size() == 1) {
        String docTypeName = (String) query.getModel().getRestrict().iterator().next();
        DocumentDatabase db = documentDbs.get(docTypeName);
        if (db != null) return db;
    }
    return defaultDocumentDb;
}
/** Stamps the resolved document database name onto the query model, when one resolves. */
private void resolveDocumentDatabase(Query query) {
    DocumentDatabase docDb = getDocumentDatabase(query);
    if (docDb != null) {
        query.getModel().setDocumentDb(docDb.getName());
    }
}
/**
 * Initializes this searcher from config; must be called before use.
 *
 * @param docSumParams summary configuration, provides the default docsum class
 * @param clusterParams cluster identity (searcher name, cluster number, row bits)
 * @param cacheParams packet cache settings; an existing CacheControl is reused when given
 * @param documentdbInfoConfig the document databases of the backend, may be null
 */
public final void init(SummaryParameters docSumParams, ClusterParams clusterParams, CacheParams cacheParams,
                       DocumentdbInfoConfig documentdbInfoConfig) {
    this.name = clusterParams.searcherName;
    this.sourceNumber = clusterParams.clusterNumber;
    this.rowBits = clusterParams.rowBits;
    Validator.ensureNotNull("Name of Vespa backend integration", getName());
    setDefaultDocsumClass(docSumParams.defaultClass);
    if (documentdbInfoConfig != null) {
        for (DocumentdbInfoConfig.Documentdb docDb : documentdbInfoConfig.documentdb()) {
            DocumentDatabase db = new DocumentDatabase(docDb, clusterParams.emulation);
            if (documentDbs.isEmpty()) {
                defaultDocumentDb = db; // the first configured database becomes the default
            }
            documentDbs.put(docDb.name(), db);
        }
    }
    if (cacheParams.cacheControl == null) {
        this.cacheControl = new CacheControl(cacheParams.cacheMegaBytes, cacheParams.cacheTimeOutSeconds);
    } else {
        this.cacheControl = cacheParams.cacheControl;
    }
}
/** Hook allowing subclasses to rewrite the query before dispatch. Does nothing by default. */
protected void transformQuery(Query query) { }
/**
 * Searches the backend: validates the query, rewrites it, consults the packet
 * cache and falls back to dispatching via {@link #doSearch2}.
 */
public Result search(Query query, Execution execution) {
    // Reject empty queries up front
    Item root = query.getModel().getQueryTree().getRoot();
    if (root == null || root instanceof NullItem) {
        return new Result(query, ErrorMessage.createNullQuery(query.getHttpRequest().getUri().toString()));
    }
    // RPC summary filling cannot resend the query, so it requires the backend query cache
    if (wantsRPCSummarFill(query) && summaryNeedsQuery(query)) {
        return new Result(query, ErrorMessage.createInvalidQueryParameter(
                "When using dispatch.summaries and your summary/rankprofile require the query, " +
                " you need to enable ranking.queryCache."));
    }
    // Query tree rewrites; any of these may reduce the tree to null/NullItem
    QueryRewrite.optimizeByRestrict(query);
    QueryRewrite.optimizeAndNot(query);
    QueryRewrite.collapseSingleComposites(query);
    root = query.getModel().getQueryTree().getRoot();
    if (root == null || root instanceof NullItem)
        return new Result(query);
    resolveDocumentDatabase(query);
    transformQuery(query); // subclass hook — may also null out the tree
    traceQuery(name, "search", query, query.getOffset(), query.getHits(), 1, Optional.<String>empty());
    root = query.getModel().getQueryTree().getRoot();
    if (root == null || root instanceof NullItem)
        return new Result(query);
    // Serialize the query and apply optional packet compression settings
    QueryPacket queryPacket = QueryPacket.create(query);
    int compressionLimit = query.properties().getInteger(PACKET_COMPRESSION_LIMIT, 0);
    queryPacket.setCompressionLimit(compressionLimit);
    if (compressionLimit != 0)
        queryPacket.setCompressionType(query.properties().getString(PACKET_COMPRESSION_TYPE, "lz4"));
    if (isLoggingFine())
        getLogger().fine("made QueryPacket: " + queryPacket);
    // Try the packet cache first, then dispatch to the backend on a miss
    Result result = null;
    CacheKey cacheKey = null;
    if (cacheControl.useCache(query)) {
        cacheKey = new CacheKey(queryPacket);
        result = getCached(cacheKey, queryPacket.getQueryPacketData(), query);
    }
    if (result == null) {
        result = doSearch2(query, queryPacket, cacheKey, execution);
        if (isLoggingFine())
            getLogger().fine("Result NOT retrieved from cache");
        if (query.getTraceLevel() >= 1)
            query.trace(getName() + " dispatch response: " + result, false, 1);
        result.trace(getName());
    }
    return result;
}
/**
 * Returns a cached result, or null if no result was cached for this key
 *
 * @param cacheKey the cache key created from the query packet
 * @param queryPacketData a serialization of the query, to avoid having to recompute this, or null if not available
 * @param query the query, used for tracing, lookup of result window and result creation
 */
private Result getCached(CacheKey cacheKey, QueryPacketData queryPacketData, Query query) {
    if (query.getTraceLevel() >= 6) {
        query.trace("Cache key hash: " + cacheKey.hashCode(), 6);
        if (query.getTraceLevel() >= 8) {
            query.trace("Cache key: " + HexDump.toHexString(cacheKey.getCopyOfFullKey()), 8);
        }
    }
    try {
        Result result = cacheLookupFirstPhase(cacheKey, queryPacketData, query, query.getOffset(), query.getHits(), query.getPresentation().getSummary());
        if (result == null) return null; // cache miss
        if (isLoggingFine()) {
            getLogger().fine("Result retrieved from cache: " + result);
        }
        if (query.getTraceLevel() >= 1) {
            query.trace(getName() + " cached response: " + result, false, 1);
        }
        result.trace(getName());
        return result;
    }
    catch (IOException e) {
        // A broken cache entry is reported as a backend communication error rather than rethrown
        Result result = new Result(query);
        if (result.hits().getErrorHit() == null) {
            result.hits().setError(ErrorMessage.createBackendCommunicationError(
                    "Fast Search (" + getName() + ") failed: " + e.getMessage()));
        }
        if (query.getTraceLevel() >= 1) {
            query.trace(getName() + " error response: " + result, false, 1);
        }
        return result;
    }
}
/**
 * Groups the unfilled FastHits of this result by the query instance which produced
 * them (identity comparison), returning one shallow Result per distinct query.
 * Hits without a query of their own are attributed to the result's query.
 */
private List<Result> partitionHits(Result result, String summaryClass) {
    List<Result> parts = new ArrayList<>();
    TinyIdentitySet<Query> queryMap = new TinyIdentitySet<>(4);
    for (Iterator<Hit> i = hitIterator(result); i.hasNext(); ) {
        Hit hit = i.next();
        if (hit instanceof FastHit) {
            FastHit fastHit = (FastHit) hit;
            if ( ! fastHit.isFilled(summaryClass)) {
                Query q = fastHit.getQuery();
                if (q == null) {
                    q = result.hits().getQuery();
                }
                int idx = queryMap.indexOf(q); // identity, not equals()
                if (idx < 0) {
                    // First hit for this query: open a new partition
                    idx = queryMap.size();
                    Result r = new Result(q);
                    parts.add(r);
                    queryMap.add(q);
                }
                parts.get(idx).hits().add(fastHit);
            }
        }
    }
    return parts;
}
/** Fills the given summary class into the unfilled hits of this result, one backend fill per originating query. */
@Override
public void fill(Result result, String summaryClass, Execution execution) {
    if (result.isFilled(summaryClass)) return; // nothing to do
    List<Result> parts= partitionHits(result, summaryClass);
    if (parts.size() > 0) { // anything to fill at all?
        for (Result r : parts) {
            doPartialFill(r, summaryClass);
            mergeErrorsInto(result, r);
        }
        result.hits().setSorted(false);
        result.analyzeHits();
    }
}
/** Copies any errors from the source result into the destination result. */
private void mergeErrorsInto(Result destination, Result source) {
    ErrorHit errorHit = source.hits().getErrorHit();
    if (errorHit == null) return;
    for (ErrorMessage error : errorHit.errors())
        destination.hits().addError(error);
}
/** Writes a diagnostic description of the outgoing query to the query trace, at the given trace level. */
static void traceQuery(String sourceName, String type, Query query, int offset, int hits, int level, Optional<String> quotedSummaryClass) {
    if ((query.getTraceLevel()<level) || query.properties().getBoolean(TRACE_DISABLE)) return;
    StringBuilder s = new StringBuilder();
    s.append(sourceName).append(" " + type + " to dispatch: ")
            .append("query=[")
            .append(query.getModel().getQueryTree().getRoot().toString())
            .append("]");
    s.append(" timeout=").append(query.getTimeout()).append("ms");
    s.append(" offset=")
            .append(offset)
            .append(" hits=")
            .append(hits);
    // Optional query aspects are appended only when present/non-empty
    if (query.getRanking().hasRankProfile()) {
        s.append(" rankprofile[")
                .append(query.getRanking().getProfile())
                .append("]");
    }
    if (query.getRanking().getFreshness() != null) {
        s.append(" freshness=")
                .append(query.getRanking().getFreshness().getRefTime());
    }
    if (query.getRanking().getSorting() != null) {
        s.append(" sortspec=")
                .append(query.getRanking().getSorting().fieldOrders().toString());
    }
    if (query.getRanking().getLocation() != null) {
        s.append(" location=")
                .append(query.getRanking().getLocation().toString());
    }
    List<Grouping> grouping = GroupingExecutor.getGroupingList(query);
    s.append(" grouping=").append(grouping.size()).append(" : ");
    for(Grouping g : grouping) {
        s.append(g.toString());
    }
    if ( ! query.getRanking().getProperties().isEmpty()) {
        s.append(" rankproperties=")
                .append(query.getRanking().getProperties().toString());
    }
    if ( ! query.getRanking().getFeatures().isEmpty()) {
        s.append(" rankfeatures=")
                .append(query.getRanking().getFeatures().toString());
    }
    if (query.getModel().getRestrict() != null) {
        s.append(" restrict=").append(query.getModel().getRestrict().toString());
    }
    if (quotedSummaryClass.isPresent()) {
        s.append(" summary=").append(quotedSummaryClass.get());
    }
    query.trace(s.toString(), false, level);
    // More expensive representations only at higher trace levels
    if (query.isTraceable(level + 1)) {
        query.trace("Current state of query tree: "
                        + new TextualQueryRepresentation(query.getModel().getQueryTree().getRoot()),
                false, level+1);
    }
    if (query.isTraceable(level + 2)) {
        query.trace("YQL+ representation: " + query.yqlRepresentation(), level+2);
    }
}
/** Transfers total hit count, grouping data and coverage information from the result packet into the result. */
protected void addMetaInfo(Query query, QueryPacketData queryPacketData, QueryResultPacket resultPacket, Result result, boolean fromCache) {
    result.setTotalHitCount(resultPacket.getTotalDocumentCount());
    // Grouping payload, if any: a count-prefixed sequence of serialized Grouping objects
    if (resultPacket.getGroupData() != null) {
        byte[] data = resultPacket.getGroupData();
        ArrayList<Grouping> list = new ArrayList<>();
        BufferSerializer buf = new BufferSerializer(new GrowableByteBuffer(ByteBuffer.wrap(data)));
        int cnt = buf.getInt(null);
        for (int i = 0; i < cnt; i++) {
            Grouping g = new Grouping();
            g.deserialize(buf);
            list.add(g);
        }
        GroupingListHit hit = new GroupingListHit(list, getDocsumDefinitionSet(query));
        hit.setQuery(result.getQuery());
        hit.setSource(getName());
        hit.setSourceNumber(sourceNumber);
        hit.setQueryPacketData(queryPacketData);
        result.hits().add(hit);
    }
    if (resultPacket.getCoverageFeature()) {
        result.setCoverage(new Coverage(resultPacket.getCoverageDocs(), resultPacket.getActiveDocs())
                .setSoonActive(resultPacket.getSoonActiveDocs())
                .setDegradedReason(resultPacket.getDegradedReason()));
    }
}
/**
 * Decodes the docsum data of the given packet into the hit.
 *
 * @return whether the hit was filled (false when the packet is null or has no data)
 */
private boolean fillHit(FastHit hit, DocsumPacket packet, String summaryClass) {
    if (packet == null) return false;
    byte[] docsumData = packet.getData();
    if (docsumData.length == 0) return false;
    decodeSummary(summaryClass, hit, docsumData);
    return true;
}
/**
 * Fills the hits.
 *
 * @return the number of hits that we did not return data for, i.e
 *         when things are working normally we return 0.
 */
protected int fillHits(Result result, int packetIndex, Packet[] packets, String summaryClass) throws IOException {
    int skippedHits=0;
    for (Iterator<Hit> i = hitIterator(result); i.hasNext();) {
        Hit hit = i.next();
        if (hit instanceof FastHit && !hit.isFilled(summaryClass)) {
            FastHit fastHit = (FastHit) hit;
            ensureInstanceOf(DocsumPacket.class, packets[packetIndex], getName());
            DocsumPacket docsum = (DocsumPacket) packets[packetIndex];
            packetIndex++; // packets arrive in hit iteration order, one per unfilled FastHit
            if ( ! fillHit(fastHit, docsum, summaryClass))
                skippedHits++;
        }
    }
    result.hits().setSorted(false);
    return skippedHits;
}
/**
 * Throws an IOException (or TimeoutException for backend timeout errors) if the
 * packet is not of the expected type.
 */
protected static void ensureInstanceOf(Class<? extends BasicPacket> type, BasicPacket packet, String name) throws IOException {
    if (type.isAssignableFrom(packet.getClass())) return; // expected type: fine
    if ( ! (packet instanceof ErrorPacket))
        throw new IOException("Received " + packet + " when expecting " + type);
    ErrorPacket errorPacket = (ErrorPacket) packet;
    if (errorPacket.getErrorCode() == 8) // backend timeout code
        throw new TimeoutException("Query timed out in " + name);
    throw new IOException("Received error from backend in " + name + ": " + packet);
}
/**
 * Creates hits for the given cached documents, filling each from its cached docsum
 * packet where present.
 *
 * @return whether every hit could be filled from the cache
 */
private boolean addCachedHits(Result result,
                              PacketWrapper packetWrapper,
                              String summaryClass,
                              List<DocumentInfo> documents) {
    boolean filledAllOfEm = true;
    Query myQuery = result.getQuery();
    for (DocumentInfo document : documents) {
        FastHit hit = new FastHit();
        hit.setQuery(myQuery);
        hit.setUseRowInIndexUri(useRowInIndexUri(result));
        hit.setFillable();
        hit.setCached(true);
        extractDocumentInfo(hit, document);
        DocsumPacket docsum = (DocsumPacket) packetWrapper.getPacket(document.getGlobalId(), document.getPartId(), summaryClass);
        if (docsum != null) {
            byte[] docsumdata = docsum.getData();
            if (docsumdata.length > 0) {
                decodeSummary(summaryClass, hit, docsumdata);
            } else {
                filledAllOfEm = false; // packet cached but empty
            }
        } else {
            filledAllOfEm = false; // no docsum packet cached for this document
        }
        result.hits().add(hit);
    }
    return filledAllOfEm;
}
/** Returns whether the row should be encoded in index URIs: not when grouping or row combining is requested. */
private boolean useRowInIndexUri(Result result) {
    Query query = result.getQuery();
    boolean combining = (query.properties().getString(grouping) != null)
            || query.properties().getBoolean(combinerows);
    return ! combining;
}
/** Copies source, relevance and partition/backend metadata from the document info into the hit. */
private void extractDocumentInfo(FastHit hit, DocumentInfo document) {
    hit.setSourceNumber(sourceNumber);
    hit.setSource(getName());
    // The backend metric is used directly as the hit's relevance
    Number rank = document.getMetric();
    hit.setRelevance(new Relevance(rank.doubleValue()));
    hit.setDistributionKey(document.getDistributionKey());
    hit.setGlobalId(document.getGlobalId());
    hit.setPartId(document.getPartId(), rowBits);
}
/**
 * Looks this result up in the packet cache and fills its FastHits from cached
 * docsum packets where available.
 *
 * @return the cache entry for this query, or null on a cache miss
 */
protected PacketWrapper cacheLookupTwoPhase(CacheKey cacheKey, Result result, String summaryClass) {
    Query query = result.getQuery();
    PacketWrapper packetWrapper = cacheControl.lookup(cacheKey, query);
    if (packetWrapper == null) {
        return null;
    }
    if (packetWrapper.getNumPackets() != 0) {
        for (Iterator<Hit> i = hitIterator(result); i.hasNext();) {
            Hit hit = i.next();
            if (hit instanceof FastHit) {
                FastHit fastHit = (FastHit) hit;
                DocsumPacketKey key = new DocsumPacketKey(fastHit.getGlobalId(), fastHit.getPartId(), summaryClass);
                if (fillHit(fastHit,
                        (DocsumPacket) packetWrapper.getPacket(key),
                        summaryClass)) {
                    fastHit.setCached(true); // filled from cache
                }
            }
        }
        result.hits().setSorted(false);
        result.analyzeHits();
    }
    return packetWrapper;
}
/** Returns the docsum definitions of the document database resolved for this query. */
protected DocsumDefinitionSet getDocsumDefinitionSet(Query query) {
    return getDocumentDatabase(query).getDocsumDefinitionSet();
}
/** Decodes docsum data into the hit, resolving the document database from the hit's query. */
private void decodeSummary(String summaryClass, FastHit hit, byte[] docsumdata) {
    DocumentDatabase db = getDocumentDatabase(hit.getQuery());
    hit.setField(Hit.SDDOCNAME_FIELD, db.getName());
    decodeSummary(summaryClass, hit, docsumdata, db.getDocsumDefinitionSet());
}

/** Decodes docsum data into the hit using the given docsum definitions, and marks the hit filled. */
private void decodeSummary(String summaryClass, FastHit hit, byte[] docsumdata, DocsumDefinitionSet docsumSet) {
    docsumSet.lazyDecode(summaryClass, docsumdata, hit);
    hit.setFilled(summaryClass);
}
/**
 * Creates unfilled hits from a List of DocumentInfo instances. Do note
 * cacheKey should be available if a cache is active, even if the hit is not
 * created from a cache in the current call path.
 *
 * @param queryPacketData binary data from first phase of search, or null
 * @param cacheKey the key this hit should match in the packet cache, or null
 * @return whether all hits could be created without error
 */
protected boolean addUnfilledHits(Result result, List<DocumentInfo> documents, boolean fromCache, QueryPacketData queryPacketData, CacheKey cacheKey) {
    boolean allHitsOK = true;
    Query myQuery = result.getQuery();
    for (DocumentInfo document : documents) {
        try {
            FastHit hit = new FastHit();
            hit.setQuery(myQuery);
            if (queryPacketData != null)
                hit.setQueryPacketData(queryPacketData);
            hit.setCacheKey(cacheKey);
            hit.setUseRowInIndexUri(useRowInIndexUri(result));
            hit.setFillable();
            hit.setCached(fromCache);
            extractDocumentInfo(hit, document);
            result.hits().add(hit);
        } catch (ConfigurationException e) {
            // Configuration problems are expected occasionally; warn and skip the hit
            allHitsOK = false;
            getLogger().log(LogLevel.WARNING, "Skipping hit", e);
        } catch (Exception e) {
            // Anything else indicates malformed backend data; log as error and skip
            allHitsOK = false;
            getLogger().log(LogLevel.ERROR, "Skipping malformed hit", e);
        }
    }
    return allHitsOK;
}
/**
 * Instantiates a VespaBackEndSearcher subclass by fully qualified class name,
 * using its no-argument constructor.
 *
 * Fixes over the previous version: the originating exception is preserved as the
 * cause (it was flattened into the message), and this method's own diagnostic
 * RuntimeExceptions are no longer caught and re-wrapped by its own catch block.
 *
 * @throws RuntimeException if the class cannot be loaded, is not a
 *         VespaBackEndSearcher, or has no usable no-argument constructor
 */
@SuppressWarnings("rawtypes")
public static VespaBackEndSearcher getSearcher(String s) {
    try {
        Class c = Class.forName(s);
        if ( ! VespaBackEndSearcher.class.isAssignableFrom(c))
            throw new RuntimeException(s + " is not com.yahoo.prelude.fastsearch.VespaBackEndSearcher");
        for (Constructor constructor : c.getConstructors()) {
            if (constructor.getParameterTypes().length == 0)
                return (VespaBackEndSearcher) constructor.newInstance();
        }
        throw new RuntimeException("Failed initializing " + s);
    } catch (RuntimeException e) {
        throw e; // our own diagnostics: propagate unchanged
    } catch (Exception e) {
        // Preserve the cause instead of flattening it into the message
        throw new RuntimeException("Failure loading class " + s, e);
    }
}
protected boolean isLoggingFine() {
return getLogger().isLoggable(Level.FINE);
}
public boolean isLocalDispatching() { return localDispatching; }
public void setLocalDispatching(boolean localDispatching) {
this.localDispatching = localDispatching;
}
} | class VespaBackEndSearcher extends PingableSearcher {
private static final CompoundName grouping=new CompoundName("grouping");
private static final CompoundName combinerows=new CompoundName("combinerows");
/** If this is turned on this will fill summaries by dispatching directly to search nodes over RPC */
private final static CompoundName dispatchSummaries = new CompoundName("dispatch.summaries");
protected static final CompoundName PACKET_COMPRESSION_LIMIT = new CompoundName("packetcompressionlimit");
protected static final CompoundName PACKET_COMPRESSION_TYPE = new CompoundName("packetcompressiontype");
protected static final CompoundName TRACE_DISABLE = new CompoundName("trace.disable");
/** The set of all document databases available in the backend handled by this searcher */
private Map<String, DocumentDatabase> documentDbs = new LinkedHashMap<>();
private DocumentDatabase defaultDocumentDb = null;
/** Default docsum class. null means "unset" and is the default value */
private String defaultDocsumClass = null;
/** Returns an iterator which returns all hits below this result **/
protected Iterator<Hit> hitIterator(Result result) {
return result.hits().unorderedDeepIterator();
}
private boolean localDispatching = true;
/** The name of this source */
private String name;
/** Cache wrapper */
protected CacheControl cacheControl = null;
/**
* The number of last significant bits in the partId which specifies the
* row number in this backend,
* the rest specifies the column. 0 if not known.
*/
private int rowBits = 0;
/** Searchcluster number */
private int sourceNumber;
protected final String getName() { return name; }
protected final String getDefaultDocsumClass() { return defaultDocsumClass; }
/** Sets default document summary class. Default is null */
private void setDefaultDocsumClass(String docsumClass) { defaultDocsumClass = docsumClass; }
/** Returns the packet cache controller of this */
public final CacheControl getCacheControl() { return cacheControl; }
/**
* Searches a search cluster
* This is an endpoint - searchers will never propagate the search to any nested searcher.
*
* @param query the query to search
* @param queryPacket the serialized query representation to pass to the search cluster
* @param cacheKey the cache key created from the query packet, or null if caching is not used
* @param execution the query execution context
*/
protected abstract Result doSearch2(Query query, QueryPacket queryPacket, CacheKey cacheKey, Execution execution);
protected abstract void doPartialFill(Result result, String summaryClass);
protected static boolean wantsRPCSummarFill(Query query) {
return query.properties().getBoolean(dispatchSummaries);
}
/**
* Returns whether we need to send the query when fetching summaries.
* This is necessary if the query requests summary features or dynamic snippeting
*/
private Result cacheLookupFirstPhase(CacheKey key, QueryPacketData queryPacketData, Query query, int offset, int hits, String summaryClass) throws IOException {
PacketWrapper packetWrapper = cacheControl.lookup(key, query);
if (packetWrapper == null) return null;
List<DocumentInfo> documents = packetWrapper.getDocuments(offset, hits);
if (documents == null) return null;
if (query.getPresentation().getSummary() == null)
query.getPresentation().setSummary(getDefaultDocsumClass());
Result result = new Result(query);
QueryResultPacket resultPacket = packetWrapper.getFirstResultPacket();
addMetaInfo(query, queryPacketData, resultPacket, result, true);
if (packetWrapper.getNumPackets() == 0)
addUnfilledHits(result, documents, true, queryPacketData, key);
else
addCachedHits(result, packetWrapper, summaryClass, documents);
return result;
}
protected DocumentDatabase getDocumentDatabase(Query query) {
if (query.getModel().getRestrict().size() == 1) {
String docTypeName = (String)query.getModel().getRestrict().toArray()[0];
DocumentDatabase db = documentDbs.get(docTypeName);
if (db != null) {
return db;
}
}
return defaultDocumentDb;
}
private void resolveDocumentDatabase(Query query) {
DocumentDatabase docDb = getDocumentDatabase(query);
if (docDb != null) {
query.getModel().setDocumentDb(docDb.getName());
}
}
public final void init(SummaryParameters docSumParams, ClusterParams clusterParams, CacheParams cacheParams,
DocumentdbInfoConfig documentdbInfoConfig) {
this.name = clusterParams.searcherName;
this.sourceNumber = clusterParams.clusterNumber;
this.rowBits = clusterParams.rowBits;
Validator.ensureNotNull("Name of Vespa backend integration", getName());
setDefaultDocsumClass(docSumParams.defaultClass);
if (documentdbInfoConfig != null) {
for (DocumentdbInfoConfig.Documentdb docDb : documentdbInfoConfig.documentdb()) {
DocumentDatabase db = new DocumentDatabase(docDb, clusterParams.emulation);
if (documentDbs.isEmpty()) {
defaultDocumentDb = db;
}
documentDbs.put(docDb.name(), db);
}
}
if (cacheParams.cacheControl == null) {
this.cacheControl = new CacheControl(cacheParams.cacheMegaBytes, cacheParams.cacheTimeOutSeconds);
} else {
this.cacheControl = cacheParams.cacheControl;
}
}
protected void transformQuery(Query query) { }
public Result search(Query query, Execution execution) {
Item root = query.getModel().getQueryTree().getRoot();
if (root == null || root instanceof NullItem) {
return new Result(query, ErrorMessage.createNullQuery(query.getHttpRequest().getUri().toString()));
}
if (wantsRPCSummarFill(query) && summaryNeedsQuery(query)) {
return new Result(query, ErrorMessage.createInvalidQueryParameter(
"When using dispatch.summaries and your summary/rankprofile require the query, " +
" you need to enable ranking.queryCache."));
}
QueryRewrite.optimizeByRestrict(query);
QueryRewrite.optimizeAndNot(query);
QueryRewrite.collapseSingleComposites(query);
root = query.getModel().getQueryTree().getRoot();
if (root == null || root instanceof NullItem)
return new Result(query);
resolveDocumentDatabase(query);
transformQuery(query);
traceQuery(name, "search", query, query.getOffset(), query.getHits(), 1, Optional.<String>empty());
root = query.getModel().getQueryTree().getRoot();
if (root == null || root instanceof NullItem)
return new Result(query);
QueryPacket queryPacket = QueryPacket.create(query);
int compressionLimit = query.properties().getInteger(PACKET_COMPRESSION_LIMIT, 0);
queryPacket.setCompressionLimit(compressionLimit);
if (compressionLimit != 0)
queryPacket.setCompressionType(query.properties().getString(PACKET_COMPRESSION_TYPE, "lz4"));
if (isLoggingFine())
getLogger().fine("made QueryPacket: " + queryPacket);
Result result = null;
CacheKey cacheKey = null;
if (cacheControl.useCache(query)) {
cacheKey = new CacheKey(queryPacket);
result = getCached(cacheKey, queryPacket.getQueryPacketData(), query);
}
if (result == null) {
result = doSearch2(query, queryPacket, cacheKey, execution);
if (isLoggingFine())
getLogger().fine("Result NOT retrieved from cache");
if (query.getTraceLevel() >= 1)
query.trace(getName() + " dispatch response: " + result, false, 1);
result.trace(getName());
}
return result;
}
/**
* Returns a cached result, or null if no result was cached for this key
*
* @param cacheKey the cache key created from the query packet
* @param queryPacketData a serialization of the query, to avoid having to recompute this, or null if not available
* @param query the query, used for tracing, lookup of result window and result creation
*/
private Result getCached(CacheKey cacheKey, QueryPacketData queryPacketData, Query query) {
if (query.getTraceLevel() >= 6) {
query.trace("Cache key hash: " + cacheKey.hashCode(), 6);
if (query.getTraceLevel() >= 8) {
query.trace("Cache key: " + HexDump.toHexString(cacheKey.getCopyOfFullKey()), 8);
}
}
try {
Result result = cacheLookupFirstPhase(cacheKey, queryPacketData, query, query.getOffset(), query.getHits(), query.getPresentation().getSummary());
if (result == null) return null;
if (isLoggingFine()) {
getLogger().fine("Result retrieved from cache: " + result);
}
if (query.getTraceLevel() >= 1) {
query.trace(getName() + " cached response: " + result, false, 1);
}
result.trace(getName());
return result;
}
catch (IOException e) {
Result result = new Result(query);
if (result.hits().getErrorHit() == null) {
result.hits().setError(ErrorMessage.createBackendCommunicationError(
"Fast Search (" + getName() + ") failed: " + e.getMessage()));
}
if (query.getTraceLevel() >= 1) {
query.trace(getName() + " error response: " + result, false, 1);
}
return result;
}
}
private List<Result> partitionHits(Result result, String summaryClass) {
List<Result> parts = new ArrayList<>();
TinyIdentitySet<Query> queryMap = new TinyIdentitySet<>(4);
for (Iterator<Hit> i = hitIterator(result); i.hasNext(); ) {
Hit hit = i.next();
if (hit instanceof FastHit) {
FastHit fastHit = (FastHit) hit;
if ( ! fastHit.isFilled(summaryClass)) {
Query q = fastHit.getQuery();
if (q == null) {
q = result.hits().getQuery();
}
int idx = queryMap.indexOf(q);
if (idx < 0) {
idx = queryMap.size();
Result r = new Result(q);
parts.add(r);
queryMap.add(q);
}
parts.get(idx).hits().add(fastHit);
}
}
}
return parts;
}
@Override
public void fill(Result result, String summaryClass, Execution execution) {
if (result.isFilled(summaryClass)) return;
List<Result> parts= partitionHits(result, summaryClass);
if (parts.size() > 0) {
for (Result r : parts) {
doPartialFill(r, summaryClass);
mergeErrorsInto(result, r);
}
result.hits().setSorted(false);
result.analyzeHits();
}
}
private void mergeErrorsInto(Result destination, Result source) {
ErrorHit eh = source.hits().getErrorHit();
if (eh != null) {
for (ErrorMessage error : eh.errors())
destination.hits().addError(error);
}
}
static void traceQuery(String sourceName, String type, Query query, int offset, int hits, int level, Optional<String> quotedSummaryClass) {
if ((query.getTraceLevel()<level) || query.properties().getBoolean(TRACE_DISABLE)) return;
StringBuilder s = new StringBuilder();
s.append(sourceName).append(" " + type + " to dispatch: ")
.append("query=[")
.append(query.getModel().getQueryTree().getRoot().toString())
.append("]");
s.append(" timeout=").append(query.getTimeout()).append("ms");
s.append(" offset=")
.append(offset)
.append(" hits=")
.append(hits);
if (query.getRanking().hasRankProfile()) {
s.append(" rankprofile[")
.append(query.getRanking().getProfile())
.append("]");
}
if (query.getRanking().getFreshness() != null) {
s.append(" freshness=")
.append(query.getRanking().getFreshness().getRefTime());
}
if (query.getRanking().getSorting() != null) {
s.append(" sortspec=")
.append(query.getRanking().getSorting().fieldOrders().toString());
}
if (query.getRanking().getLocation() != null) {
s.append(" location=")
.append(query.getRanking().getLocation().toString());
}
List<Grouping> grouping = GroupingExecutor.getGroupingList(query);
s.append(" grouping=").append(grouping.size()).append(" : ");
for(Grouping g : grouping) {
s.append(g.toString());
}
if ( ! query.getRanking().getProperties().isEmpty()) {
s.append(" rankproperties=")
.append(query.getRanking().getProperties().toString());
}
if ( ! query.getRanking().getFeatures().isEmpty()) {
s.append(" rankfeatures=")
.append(query.getRanking().getFeatures().toString());
}
if (query.getModel().getRestrict() != null) {
s.append(" restrict=").append(query.getModel().getRestrict().toString());
}
if (quotedSummaryClass.isPresent()) {
s.append(" summary=").append(quotedSummaryClass.get());
}
query.trace(s.toString(), false, level);
if (query.isTraceable(level + 1)) {
query.trace("Current state of query tree: "
+ new TextualQueryRepresentation(query.getModel().getQueryTree().getRoot()),
false, level+1);
}
if (query.isTraceable(level + 2)) {
query.trace("YQL+ representation: " + query.yqlRepresentation(), level+2);
}
}
protected void addMetaInfo(Query query, QueryPacketData queryPacketData, QueryResultPacket resultPacket, Result result, boolean fromCache) {
result.setTotalHitCount(resultPacket.getTotalDocumentCount());
if (resultPacket.getGroupData() != null) {
byte[] data = resultPacket.getGroupData();
ArrayList<Grouping> list = new ArrayList<>();
BufferSerializer buf = new BufferSerializer(new GrowableByteBuffer(ByteBuffer.wrap(data)));
int cnt = buf.getInt(null);
for (int i = 0; i < cnt; i++) {
Grouping g = new Grouping();
g.deserialize(buf);
list.add(g);
}
GroupingListHit hit = new GroupingListHit(list, getDocsumDefinitionSet(query));
hit.setQuery(result.getQuery());
hit.setSource(getName());
hit.setSourceNumber(sourceNumber);
hit.setQueryPacketData(queryPacketData);
result.hits().add(hit);
}
if (resultPacket.getCoverageFeature()) {
result.setCoverage(new Coverage(resultPacket.getCoverageDocs(), resultPacket.getActiveDocs())
.setSoonActive(resultPacket.getSoonActiveDocs())
.setDegradedReason(resultPacket.getDegradedReason()));
}
}
private boolean fillHit(FastHit hit, DocsumPacket packet, String summaryClass) {
if (packet != null) {
byte[] docsumdata = packet.getData();
if (docsumdata.length > 0) {
decodeSummary(summaryClass, hit, docsumdata);
return true;
}
}
return false;
}
/**
* Fills the hits.
*
* @return the number of hits that we did not return data for, i.e
* when things are working normally we return 0.
*/
protected int fillHits(Result result, int packetIndex, Packet[] packets, String summaryClass) throws IOException {
int skippedHits=0;
for (Iterator<Hit> i = hitIterator(result); i.hasNext();) {
Hit hit = i.next();
if (hit instanceof FastHit && !hit.isFilled(summaryClass)) {
FastHit fastHit = (FastHit) hit;
ensureInstanceOf(DocsumPacket.class, packets[packetIndex], getName());
DocsumPacket docsum = (DocsumPacket) packets[packetIndex];
packetIndex++;
if ( ! fillHit(fastHit, docsum, summaryClass))
skippedHits++;
}
}
result.hits().setSorted(false);
return skippedHits;
}
/**
* Throws an IOException if the packet is not of the expected type
*/
protected static void ensureInstanceOf(Class<? extends BasicPacket> type, BasicPacket packet, String name) throws IOException {
if ((type.isAssignableFrom(packet.getClass()))) return;
if (packet instanceof ErrorPacket) {
ErrorPacket errorPacket=(ErrorPacket)packet;
if (errorPacket.getErrorCode() == 8)
throw new TimeoutException("Query timed out in " + name);
else
throw new IOException("Received error from backend in " + name + ": " + packet);
} else {
throw new IOException("Received " + packet + " when expecting " + type);
}
}
private boolean addCachedHits(Result result,
PacketWrapper packetWrapper,
String summaryClass,
List<DocumentInfo> documents) {
boolean filledAllOfEm = true;
Query myQuery = result.getQuery();
for (DocumentInfo document : documents) {
FastHit hit = new FastHit();
hit.setQuery(myQuery);
hit.setUseRowInIndexUri(useRowInIndexUri(result));
hit.setFillable();
hit.setCached(true);
extractDocumentInfo(hit, document);
DocsumPacket docsum = (DocsumPacket) packetWrapper.getPacket(document.getGlobalId(), document.getPartId(), summaryClass);
if (docsum != null) {
byte[] docsumdata = docsum.getData();
if (docsumdata.length > 0) {
decodeSummary(summaryClass, hit, docsumdata);
} else {
filledAllOfEm = false;
}
} else {
filledAllOfEm = false;
}
result.hits().add(hit);
}
return filledAllOfEm;
}
private boolean useRowInIndexUri(Result result) {
return ! ((result.getQuery().properties().getString(grouping) != null) || result.getQuery().properties().getBoolean(combinerows));
}
private void extractDocumentInfo(FastHit hit, DocumentInfo document) {
hit.setSourceNumber(sourceNumber);
hit.setSource(getName());
Number rank = document.getMetric();
hit.setRelevance(new Relevance(rank.doubleValue()));
hit.setDistributionKey(document.getDistributionKey());
hit.setGlobalId(document.getGlobalId());
hit.setPartId(document.getPartId(), rowBits);
}
protected PacketWrapper cacheLookupTwoPhase(CacheKey cacheKey, Result result, String summaryClass) {
Query query = result.getQuery();
PacketWrapper packetWrapper = cacheControl.lookup(cacheKey, query);
if (packetWrapper == null) {
return null;
}
if (packetWrapper.getNumPackets() != 0) {
for (Iterator<Hit> i = hitIterator(result); i.hasNext();) {
Hit hit = i.next();
if (hit instanceof FastHit) {
FastHit fastHit = (FastHit) hit;
DocsumPacketKey key = new DocsumPacketKey(fastHit.getGlobalId(), fastHit.getPartId(), summaryClass);
if (fillHit(fastHit,
(DocsumPacket) packetWrapper.getPacket(key),
summaryClass)) {
fastHit.setCached(true);
}
}
}
result.hits().setSorted(false);
result.analyzeHits();
}
return packetWrapper;
}
protected DocsumDefinitionSet getDocsumDefinitionSet(Query query) {
DocumentDatabase db = getDocumentDatabase(query);
return db.getDocsumDefinitionSet();
}
private void decodeSummary(String summaryClass, FastHit hit, byte[] docsumdata) {
DocumentDatabase db = getDocumentDatabase(hit.getQuery());
hit.setField(Hit.SDDOCNAME_FIELD, db.getName());
decodeSummary(summaryClass, hit, docsumdata, db.getDocsumDefinitionSet());
}
private void decodeSummary(String summaryClass, FastHit hit, byte[] docsumdata, DocsumDefinitionSet docsumSet) {
docsumSet.lazyDecode(summaryClass, docsumdata, hit);
hit.setFilled(summaryClass);
}
/**
* Creates unfilled hits from a List of DocumentInfo instances. Do note
* cacheKey should be available if a cache is active, even if the hit is not
* created from a cache in the current call path.
*
* @param queryPacketData binary data from first phase of search, or null
* @param cacheKey the key this hit should match in the packet cache, or null
*/
protected boolean addUnfilledHits(Result result, List<DocumentInfo> documents, boolean fromCache, QueryPacketData queryPacketData, CacheKey cacheKey) {
boolean allHitsOK = true;
Query myQuery = result.getQuery();
for (DocumentInfo document : documents) {
try {
FastHit hit = new FastHit();
hit.setQuery(myQuery);
if (queryPacketData != null)
hit.setQueryPacketData(queryPacketData);
hit.setCacheKey(cacheKey);
hit.setUseRowInIndexUri(useRowInIndexUri(result));
hit.setFillable();
hit.setCached(fromCache);
extractDocumentInfo(hit, document);
result.hits().add(hit);
} catch (ConfigurationException e) {
allHitsOK = false;
getLogger().log(LogLevel.WARNING, "Skipping hit", e);
} catch (Exception e) {
allHitsOK = false;
getLogger().log(LogLevel.ERROR, "Skipping malformed hit", e);
}
}
return allHitsOK;
}
@SuppressWarnings("rawtypes")
public static VespaBackEndSearcher getSearcher(String s) {
try {
Class c = Class.forName(s);
if (VespaBackEndSearcher.class.isAssignableFrom(c)) {
Constructor[] constructors = c.getConstructors();
for (Constructor constructor : constructors) {
Class[] parameters = constructor.getParameterTypes();
if (parameters.length == 0) {
return (VespaBackEndSearcher) constructor.newInstance();
}
}
throw new RuntimeException("Failed initializing " + s);
} else {
throw new RuntimeException(s + " is not com.yahoo.prelude.fastsearch.VespaBackEndSearcher");
}
} catch (Exception e) {
throw new RuntimeException("Failure loading class " + s + ", exception :" + e);
}
}
protected boolean isLoggingFine() {
return getLogger().isLoggable(Level.FINE);
}
public boolean isLocalDispatching() { return localDispatching; }
public void setLocalDispatching(boolean localDispatching) {
this.localDispatching = localDispatching;
}
} | |
Should this be `code >= TRANSIENT_ERROR && code < FATAL_ERROR`? | public static boolean isTransient(int code) {
return code >= TRANSIENT_ERROR;
} | return code >= TRANSIENT_ERROR; | public static boolean isTransient(int code) {
return (code >= TRANSIENT_ERROR) && (code < FATAL_ERROR);
} | class ErrorCode {
/** The code is here for completeness. */
public static final int NONE = 0;
/** A general transient error, resending is possible. */
public static final int TRANSIENT_ERROR = 100000;
/** Sending was rejected because throttler capacity is full. */
public static final int SEND_QUEUE_FULL = TRANSIENT_ERROR + 1;
/** No addresses found for the services of the message route. */
public static final int NO_ADDRESS_FOR_SERVICE = TRANSIENT_ERROR + 2;
/** A connection problem occured while sending. */
public static final int CONNECTION_ERROR = TRANSIENT_ERROR + 3;
/** The session specified for the message is unknown. */
public static final int UNKNOWN_SESSION = TRANSIENT_ERROR + 4;
/** The recipient session is busy. */
public static final int SESSION_BUSY = TRANSIENT_ERROR + 5;
/** Sending aborted by route verification. */
public static final int SEND_ABORTED = TRANSIENT_ERROR + 6;
/** Version handshake failed for any reason. */
public static final int HANDSHAKE_FAILED = TRANSIENT_ERROR + 7;
/** An application specific transient error. */
public static final int APP_TRANSIENT_ERROR = TRANSIENT_ERROR + 50000;
/** A general non-recoverable error, resending is not possible. */
public static final int FATAL_ERROR = 200000;
/** Sending was rejected because throttler is closed. */
public static final int SEND_QUEUE_CLOSED = FATAL_ERROR + 1;
/** The route of the message is illegal. */
public static final int ILLEGAL_ROUTE = FATAL_ERROR + 2;
/** No services found for the message route. */
public static final int NO_SERVICES_FOR_ROUTE = FATAL_ERROR + 3;
/** An error occured while encoding the message. */
public static final int ENCODE_ERROR = FATAL_ERROR + 5;
/** A fatal network error occured while sending. */
public static final int NETWORK_ERROR = FATAL_ERROR + 6;
/** The protocol specified for the message is unknown. */
public static final int UNKNOWN_PROTOCOL = FATAL_ERROR + 7;
/** An error occured while decoding the message. */
public static final int DECODE_ERROR = FATAL_ERROR + 8;
/** A timeout occured while sending. */
public static final int TIMEOUT = FATAL_ERROR + 9;
/** The target is running an incompatible version. */
public static final int INCOMPATIBLE_VERSION = FATAL_ERROR + 10;
/** The policy specified in a route is unknown. */
public static final int UNKNOWN_POLICY = FATAL_ERROR + 11;
/** The network was shut down when attempting to send. */
public static final int NETWORK_SHUTDOWN = FATAL_ERROR + 12;
/** Exception thrown by routing policy. */
public static final int POLICY_ERROR = FATAL_ERROR + 13;
/** An error occured while sequencing a message. */
public static final int SEQUENCE_ERROR = FATAL_ERROR + 14;
/** An application specific non-recoverable error. */
public static final int APP_FATAL_ERROR = FATAL_ERROR + 50000;
/** No error codes are allowed to be this big. */
public static final int ERROR_LIMIT = APP_FATAL_ERROR + 50000;
/**
* Translates the given error code into its symbolic name.
*
* @param error The error code to translate.
* @return The symbolic name.
*/
public static String getName(int error) {
switch (error) {
case APP_FATAL_ERROR : return "APP_FATAL_ERROR";
case APP_TRANSIENT_ERROR : return "APP_TRANSIENT_ERROR";
case CONNECTION_ERROR : return "CONNECTION_ERROR";
case DECODE_ERROR : return "DECODE_ERROR";
case ENCODE_ERROR : return "ENCODE_ERROR";
case FATAL_ERROR : return "FATAL_ERROR";
case HANDSHAKE_FAILED : return "HANDSHAKE_FAILED";
case ILLEGAL_ROUTE : return "ILLEGAL_ROUTE";
case INCOMPATIBLE_VERSION : return "INCOMPATIBLE_VERSION";
case NETWORK_ERROR : return "NETWORK_ERROR";
case NETWORK_SHUTDOWN : return "NETWORK_SHUTDOWN";
case NO_ADDRESS_FOR_SERVICE : return "NO_ADDRESS_FOR_SERVICE";
case NO_SERVICES_FOR_ROUTE : return "NO_SERVICES_FOR_ROUTE";
case NONE : return "NONE";
case POLICY_ERROR : return "POLICY_ERROR";
case SEND_ABORTED : return "SEND_ABORTED";
case SEND_QUEUE_CLOSED : return "SEND_QUEUE_CLOSED";
case SEND_QUEUE_FULL : return "SEND_QUEUE_FULL";
case SEQUENCE_ERROR : return "SEQUENCE_ERROR";
case SESSION_BUSY : return "SESSION_BUSY";
case TIMEOUT : return "TIMEOUT";
case TRANSIENT_ERROR : return "TRANSIENT_ERROR";
case UNKNOWN_POLICY : return "UNKNOWN_POLICY";
case UNKNOWN_PROTOCOL : return "UNKNOWN_PROTOCOL";
case UNKNOWN_SESSION : return "UNKNOWN_SESSION";
default : return "UNKNOWN(" + error + ")";
}
}
public static boolean isFatal(int code) {
return code >= FATAL_ERROR;
}
public static boolean isMBusError(int code) {
return ((code < APP_TRANSIENT_ERROR) && isTransient(code))
|| ((code < APP_FATAL_ERROR) && isFatal(code));
}
} | class ErrorCode {
/** The code is here for completeness. */
public static final int NONE = 0;
/** A general transient error, resending is possible. */
public static final int TRANSIENT_ERROR = 100000;
/** Sending was rejected because throttler capacity is full. */
public static final int SEND_QUEUE_FULL = TRANSIENT_ERROR + 1;
/** No addresses found for the services of the message route. */
public static final int NO_ADDRESS_FOR_SERVICE = TRANSIENT_ERROR + 2;
/** A connection problem occured while sending. */
public static final int CONNECTION_ERROR = TRANSIENT_ERROR + 3;
/** The session specified for the message is unknown. */
public static final int UNKNOWN_SESSION = TRANSIENT_ERROR + 4;
/** The recipient session is busy. */
public static final int SESSION_BUSY = TRANSIENT_ERROR + 5;
/** Sending aborted by route verification. */
public static final int SEND_ABORTED = TRANSIENT_ERROR + 6;
/** Version handshake failed for any reason. */
public static final int HANDSHAKE_FAILED = TRANSIENT_ERROR + 7;
/** An application specific transient error. */
public static final int APP_TRANSIENT_ERROR = TRANSIENT_ERROR + 50000;
/** A general non-recoverable error, resending is not possible. */
public static final int FATAL_ERROR = 200000;
/** Sending was rejected because throttler is closed. */
public static final int SEND_QUEUE_CLOSED = FATAL_ERROR + 1;
/** The route of the message is illegal. */
public static final int ILLEGAL_ROUTE = FATAL_ERROR + 2;
/** No services found for the message route. */
public static final int NO_SERVICES_FOR_ROUTE = FATAL_ERROR + 3;
/** An error occured while encoding the message. */
public static final int ENCODE_ERROR = FATAL_ERROR + 5;
/** A fatal network error occured while sending. */
public static final int NETWORK_ERROR = FATAL_ERROR + 6;
/** The protocol specified for the message is unknown. */
public static final int UNKNOWN_PROTOCOL = FATAL_ERROR + 7;
/** An error occured while decoding the message. */
public static final int DECODE_ERROR = FATAL_ERROR + 8;
/** A timeout occured while sending. */
public static final int TIMEOUT = FATAL_ERROR + 9;
/** The target is running an incompatible version. */
public static final int INCOMPATIBLE_VERSION = FATAL_ERROR + 10;
/** The policy specified in a route is unknown. */
public static final int UNKNOWN_POLICY = FATAL_ERROR + 11;
/** The network was shut down when attempting to send. */
public static final int NETWORK_SHUTDOWN = FATAL_ERROR + 12;
/** Exception thrown by routing policy. */
public static final int POLICY_ERROR = FATAL_ERROR + 13;
/** An error occured while sequencing a message. */
public static final int SEQUENCE_ERROR = FATAL_ERROR + 14;
/** An application specific non-recoverable error. */
public static final int APP_FATAL_ERROR = FATAL_ERROR + 50000;
/** No error codes are allowed to be this big. */
public static final int ERROR_LIMIT = APP_FATAL_ERROR + 50000;
/**
* Translates the given error code into its symbolic name.
*
* @param error The error code to translate.
* @return The symbolic name.
*/
public static String getName(int error) {
switch (error) {
case APP_FATAL_ERROR : return "APP_FATAL_ERROR";
case APP_TRANSIENT_ERROR : return "APP_TRANSIENT_ERROR";
case CONNECTION_ERROR : return "CONNECTION_ERROR";
case DECODE_ERROR : return "DECODE_ERROR";
case ENCODE_ERROR : return "ENCODE_ERROR";
case FATAL_ERROR : return "FATAL_ERROR";
case HANDSHAKE_FAILED : return "HANDSHAKE_FAILED";
case ILLEGAL_ROUTE : return "ILLEGAL_ROUTE";
case INCOMPATIBLE_VERSION : return "INCOMPATIBLE_VERSION";
case NETWORK_ERROR : return "NETWORK_ERROR";
case NETWORK_SHUTDOWN : return "NETWORK_SHUTDOWN";
case NO_ADDRESS_FOR_SERVICE : return "NO_ADDRESS_FOR_SERVICE";
case NO_SERVICES_FOR_ROUTE : return "NO_SERVICES_FOR_ROUTE";
case NONE : return "NONE";
case POLICY_ERROR : return "POLICY_ERROR";
case SEND_ABORTED : return "SEND_ABORTED";
case SEND_QUEUE_CLOSED : return "SEND_QUEUE_CLOSED";
case SEND_QUEUE_FULL : return "SEND_QUEUE_FULL";
case SEQUENCE_ERROR : return "SEQUENCE_ERROR";
case SESSION_BUSY : return "SESSION_BUSY";
case TIMEOUT : return "TIMEOUT";
case TRANSIENT_ERROR : return "TRANSIENT_ERROR";
case UNKNOWN_POLICY : return "UNKNOWN_POLICY";
case UNKNOWN_PROTOCOL : return "UNKNOWN_PROTOCOL";
case UNKNOWN_SESSION : return "UNKNOWN_SESSION";
default : return "UNKNOWN(" + error + ")";
}
}
public static boolean isFatal(int code) {
return code >= FATAL_ERROR;
}
public static boolean isMBusError(int code) {
return ((code < APP_TRANSIENT_ERROR) && isTransient(code))
|| ((code < APP_FATAL_ERROR) && isFatal(code))
|| ((code < TRANSIENT_ERROR) && (code >= NONE));
}
} |
I think we should log which child nodes are not parked. | protected void expire(List<Node> expired) {
List<Node> nodesToRecycle = new ArrayList<>();
for (Node recycleCandidate : expired) {
if (recycleCandidate.status().hardwareFailureDescription().isPresent()) {
boolean shouldBeParked = recycleCandidate.type() != NodeType.host ||
nodeRepository.getChildNodes(recycleCandidate.hostname()).stream()
.allMatch(node -> node.state() == Node.State.parked);
if (shouldBeParked) nodeRepository.park(
recycleCandidate.hostname(), Agent.system, "Parked by FailedExpirer due to HW failure on node");
} else if (! failCountIndicatesHwFail(zone, recycleCandidate) || recycleCandidate.status().failCount() < 5) {
nodesToRecycle.add(recycleCandidate);
}
}
nodeRepository.setDirty(nodesToRecycle);
} | .allMatch(node -> node.state() == Node.State.parked); | protected void expire(List<Node> expired) {
List<Node> nodesToRecycle = new ArrayList<>();
for (Node recycleCandidate : expired) {
if (recycleCandidate.status().hardwareFailureDescription().isPresent()) {
List<String> nonParkedChildren = recycleCandidate.type() != NodeType.host ? Collections.emptyList() :
nodeRepository.getChildNodes(recycleCandidate.hostname()).stream()
.filter(node -> node.state() != Node.State.parked)
.map(Node::hostname)
.collect(Collectors.toList());
if (nonParkedChildren.isEmpty()) {
nodeRepository.park(recycleCandidate.hostname(), Agent.system, "Parked by FailedExpirer due to HW failure on node");
} else {
log.info(String.format("Expired failed node %s with HW fail is not parked because some of its children" +
" (%s) are not yet parked", recycleCandidate.hostname(), String.join(", ", nonParkedChildren)));
}
} else if (! failCountIndicatesHwFail(zone, recycleCandidate) || recycleCandidate.status().failCount() < 5) {
nodesToRecycle.add(recycleCandidate);
}
}
nodeRepository.setDirty(nodesToRecycle);
} | class FailedExpirer extends Expirer {
private final NodeRepository nodeRepository;
private final Zone zone;
public FailedExpirer(NodeRepository nodeRepository, Zone zone, Clock clock,
Duration failTimeout, JobControl jobControl) {
super(Node.State.failed, History.Event.Type.failed, nodeRepository, clock, failTimeout, jobControl);
this.nodeRepository = nodeRepository;
this.zone = zone;
}
@Override
private boolean failCountIndicatesHwFail(Zone zone, Node node) {
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) return false;
if (zone.system() == SystemName.cd) return false;
return zone.environment() == Environment.prod || zone.environment() == Environment.staging;
}
} | class FailedExpirer extends Expirer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final NodeRepository nodeRepository;
private final Zone zone;
public FailedExpirer(NodeRepository nodeRepository, Zone zone, Clock clock,
Duration failTimeout, JobControl jobControl) {
super(Node.State.failed, History.Event.Type.failed, nodeRepository, clock, failTimeout, jobControl);
this.nodeRepository = nodeRepository;
this.zone = zone;
}
@Override
private boolean failCountIndicatesHwFail(Zone zone, Node node) {
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) return false;
if (zone.system() == SystemName.cd) return false;
return zone.environment() == Environment.prod || zone.environment() == Environment.staging;
}
} |
Added. | protected void expire(List<Node> expired) {
List<Node> nodesToRecycle = new ArrayList<>();
for (Node recycleCandidate : expired) {
if (recycleCandidate.status().hardwareFailureDescription().isPresent()) {
boolean shouldBeParked = recycleCandidate.type() != NodeType.host ||
nodeRepository.getChildNodes(recycleCandidate.hostname()).stream()
.allMatch(node -> node.state() == Node.State.parked);
if (shouldBeParked) nodeRepository.park(
recycleCandidate.hostname(), Agent.system, "Parked by FailedExpirer due to HW failure on node");
} else if (! failCountIndicatesHwFail(zone, recycleCandidate) || recycleCandidate.status().failCount() < 5) {
nodesToRecycle.add(recycleCandidate);
}
}
nodeRepository.setDirty(nodesToRecycle);
} | .allMatch(node -> node.state() == Node.State.parked); | protected void expire(List<Node> expired) {
List<Node> nodesToRecycle = new ArrayList<>();
for (Node recycleCandidate : expired) {
if (recycleCandidate.status().hardwareFailureDescription().isPresent()) {
List<String> nonParkedChildren = recycleCandidate.type() != NodeType.host ? Collections.emptyList() :
nodeRepository.getChildNodes(recycleCandidate.hostname()).stream()
.filter(node -> node.state() != Node.State.parked)
.map(Node::hostname)
.collect(Collectors.toList());
if (nonParkedChildren.isEmpty()) {
nodeRepository.park(recycleCandidate.hostname(), Agent.system, "Parked by FailedExpirer due to HW failure on node");
} else {
log.info(String.format("Expired failed node %s with HW fail is not parked because some of its children" +
" (%s) are not yet parked", recycleCandidate.hostname(), String.join(", ", nonParkedChildren)));
}
} else if (! failCountIndicatesHwFail(zone, recycleCandidate) || recycleCandidate.status().failCount() < 5) {
nodesToRecycle.add(recycleCandidate);
}
}
nodeRepository.setDirty(nodesToRecycle);
} | class FailedExpirer extends Expirer {
private final NodeRepository nodeRepository;
private final Zone zone;
public FailedExpirer(NodeRepository nodeRepository, Zone zone, Clock clock,
Duration failTimeout, JobControl jobControl) {
super(Node.State.failed, History.Event.Type.failed, nodeRepository, clock, failTimeout, jobControl);
this.nodeRepository = nodeRepository;
this.zone = zone;
}
@Override
private boolean failCountIndicatesHwFail(Zone zone, Node node) {
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) return false;
if (zone.system() == SystemName.cd) return false;
return zone.environment() == Environment.prod || zone.environment() == Environment.staging;
}
} | class FailedExpirer extends Expirer {
private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
private final NodeRepository nodeRepository;
private final Zone zone;
public FailedExpirer(NodeRepository nodeRepository, Zone zone, Clock clock,
Duration failTimeout, JobControl jobControl) {
super(Node.State.failed, History.Event.Type.failed, nodeRepository, clock, failTimeout, jobControl);
this.nodeRepository = nodeRepository;
this.zone = zone;
}
@Override
private boolean failCountIndicatesHwFail(Zone zone, Node node) {
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) return false;
if (zone.system() == SystemName.cd) return false;
return zone.environment() == Environment.prod || zone.environment() == Environment.staging;
}
} |
One of these should be **b**`.allocation().isPresent()` | private static int compareForRelocation(Node a, Node b) {
int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
if (capacity != 0) return capacity;
if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
if (a.allocation().isPresent() && a.allocation().isPresent()) {
if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
!b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return -1;
if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return 1;
}
return a.hostname().compareTo(b.hostname());
} | if (a.allocation().isPresent() && a.allocation().isPresent()) { | private static int compareForRelocation(Node a, Node b) {
int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
if (capacity != 0) return capacity;
if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
if (a.allocation().isPresent() && a.allocation().isPresent()) {
if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
!b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return -1;
if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return 1;
}
return a.hostname().compareTo(b.hostname());
} | class NodePrioritizer {
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
private final List<Node> allNodes;
private final DockerHostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
private final Map<Node, ResourceCapacity> headroomHosts;
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares) {
this.allNodes = Collections.unmodifiableList(allNodes);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.appId = appId;
this.spareHosts = findSpareHosts(allNodes, spares);
this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors);
this.capacity = new DockerHostCapacity(allNodes);
long nofFailedNodes = allNodes.stream()
.filter(node -> node.state().equals(Node.State.failed))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
long nofNodesInCluster = allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
this.isDocker = isDocker();
}
/**
* From ipAddress - get hostname
*
* @return hostname or null if not able to do the lookup
*/
private static String lookupHostname(String ipAddress) {
try {
return InetAddress.getByName(ipAddress).getHostName();
} catch (UnknownHostException e) {
e.printStackTrace();
}
return null;
}
/**
* Spare hosts are the two hosts in the system with the most free capacity.
*
* We do not count retired or inactive nodes as used capacity (as they could have been
* moved to create space for the spare node in the first place).
*/
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
return nodes.stream()
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted(capacity::compareWithoutInactive)
.limit(spares)
.collect(Collectors.toSet());
}
/**
* Headroom hosts are the host with the least but sufficient capacity for the requested headroom.
*
* If not enough headroom - the headroom violating hosts are the once that are closest to fulfill
* a headroom request.
*/
private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
DockerHostCapacity capacity = new DockerHostCapacity(nodes);
Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();
List<Node> hostsSortedOnLeastCapacity = nodes.stream()
.filter(n -> !spareNodes.contains(n))
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted((a, b) -> capacity.compareWithoutInactive(b, a))
.collect(Collectors.toList());
for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
Set<Node> tempHeadroom = new HashSet<>();
Set<Node> notEnoughCapacity = new HashSet<>();
ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);
for (Node host : hostsSortedOnLeastCapacity) {
if (headroomHosts.containsKey(host)) continue;
if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
headroomHosts.put(host, headroomCapacity);
tempHeadroom.add(host);
} else {
notEnoughCapacity.add(host);
}
if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
break;
}
}
if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
List<Node> violations = notEnoughCapacity.stream()
.sorted((a, b) -> capacity.compare(b, a))
.limit(flavor.getIdealHeadroom() - tempHeadroom.size())
.collect(Collectors.toList());
for (Node hostViolatingHeadrom : violations) {
headroomHosts.put(hostViolatingHeadrom, headroomCapacity);
}
}
}
return headroomHosts;
}
/**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
List<PrioritizableNode> prioritize() {
List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
Collections.sort(priorityList);
return priorityList;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
PrioritizableNode nodePri = toNodePriority(node, true, false);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(node, nodePri);
}
}
}
/**
* Add a node on each docker host with enough capacity for the requested flavor
*/
void addNewDockerNodes() {
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
for (Node node : allNodes) {
if (node.type() == NodeType.host && node.state() == Node.State.active) {
boolean conflictingCluster = false;
NodeList list = new NodeList(allNodes);
NodeList childrenWithSameApp = list.childNodes(node).owner(appId);
for (Node child : childrenWithSameApp.asList()) {
if (child.allocation().get().membership().cluster().id().equals(clusterSpec.id())) {
conflictingCluster = true;
break;
}
}
if (!conflictingCluster && capacity.hasCapacity(node, ResourceCapacity.of(getFlavor(requestedNodes)))) {
Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes);
if (ipAddresses.isEmpty()) continue;
String ipAddress = ipAddresses.stream().findFirst().get();
String hostname = lookupHostname(ipAddress);
if (hostname == null) continue;
Node newNode = Node.createDockerNode("fake-" + hostname, Collections.singleton(ipAddress),
Collections.emptySet(), hostname, Optional.of(node.hostname()), getFlavor(requestedNodes), NodeType.tenant);
PrioritizableNode nodePri = toNodePriority(newNode, false, true);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(newNode, nodePri);
}
}
}
}
}
/**
* Add existing nodes allocated to the application
*/
void addApplicationNodes() {
List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.map(node -> toNodePriority(node, false, false))
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Add nodes already provisioned, but not allocatied to any application
*/
void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> node.state().equals(Node.State.ready))
.map(node -> toNodePriority(node, false, false))
.filter(n -> !n.violatesSpares || isAllocatingForReplacement)
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Convert a list of nodes to a list of node priorities. This includes finding, calculating
* parameters to the priority sorting procedure.
*/
private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
PrioritizableNode pri = new PrioritizableNode();
pri.node = node;
pri.isSurplusNode = isSurplusNode;
pri.isNewNode = isNewNode;
pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
pri.parent = findParentNode(node);
if (pri.parent.isPresent()) {
Node parent = pri.parent.get();
pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
if (spareHosts.contains(parent)) {
pri.violatesSpares = true;
}
if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) {
ResourceCapacity neededCapacity = headroomHosts.get(parent);
if (isNewNode) {
neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node));
}
pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity);
}
}
return pri;
}
static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
NodeList list = new NodeList(nodes);
return list.childNodes(parent).asList().stream()
.sorted(NodePrioritizer::compareForRelocation)
.findFirst()
.filter(n -> n.equals(node))
.isPresent();
}
private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
if (nodeFailedNodes == 0) return false;
int wantedCount = 0;
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
wantedCount = countSpec.getCount();
}
return (wantedCount > nofNodesInCluster - nodeFailedNodes);
}
private static Flavor getFlavor(NodeSpec requestedNodes) {
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
return countSpec.getFlavor();
}
return null;
}
private boolean isDocker() {
Flavor flavor = getFlavor(requestedNodes);
return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
}
private Optional<Node> findParentNode(Node node) {
if (!node.parentHostname().isPresent()) return Optional.empty();
return allNodes.stream()
.filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE")))
.findAny();
}
} | class NodePrioritizer {
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
private final List<Node> allNodes;
private final DockerHostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
private final Map<Node, ResourceCapacity> headroomHosts;
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares) {
this.allNodes = Collections.unmodifiableList(allNodes);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.appId = appId;
this.spareHosts = findSpareHosts(allNodes, spares);
this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors);
this.capacity = new DockerHostCapacity(allNodes);
long nofFailedNodes = allNodes.stream()
.filter(node -> node.state().equals(Node.State.failed))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
long nofNodesInCluster = allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
this.isDocker = isDocker();
}
/**
* From ipAddress - get hostname
*
* @return hostname or null if not able to do the lookup
*/
private static String lookupHostname(String ipAddress) {
try {
return InetAddress.getByName(ipAddress).getHostName();
} catch (UnknownHostException e) {
e.printStackTrace();
}
return null;
}
/**
* Spare hosts are the two hosts in the system with the most free capacity.
*
* We do not count retired or inactive nodes as used capacity (as they could have been
* moved to create space for the spare node in the first place).
*/
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
return nodes.stream()
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted(capacity::compareWithoutInactive)
.limit(spares)
.collect(Collectors.toSet());
}
/**
* Headroom hosts are the host with the least but sufficient capacity for the requested headroom.
*
* If not enough headroom - the headroom violating hosts are the once that are closest to fulfill
* a headroom request.
*/
private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
DockerHostCapacity capacity = new DockerHostCapacity(nodes);
Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();
List<Node> hostsSortedOnLeastCapacity = nodes.stream()
.filter(n -> !spareNodes.contains(n))
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted((a, b) -> capacity.compareWithoutInactive(b, a))
.collect(Collectors.toList());
for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
Set<Node> tempHeadroom = new HashSet<>();
Set<Node> notEnoughCapacity = new HashSet<>();
ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);
for (Node host : hostsSortedOnLeastCapacity) {
if (headroomHosts.containsKey(host)) continue;
if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
headroomHosts.put(host, headroomCapacity);
tempHeadroom.add(host);
} else {
notEnoughCapacity.add(host);
}
if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
break;
}
}
if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
List<Node> violations = notEnoughCapacity.stream()
.sorted((a, b) -> capacity.compare(b, a))
.limit(flavor.getIdealHeadroom() - tempHeadroom.size())
.collect(Collectors.toList());
for (Node hostViolatingHeadrom : violations) {
headroomHosts.put(hostViolatingHeadrom, headroomCapacity);
}
}
}
return headroomHosts;
}
/**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
List<PrioritizableNode> prioritize() {
List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
Collections.sort(priorityList);
return priorityList;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
PrioritizableNode nodePri = toNodePriority(node, true, false);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(node, nodePri);
}
}
}
/**
* Add a node on each docker host with enough capacity for the requested flavor
*/
void addNewDockerNodes() {
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
for (Node node : allNodes) {
if (node.type() == NodeType.host && node.state() == Node.State.active) {
boolean conflictingCluster = false;
NodeList list = new NodeList(allNodes);
NodeList childrenWithSameApp = list.childNodes(node).owner(appId);
for (Node child : childrenWithSameApp.asList()) {
if (child.allocation().get().membership().cluster().id().equals(clusterSpec.id())) {
conflictingCluster = true;
break;
}
}
if (!conflictingCluster && capacity.hasCapacity(node, ResourceCapacity.of(getFlavor(requestedNodes)))) {
Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes);
if (ipAddresses.isEmpty()) continue;
String ipAddress = ipAddresses.stream().findFirst().get();
String hostname = lookupHostname(ipAddress);
if (hostname == null) continue;
Node newNode = Node.createDockerNode("fake-" + hostname, Collections.singleton(ipAddress),
Collections.emptySet(), hostname, Optional.of(node.hostname()), getFlavor(requestedNodes), NodeType.tenant);
PrioritizableNode nodePri = toNodePriority(newNode, false, true);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(newNode, nodePri);
}
}
}
}
}
/**
* Add existing nodes allocated to the application
*/
void addApplicationNodes() {
List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.map(node -> toNodePriority(node, false, false))
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Add nodes already provisioned, but not allocatied to any application
*/
void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> node.state().equals(Node.State.ready))
.map(node -> toNodePriority(node, false, false))
.filter(n -> !n.violatesSpares || isAllocatingForReplacement)
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Convert a list of nodes to a list of node priorities. This includes finding, calculating
* parameters to the priority sorting procedure.
*/
private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
PrioritizableNode pri = new PrioritizableNode();
pri.node = node;
pri.isSurplusNode = isSurplusNode;
pri.isNewNode = isNewNode;
pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
pri.parent = findParentNode(node);
if (pri.parent.isPresent()) {
Node parent = pri.parent.get();
pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
if (spareHosts.contains(parent)) {
pri.violatesSpares = true;
}
if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) {
ResourceCapacity neededCapacity = headroomHosts.get(parent);
if (isNewNode) {
neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node));
}
pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity);
}
}
return pri;
}
static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
NodeList list = new NodeList(nodes);
return list.childNodes(parent).asList().stream()
.sorted(NodePrioritizer::compareForRelocation)
.findFirst()
.filter(n -> n.equals(node))
.isPresent();
}
private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
if (nodeFailedNodes == 0) return false;
int wantedCount = 0;
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
wantedCount = countSpec.getCount();
}
return (wantedCount > nofNodesInCluster - nodeFailedNodes);
}
private static Flavor getFlavor(NodeSpec requestedNodes) {
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
return countSpec.getFlavor();
}
return null;
}
private boolean isDocker() {
Flavor flavor = getFlavor(requestedNodes);
return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
}
private Optional<Node> findParentNode(Node node) {
if (!node.parentHostname().isPresent()) return Optional.empty();
return allNodes.stream()
.filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE")))
.findAny();
}
} |
I must say! Very nice spotted. This looks bad for the unit test I wrote for this too. | private static int compareForRelocation(Node a, Node b) {
int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
if (capacity != 0) return capacity;
if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
if (a.allocation().isPresent() && a.allocation().isPresent()) {
if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
!b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return -1;
if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return 1;
}
return a.hostname().compareTo(b.hostname());
} | if (a.allocation().isPresent() && a.allocation().isPresent()) { | private static int compareForRelocation(Node a, Node b) {
int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
if (capacity != 0) return capacity;
if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
if (a.allocation().isPresent() && a.allocation().isPresent()) {
if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
!b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return -1;
if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return 1;
}
return a.hostname().compareTo(b.hostname());
} | class NodePrioritizer {
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
private final List<Node> allNodes;
private final DockerHostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
private final Map<Node, ResourceCapacity> headroomHosts;
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares) {
this.allNodes = Collections.unmodifiableList(allNodes);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.appId = appId;
this.spareHosts = findSpareHosts(allNodes, spares);
this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors);
this.capacity = new DockerHostCapacity(allNodes);
long nofFailedNodes = allNodes.stream()
.filter(node -> node.state().equals(Node.State.failed))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
long nofNodesInCluster = allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
this.isDocker = isDocker();
}
/**
* From ipAddress - get hostname
*
* @return hostname or null if not able to do the lookup
*/
private static String lookupHostname(String ipAddress) {
try {
return InetAddress.getByName(ipAddress).getHostName();
} catch (UnknownHostException e) {
e.printStackTrace();
}
return null;
}
/**
* Spare hosts are the two hosts in the system with the most free capacity.
*
* We do not count retired or inactive nodes as used capacity (as they could have been
* moved to create space for the spare node in the first place).
*/
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
return nodes.stream()
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted(capacity::compareWithoutInactive)
.limit(spares)
.collect(Collectors.toSet());
}
/**
* Headroom hosts are the host with the least but sufficient capacity for the requested headroom.
*
* If not enough headroom - the headroom violating hosts are the once that are closest to fulfill
* a headroom request.
*/
private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
DockerHostCapacity capacity = new DockerHostCapacity(nodes);
Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();
List<Node> hostsSortedOnLeastCapacity = nodes.stream()
.filter(n -> !spareNodes.contains(n))
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted((a, b) -> capacity.compareWithoutInactive(b, a))
.collect(Collectors.toList());
for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
Set<Node> tempHeadroom = new HashSet<>();
Set<Node> notEnoughCapacity = new HashSet<>();
ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);
for (Node host : hostsSortedOnLeastCapacity) {
if (headroomHosts.containsKey(host)) continue;
if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
headroomHosts.put(host, headroomCapacity);
tempHeadroom.add(host);
} else {
notEnoughCapacity.add(host);
}
if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
break;
}
}
if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
List<Node> violations = notEnoughCapacity.stream()
.sorted((a, b) -> capacity.compare(b, a))
.limit(flavor.getIdealHeadroom() - tempHeadroom.size())
.collect(Collectors.toList());
for (Node hostViolatingHeadrom : violations) {
headroomHosts.put(hostViolatingHeadrom, headroomCapacity);
}
}
}
return headroomHosts;
}
/**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
List<PrioritizableNode> prioritize() {
List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
Collections.sort(priorityList);
return priorityList;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
PrioritizableNode nodePri = toNodePriority(node, true, false);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(node, nodePri);
}
}
}
/**
 * Add a node on each docker host with enough capacity for the requested flavor
 *
 * For every active docker host we synthesize one "fake" tenant node that the
 * allocation pass can pick, provided:
 *  - no node of this application's target cluster already lives on the host, and
 *  - the host has free capacity for the requested flavor, a free IP address,
 *    and that IP resolves to a hostname.
 * Synthesized nodes that would violate the spare-host policy are skipped unless
 * we are allocating replacements for failed nodes.
 */
void addNewDockerNodes() {
    if (!isDocker) return;
    DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
    for (Node node : allNodes) {
        if (node.type() == NodeType.host && node.state() == Node.State.active) {
            // Skip hosts already running a node of this application's target cluster
            // (avoids co-locating members of the same cluster on one host)
            boolean conflictingCluster = false;
            NodeList list = new NodeList(allNodes);
            NodeList childrenWithSameApp = list.childNodes(node).owner(appId);
            for (Node child : childrenWithSameApp.asList()) {
                // allocation() is present here: the owner(appId) filter only matches allocated nodes
                if (child.allocation().get().membership().cluster().id().equals(clusterSpec.id())) {
                    conflictingCluster = true;
                    break;
                }
            }
            if (!conflictingCluster && capacity.hasCapacity(node, ResourceCapacity.of(getFlavor(requestedNodes)))) {
                Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes);
                if (ipAddresses.isEmpty()) continue; // no free IP on this host
                String ipAddress = ipAddresses.stream().findFirst().get();
                String hostname = lookupHostname(ipAddress);
                if (hostname == null) continue; // reverse lookup failed - cannot name the node
                // The node does not exist yet, so give it a provisional "fake-" name
                Node newNode = Node.createDockerNode("fake-" + hostname, Collections.singleton(ipAddress),
                        Collections.emptySet(), hostname, Optional.of(node.hostname()), getFlavor(requestedNodes), NodeType.tenant);
                PrioritizableNode nodePri = toNodePriority(newNode, false, true);
                if (!nodePri.violatesSpares || isAllocatingForReplacement) {
                    nodes.put(newNode, nodePri);
                }
            }
        }
    }
}
/**
 * Adds the nodes this application already owns, of the requested node type,
 * in the states where they remain usable (active, inactive or reserved).
 */
void addApplicationNodes() {
    List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
    for (Node node : allNodes) {
        if (!node.type().equals(requestedNodes.type())) continue;
        if (!legalStates.contains(node.state())) continue;
        if (!node.allocation().isPresent()) continue;
        if (!node.allocation().get().owner().equals(appId)) continue;
        PrioritizableNode candidate = toNodePriority(node, false, false);
        nodes.put(candidate.node, candidate);
    }
}
/**
 * Adds nodes that are provisioned and ready, but not yet allocated to any
 * application. Candidates violating the spare-host policy are excluded unless
 * we are allocating replacements for failed nodes.
 */
void addReadyNodes() {
    for (Node node : allNodes) {
        if (!node.type().equals(requestedNodes.type())) continue;
        if (!node.state().equals(Node.State.ready)) continue;
        PrioritizableNode candidate = toNodePriority(node, false, false);
        if (isAllocatingForReplacement || !candidate.violatesSpares) {
            nodes.put(candidate.node, candidate);
        }
    }
}
/**
 * Convert a list of nodes to a list of node priorities. This includes finding, calculating
 * parameters to the priority sorting procedure.
 *
 * @param node          the node to wrap
 * @param isSurplusNode whether the node was left over from an earlier cluster downsize
 * @param isNewNode     whether the node is a newly synthesized (not yet existing) docker node
 * @return the node wrapped together with the parameters used by the priority ordering
 */
private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
    PrioritizableNode pri = new PrioritizableNode();
    pri.node = node;
    pri.isSurplusNode = isSurplusNode;
    pri.isNewNode = isNewNode;
    // Prefer nodes that already carry the exact requested (non-stock) flavor
    pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
    pri.parent = findParentNode(node);
    if (pri.parent.isPresent()) {
        Node parent = pri.parent.get();
        pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
        // Allocating on a host reserved as spare violates the spare policy
        if (spareHosts.contains(parent)) {
            pri.violatesSpares = true;
        }
        // Headroom is only checked for the child the relocation order ranks first on this host
        if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) {
            ResourceCapacity neededCapacity = headroomHosts.get(parent);
            // A new node would consume capacity itself, so add its own footprint to the requirement
            if (isNewNode) {
                neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node));
            }
            pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity);
        }
    }
    return pri;
}
/**
 * Returns whether {@code node} is the child of {@code parent} that the relocation
 * ordering ({@code compareForRelocation}) ranks first, i.e. the preferred node to
 * move off that host.
 *
 * Uses {@link java.util.stream.Stream#min} instead of sorting the whole child list
 * just to read its first element: O(n) instead of O(n log n), with the same result
 * (both keep the earliest of tied minimal elements in a sequential stream).
 */
static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
    NodeList list = new NodeList(nodes);
    return list.childNodes(parent).asList().stream()
            .min(NodePrioritizer::compareForRelocation)
            .filter(first -> first.equals(node))
            .isPresent();
}
/**
 * Returns whether this allocation is replacing failed nodes: true when the wanted
 * node count exceeds the number of non-failed nodes currently in the cluster.
 *
 * @param nofNodesInCluster total nodes allocated to this cluster (any state)
 * @param nodeFailedNodes   how many of those are in state failed
 */
private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
    if (nodeFailedNodes == 0) return false;
    long healthyNodes = nofNodesInCluster - nodeFailedNodes;
    int wantedCount = 0;
    if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
        wantedCount = ((NodeSpec.CountNodeSpec) requestedNodes).getCount();
    }
    return wantedCount > healthyNodes;
}
/**
 * Returns the flavor of the given node spec, or null when the spec is not a
 * count-based spec and therefore carries no flavor.
 */
private static Flavor getFlavor(NodeSpec requestedNodes) {
    if (!(requestedNodes instanceof NodeSpec.CountNodeSpec)) return null;
    return ((NodeSpec.CountNodeSpec) requestedNodes).getFlavor();
}
/** Returns whether the requested flavor is a docker container flavor. */
private boolean isDocker() {
    Flavor requestedFlavor = getFlavor(requestedNodes);
    if (requestedFlavor == null) return false;
    return requestedFlavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
}
/**
 * Returns the parent (host) node of the given node, or empty when the node has no
 * parent hostname or no node with that hostname is known.
 *
 * The presence check makes the subsequent get() safe, which removes the old
 * {@code orElse(" NOT A NODE")} sentinel hack and hoists the Optional unwrap out
 * of the per-node predicate.
 */
private Optional<Node> findParentNode(Node node) {
    if (!node.parentHostname().isPresent()) return Optional.empty();
    String parentHostname = node.parentHostname().get();
    return allNodes.stream()
            .filter(candidate -> candidate.hostname().equals(parentHostname))
            .findAny();
}
} | class NodePrioritizer {
// Candidate nodes for allocation, keyed by the node itself
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
// Immutable snapshot of every node known to the node repository
private final List<Node> allNodes;
// Capacity calculator over allNodes
private final DockerHostCapacity capacity;
// The node specification we are allocating for
private final NodeSpec requestedNodes;
// Application and cluster the allocation targets
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
// Whether the requested flavor is a docker container flavor
private final boolean isDocker;
// True when replacing failed nodes; relaxes the spare-host restriction
private final boolean isAllocatingForReplacement;
// Hosts reserved as spares (the ones with the most free capacity)
private final Set<Node> spareHosts;
// Hosts reserved for (or violating) flavor headroom, with the capacity to keep free
private final Map<Node, ResourceCapacity> headroomHosts;
/**
 * Creates a prioritizer for allocating nodes to the given application cluster.
 *
 * @param allNodes    every node known to the node repository (stored unmodifiable)
 * @param appId       the application being allocated for
 * @param clusterSpec the cluster within that application
 * @param nodeSpec    the requested nodes
 * @param nodeFlavors all configured flavors, used to compute headroom
 * @param spares      number of docker hosts to keep as spares
 */
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares) {
    this.allNodes = Collections.unmodifiableList(allNodes);
    this.requestedNodes = nodeSpec;
    this.clusterSpec = clusterSpec;
    this.appId = appId;
    // Spare/headroom reservations must be computed before candidates are prioritized
    this.spareHosts = findSpareHosts(allNodes, spares);
    this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors);
    this.capacity = new DockerHostCapacity(allNodes);
    // Failed nodes belonging to this application cluster
    long nofFailedNodes = allNodes.stream()
            .filter(node -> node.state().equals(Node.State.failed))
            .filter(node -> node.allocation().isPresent())
            .filter(node -> node.allocation().get().owner().equals(appId))
            .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
            .count();
    // All nodes (any state) belonging to this application cluster
    long nofNodesInCluster = allNodes.stream()
            .filter(node -> node.allocation().isPresent())
            .filter(node -> node.allocation().get().owner().equals(appId))
            .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
            .count();
    this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
    this.isDocker = isDocker();
}
/**
 * Resolves the hostname for the given IP address.
 *
 * @return the hostname, or null if the lookup fails
 */
private static String lookupHostname(String ipAddress) {
    try {
        return InetAddress.getByName(ipAddress).getHostName();
    } catch (UnknownHostException e) {
        e.printStackTrace();  // NOTE(review): prefer a logger over stderr here
        return null;
    }
}
/**
 * Spare hosts are the {@code spares} hosts in the system with the most free capacity.
 *
 * Retired and inactive nodes are not counted as used capacity (they could have
 * been moved precisely to make room for the spare in the first place).
 */
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
    DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
    return nodes.stream()
            .filter(candidate -> candidate.type().equals(NodeType.host))
            .filter(host -> host.state().equals(Node.State.active))
            .filter(host -> capacity.freeIPs(host) > 0)
            .sorted(capacity::compareWithoutInactive)
            .limit(spares)
            .collect(Collectors.toSet());
}
/**
 * Headroom hosts are the hosts with the least but sufficient capacity for the
 * requested headroom.
 *
 * If there is not enough headroom, the headroom-violating hosts are the ones
 * that are closest to fulfilling a headroom request.
 */
private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
    DockerHostCapacity capacity = new DockerHostCapacity(nodes);
    Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();
    // Active non-spare hosts with at least one free IP, least free capacity first
    List<Node> hostsSortedOnLeastCapacity = nodes.stream()
            .filter(n -> !spareNodes.contains(n))
            .filter(node -> node.type().equals(NodeType.host))
            .filter(dockerHost -> dockerHost.state().equals(Node.State.active))
            .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
            .sorted((a, b) -> capacity.compareWithoutInactive(b, a))
            .collect(Collectors.toList());
    for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
        Set<Node> tempHeadroom = new HashSet<>();
        Set<Node> notEnoughCapacity = new HashSet<>();
        ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);
        // Phase 1: reserve headroom on the smallest hosts that can actually fit it
        for (Node host : hostsSortedOnLeastCapacity) {
            if (headroomHosts.containsKey(host)) continue; // already reserved for another flavor
            if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
                headroomHosts.put(host, headroomCapacity);
                tempHeadroom.add(host);
            } else {
                notEnoughCapacity.add(host);
            }
            if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
                break;
            }
        }
        // Phase 2: not enough hosts could fit the headroom - mark the closest
        // candidates as headroom-violating so allocation can account for them
        if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
            List<Node> violations = notEnoughCapacity.stream()
                    .sorted((a, b) -> capacity.compare(b, a))
                    .limit(flavor.getIdealHeadroom() - tempHeadroom.size())
                    .collect(Collectors.toList());
            for (Node hostViolatingHeadrom : violations) {
                headroomHosts.put(hostViolatingHeadrom, headroomCapacity);
            }
        }
    }
    return headroomHosts;
}
/**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
List<PrioritizableNode> prioritize() {
List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
Collections.sort(priorityList);
return priorityList;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
PrioritizableNode nodePri = toNodePriority(node, true, false);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(node, nodePri);
}
}
}
/**
* Add a node on each docker host with enough capacity for the requested flavor
*/
void addNewDockerNodes() {
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
for (Node node : allNodes) {
if (node.type() == NodeType.host && node.state() == Node.State.active) {
boolean conflictingCluster = false;
NodeList list = new NodeList(allNodes);
NodeList childrenWithSameApp = list.childNodes(node).owner(appId);
for (Node child : childrenWithSameApp.asList()) {
if (child.allocation().get().membership().cluster().id().equals(clusterSpec.id())) {
conflictingCluster = true;
break;
}
}
if (!conflictingCluster && capacity.hasCapacity(node, ResourceCapacity.of(getFlavor(requestedNodes)))) {
Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes);
if (ipAddresses.isEmpty()) continue;
String ipAddress = ipAddresses.stream().findFirst().get();
String hostname = lookupHostname(ipAddress);
if (hostname == null) continue;
Node newNode = Node.createDockerNode("fake-" + hostname, Collections.singleton(ipAddress),
Collections.emptySet(), hostname, Optional.of(node.hostname()), getFlavor(requestedNodes), NodeType.tenant);
PrioritizableNode nodePri = toNodePriority(newNode, false, true);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(newNode, nodePri);
}
}
}
}
}
/**
* Add existing nodes allocated to the application
*/
void addApplicationNodes() {
List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.map(node -> toNodePriority(node, false, false))
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Add nodes already provisioned, but not allocatied to any application
*/
void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> node.state().equals(Node.State.ready))
.map(node -> toNodePriority(node, false, false))
.filter(n -> !n.violatesSpares || isAllocatingForReplacement)
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Convert a list of nodes to a list of node priorities. This includes finding, calculating
* parameters to the priority sorting procedure.
*/
private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
PrioritizableNode pri = new PrioritizableNode();
pri.node = node;
pri.isSurplusNode = isSurplusNode;
pri.isNewNode = isNewNode;
pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
pri.parent = findParentNode(node);
if (pri.parent.isPresent()) {
Node parent = pri.parent.get();
pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
if (spareHosts.contains(parent)) {
pri.violatesSpares = true;
}
if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) {
ResourceCapacity neededCapacity = headroomHosts.get(parent);
if (isNewNode) {
neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node));
}
pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity);
}
}
return pri;
}
static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
NodeList list = new NodeList(nodes);
return list.childNodes(parent).asList().stream()
.sorted(NodePrioritizer::compareForRelocation)
.findFirst()
.filter(n -> n.equals(node))
.isPresent();
}
private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
if (nodeFailedNodes == 0) return false;
int wantedCount = 0;
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
wantedCount = countSpec.getCount();
}
return (wantedCount > nofNodesInCluster - nodeFailedNodes);
}
private static Flavor getFlavor(NodeSpec requestedNodes) {
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
return countSpec.getFlavor();
}
return null;
}
private boolean isDocker() {
Flavor flavor = getFlavor(requestedNodes);
return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
}
private Optional<Node> findParentNode(Node node) {
if (!node.parentHostname().isPresent()) return Optional.empty();
return allNodes.stream()
.filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE")))
.findAny();
}
} |
Ah.. I see. due to the condition above, the && condition is not necessary. I'll change it as it is more readable instead of removing it. | private static int compareForRelocation(Node a, Node b) {
// Order children for relocation: smallest footprint first, then unallocated before
// allocated, then container clusters before non-container, then by hostname.
int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
if (capacity != 0) return capacity;
if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
// At this point both allocations are present or both absent; checking b (not a twice)
// fixes the copy-paste and makes the intent explicit without changing behavior.
if (a.allocation().isPresent() && b.allocation().isPresent()) {
    if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
            !b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
        return -1;
    if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
            b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
        return 1;
}
return a.hostname().compareTo(b.hostname());
} | if (a.allocation().isPresent() && a.allocation().isPresent()) { | private static int compareForRelocation(Node a, Node b) {
int capacity = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
if (capacity != 0) return capacity;
if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
// Same fix as above: test b's allocation in the second operand
if (a.allocation().isPresent() && b.allocation().isPresent()) {
    if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
            !b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
        return -1;
    if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
            b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
        return 1;
}
return a.hostname().compareTo(b.hostname());
} | class NodePrioritizer {
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
private final List<Node> allNodes;
private final DockerHostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
private final Map<Node, ResourceCapacity> headroomHosts;
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares) {
this.allNodes = Collections.unmodifiableList(allNodes);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.appId = appId;
this.spareHosts = findSpareHosts(allNodes, spares);
this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors);
this.capacity = new DockerHostCapacity(allNodes);
long nofFailedNodes = allNodes.stream()
.filter(node -> node.state().equals(Node.State.failed))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
long nofNodesInCluster = allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
this.isDocker = isDocker();
}
/**
* From ipAddress - get hostname
*
* @return hostname or null if not able to do the lookup
*/
private static String lookupHostname(String ipAddress) {
try {
return InetAddress.getByName(ipAddress).getHostName();
} catch (UnknownHostException e) {
e.printStackTrace();
}
return null;
}
/**
* Spare hosts are the two hosts in the system with the most free capacity.
*
* We do not count retired or inactive nodes as used capacity (as they could have been
* moved to create space for the spare node in the first place).
*/
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
return nodes.stream()
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted(capacity::compareWithoutInactive)
.limit(spares)
.collect(Collectors.toSet());
}
/**
* Headroom hosts are the host with the least but sufficient capacity for the requested headroom.
*
* If not enough headroom - the headroom violating hosts are the once that are closest to fulfill
* a headroom request.
*/
private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
DockerHostCapacity capacity = new DockerHostCapacity(nodes);
Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();
List<Node> hostsSortedOnLeastCapacity = nodes.stream()
.filter(n -> !spareNodes.contains(n))
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted((a, b) -> capacity.compareWithoutInactive(b, a))
.collect(Collectors.toList());
for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
Set<Node> tempHeadroom = new HashSet<>();
Set<Node> notEnoughCapacity = new HashSet<>();
ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);
for (Node host : hostsSortedOnLeastCapacity) {
if (headroomHosts.containsKey(host)) continue;
if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
headroomHosts.put(host, headroomCapacity);
tempHeadroom.add(host);
} else {
notEnoughCapacity.add(host);
}
if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
break;
}
}
if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
List<Node> violations = notEnoughCapacity.stream()
.sorted((a, b) -> capacity.compare(b, a))
.limit(flavor.getIdealHeadroom() - tempHeadroom.size())
.collect(Collectors.toList());
for (Node hostViolatingHeadrom : violations) {
headroomHosts.put(hostViolatingHeadrom, headroomCapacity);
}
}
}
return headroomHosts;
}
/**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
List<PrioritizableNode> prioritize() {
List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
Collections.sort(priorityList);
return priorityList;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
PrioritizableNode nodePri = toNodePriority(node, true, false);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(node, nodePri);
}
}
}
/**
* Add a node on each docker host with enough capacity for the requested flavor
*/
void addNewDockerNodes() {
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
for (Node node : allNodes) {
if (node.type() == NodeType.host && node.state() == Node.State.active) {
boolean conflictingCluster = false;
NodeList list = new NodeList(allNodes);
NodeList childrenWithSameApp = list.childNodes(node).owner(appId);
for (Node child : childrenWithSameApp.asList()) {
if (child.allocation().get().membership().cluster().id().equals(clusterSpec.id())) {
conflictingCluster = true;
break;
}
}
if (!conflictingCluster && capacity.hasCapacity(node, ResourceCapacity.of(getFlavor(requestedNodes)))) {
Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes);
if (ipAddresses.isEmpty()) continue;
String ipAddress = ipAddresses.stream().findFirst().get();
String hostname = lookupHostname(ipAddress);
if (hostname == null) continue;
Node newNode = Node.createDockerNode("fake-" + hostname, Collections.singleton(ipAddress),
Collections.emptySet(), hostname, Optional.of(node.hostname()), getFlavor(requestedNodes), NodeType.tenant);
PrioritizableNode nodePri = toNodePriority(newNode, false, true);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(newNode, nodePri);
}
}
}
}
}
/**
* Add existing nodes allocated to the application
*/
void addApplicationNodes() {
List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.map(node -> toNodePriority(node, false, false))
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Add nodes already provisioned, but not allocatied to any application
*/
void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> node.state().equals(Node.State.ready))
.map(node -> toNodePriority(node, false, false))
.filter(n -> !n.violatesSpares || isAllocatingForReplacement)
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Convert a list of nodes to a list of node priorities. This includes finding, calculating
* parameters to the priority sorting procedure.
*/
private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
PrioritizableNode pri = new PrioritizableNode();
pri.node = node;
pri.isSurplusNode = isSurplusNode;
pri.isNewNode = isNewNode;
pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
pri.parent = findParentNode(node);
if (pri.parent.isPresent()) {
Node parent = pri.parent.get();
pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
if (spareHosts.contains(parent)) {
pri.violatesSpares = true;
}
if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) {
ResourceCapacity neededCapacity = headroomHosts.get(parent);
if (isNewNode) {
neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node));
}
pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity);
}
}
return pri;
}
static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
NodeList list = new NodeList(nodes);
return list.childNodes(parent).asList().stream()
.sorted(NodePrioritizer::compareForRelocation)
.findFirst()
.filter(n -> n.equals(node))
.isPresent();
}
private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
if (nodeFailedNodes == 0) return false;
int wantedCount = 0;
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
wantedCount = countSpec.getCount();
}
return (wantedCount > nofNodesInCluster - nodeFailedNodes);
}
private static Flavor getFlavor(NodeSpec requestedNodes) {
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
return countSpec.getFlavor();
}
return null;
}
private boolean isDocker() {
Flavor flavor = getFlavor(requestedNodes);
return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
}
private Optional<Node> findParentNode(Node node) {
if (!node.parentHostname().isPresent()) return Optional.empty();
return allNodes.stream()
.filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE")))
.findAny();
}
} | class NodePrioritizer {
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
private final List<Node> allNodes;
private final DockerHostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
private final Map<Node, ResourceCapacity> headroomHosts;
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares) {
this.allNodes = Collections.unmodifiableList(allNodes);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.appId = appId;
this.spareHosts = findSpareHosts(allNodes, spares);
this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors);
this.capacity = new DockerHostCapacity(allNodes);
long nofFailedNodes = allNodes.stream()
.filter(node -> node.state().equals(Node.State.failed))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
long nofNodesInCluster = allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
this.isDocker = isDocker();
}
/**
* From ipAddress - get hostname
*
* @return hostname or null if not able to do the lookup
*/
private static String lookupHostname(String ipAddress) {
try {
return InetAddress.getByName(ipAddress).getHostName();
} catch (UnknownHostException e) {
e.printStackTrace();
}
return null;
}
/**
* Spare hosts are the two hosts in the system with the most free capacity.
*
* We do not count retired or inactive nodes as used capacity (as they could have been
* moved to create space for the spare node in the first place).
*/
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
return nodes.stream()
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted(capacity::compareWithoutInactive)
.limit(spares)
.collect(Collectors.toSet());
}
/**
* Headroom hosts are the host with the least but sufficient capacity for the requested headroom.
*
* If not enough headroom - the headroom violating hosts are the once that are closest to fulfill
* a headroom request.
*/
private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
DockerHostCapacity capacity = new DockerHostCapacity(nodes);
Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();
List<Node> hostsSortedOnLeastCapacity = nodes.stream()
.filter(n -> !spareNodes.contains(n))
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted((a, b) -> capacity.compareWithoutInactive(b, a))
.collect(Collectors.toList());
for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
Set<Node> tempHeadroom = new HashSet<>();
Set<Node> notEnoughCapacity = new HashSet<>();
ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);
for (Node host : hostsSortedOnLeastCapacity) {
if (headroomHosts.containsKey(host)) continue;
if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
headroomHosts.put(host, headroomCapacity);
tempHeadroom.add(host);
} else {
notEnoughCapacity.add(host);
}
if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
break;
}
}
if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
List<Node> violations = notEnoughCapacity.stream()
.sorted((a, b) -> capacity.compare(b, a))
.limit(flavor.getIdealHeadroom() - tempHeadroom.size())
.collect(Collectors.toList());
for (Node hostViolatingHeadrom : violations) {
headroomHosts.put(hostViolatingHeadrom, headroomCapacity);
}
}
}
return headroomHosts;
}
/**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
List<PrioritizableNode> prioritize() {
List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
Collections.sort(priorityList);
return priorityList;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
PrioritizableNode nodePri = toNodePriority(node, true, false);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(node, nodePri);
}
}
}
/**
* Add a node on each docker host with enough capacity for the requested flavor
*/
void addNewDockerNodes() {
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
for (Node node : allNodes) {
if (node.type() == NodeType.host && node.state() == Node.State.active) {
boolean conflictingCluster = false;
NodeList list = new NodeList(allNodes);
NodeList childrenWithSameApp = list.childNodes(node).owner(appId);
for (Node child : childrenWithSameApp.asList()) {
if (child.allocation().get().membership().cluster().id().equals(clusterSpec.id())) {
conflictingCluster = true;
break;
}
}
if (!conflictingCluster && capacity.hasCapacity(node, ResourceCapacity.of(getFlavor(requestedNodes)))) {
Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes);
if (ipAddresses.isEmpty()) continue;
String ipAddress = ipAddresses.stream().findFirst().get();
String hostname = lookupHostname(ipAddress);
if (hostname == null) continue;
Node newNode = Node.createDockerNode("fake-" + hostname, Collections.singleton(ipAddress),
Collections.emptySet(), hostname, Optional.of(node.hostname()), getFlavor(requestedNodes), NodeType.tenant);
PrioritizableNode nodePri = toNodePriority(newNode, false, true);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(newNode, nodePri);
}
}
}
}
}
/**
* Add existing nodes allocated to the application
*/
void addApplicationNodes() {
    // Nodes already allocated to this application, in a state that permits reuse.
    List<Node.State> reusableStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
    for (Node candidate : allNodes) {
        if (!candidate.type().equals(requestedNodes.type())) continue;
        if (!reusableStates.contains(candidate.state())) continue;
        if (!candidate.allocation().isPresent()) continue;
        if (!candidate.allocation().get().owner().equals(appId)) continue;
        PrioritizableNode prioritized = toNodePriority(candidate, false, false);
        nodes.put(prioritized.node, prioritized);
    }
}
/**
* Add nodes already provisioned, but not allocatied to any application
*/
void addReadyNodes() {
    // Provisioned nodes in ready state, not yet owned by any application.
    for (Node candidate : allNodes) {
        if (!candidate.type().equals(requestedNodes.type())) continue;
        if (!candidate.state().equals(Node.State.ready)) continue;
        PrioritizableNode prioritized = toNodePriority(candidate, false, false);
        // A spare-policy violation disqualifies the node unless we are replacing failures.
        if (prioritized.violatesSpares && !isAllocatingForReplacement) continue;
        nodes.put(prioritized.node, prioritized);
    }
}
/**
* Convert a list of nodes to a list of node priorities. This includes finding, calculating
* parameters to the priority sorting procedure.
*/
private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
PrioritizableNode pri = new PrioritizableNode();
pri.node = node;
pri.isSurplusNode = isSurplusNode;
pri.isNewNode = isNewNode;
// Prefer nodes that already carry the requested (non-stock) flavor.
pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
pri.parent = findParentNode(node);
if (pri.parent.isPresent()) {
Node parent = pri.parent.get();
pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
// Landing on a designated spare host is recorded as a (soft) violation.
if (spareHosts.contains(parent)) {
pri.violatesSpares = true;
}
// Headroom check only applies when this node is the parent's preferred relocation candidate.
if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) {
ResourceCapacity neededCapacity = headroomHosts.get(parent);
// A new node consumes capacity itself, so fold it into the headroom requirement.
if (isNewNode) {
neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node));
}
pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity);
}
}
return pri;
}
static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
    // The preferred relocation candidate among 'parent''s children is the one
    // sorting first under compareForRelocation; true iff that candidate is 'node'.
    Optional<Node> firstChild = new NodeList(nodes).childNodes(parent).asList().stream()
            .min(NodePrioritizer::compareForRelocation);
    return firstChild.filter(n -> n.equals(node)).isPresent();
}
private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
    // With no failed nodes this cannot be a replacement allocation.
    if (nodeFailedNodes == 0) return false;
    // Only count-based specs carry an explicit wanted count; otherwise treat as 0.
    int requestedCount = (requestedNodes instanceof NodeSpec.CountNodeSpec)
            ? ((NodeSpec.CountNodeSpec) requestedNodes).getCount()
            : 0;
    long healthyNodesInCluster = nofNodesInCluster - nodeFailedNodes;
    return requestedCount > healthyNodesInCluster;
}
private static Flavor getFlavor(NodeSpec requestedNodes) {
// Only count-based node specs carry an explicit flavor.
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
return countSpec.getFlavor();
}
// NOTE(review): returns null for other spec types — callers must null-check
// (isDocker() does; addNewDockerNodes passes the result onward unchecked).
return null;
}
private boolean isDocker() {
    // True when the requested flavor exists and is a docker container flavor.
    Flavor requestedFlavor = getFlavor(requestedNodes);
    if (requestedFlavor == null) return false;
    return requestedFlavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
}
private Optional<Node> findParentNode(Node node) {
    // Resolve this node's parent host among allNodes, if it has one.
    // Fix: bind the parent hostname once instead of re-unwrapping the Optional
    // inside the filter with an unreachable " NOT A NODE" sentinel — the
    // presence check above already guarantees the value exists.
    Optional<String> parentHostname = node.parentHostname();
    if (!parentHostname.isPresent()) return Optional.empty();
    String wantedHostname = parentHostname.get();
    return allNodes.stream()
            .filter(n -> n.hostname().equals(wantedHostname))
            .findAny();
}
} |
Props! Not often you see this kind of error spotted in reviews. | private static int compareForRelocation(Node a, Node b) {
// Primary order: resource capacity.
int capacityOrder = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
if (capacityOrder != 0) return capacityOrder;
// Unallocated nodes sort before allocated ones.
if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
// BUG FIX: condition previously tested a.allocation().isPresent() twice;
// the second operand must test b. Container clusters sort before others.
if (a.allocation().isPresent() && b.allocation().isPresent()) {
if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
!b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return -1;
if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return 1;
}
// Deterministic tie-break on hostname.
return a.hostname().compareTo(b.hostname());
} | if (a.allocation().isPresent() && a.allocation().isPresent()) { | private static int compareForRelocation(Node a, Node b) {
// Primary order: resource capacity.
int capacityOrder = ResourceCapacity.of(a).compare(ResourceCapacity.of(b));
if (capacityOrder != 0) return capacityOrder;
// Unallocated nodes sort before allocated ones.
if (!a.allocation().isPresent() && b.allocation().isPresent()) return -1;
if (a.allocation().isPresent() && !b.allocation().isPresent()) return 1;
// BUG FIX: condition previously tested a.allocation().isPresent() twice;
// the second operand must test b. Container clusters sort before others.
if (a.allocation().isPresent() && b.allocation().isPresent()) {
if (a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
!b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return -1;
if (!a.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container) &&
b.allocation().get().membership().cluster().type().equals(ClusterSpec.Type.container))
return 1;
}
// Deterministic tie-break on hostname.
return a.hostname().compareTo(b.hostname());
} | class NodePrioritizer {
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
private final List<Node> allNodes;
private final DockerHostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
private final Map<Node, ResourceCapacity> headroomHosts;
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares) {
this.allNodes = Collections.unmodifiableList(allNodes);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.appId = appId;
this.spareHosts = findSpareHosts(allNodes, spares);
this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors);
this.capacity = new DockerHostCapacity(allNodes);
long nofFailedNodes = allNodes.stream()
.filter(node -> node.state().equals(Node.State.failed))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
long nofNodesInCluster = allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
this.isDocker = isDocker();
}
/**
* From ipAddress - get hostname
*
* @return hostname or null if not able to do the lookup
*/
private static String lookupHostname(String ipAddress) {
try {
return InetAddress.getByName(ipAddress).getHostName();
} catch (UnknownHostException e) {
e.printStackTrace();
}
return null;
}
/**
* Spare hosts are the two hosts in the system with the most free capacity.
*
* We do not count retired or inactive nodes as used capacity (as they could have been
* moved to create space for the spare node in the first place).
*/
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
return nodes.stream()
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted(capacity::compareWithoutInactive)
.limit(spares)
.collect(Collectors.toSet());
}
/**
* Headroom hosts are the host with the least but sufficient capacity for the requested headroom.
*
* If not enough headroom - the headroom violating hosts are the once that are closest to fulfill
* a headroom request.
*/
private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
DockerHostCapacity capacity = new DockerHostCapacity(nodes);
Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();
List<Node> hostsSortedOnLeastCapacity = nodes.stream()
.filter(n -> !spareNodes.contains(n))
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted((a, b) -> capacity.compareWithoutInactive(b, a))
.collect(Collectors.toList());
for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
Set<Node> tempHeadroom = new HashSet<>();
Set<Node> notEnoughCapacity = new HashSet<>();
ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);
for (Node host : hostsSortedOnLeastCapacity) {
if (headroomHosts.containsKey(host)) continue;
if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
headroomHosts.put(host, headroomCapacity);
tempHeadroom.add(host);
} else {
notEnoughCapacity.add(host);
}
if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
break;
}
}
if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
List<Node> violations = notEnoughCapacity.stream()
.sorted((a, b) -> capacity.compare(b, a))
.limit(flavor.getIdealHeadroom() - tempHeadroom.size())
.collect(Collectors.toList());
for (Node hostViolatingHeadrom : violations) {
headroomHosts.put(hostViolatingHeadrom, headroomCapacity);
}
}
}
return headroomHosts;
}
/**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
List<PrioritizableNode> prioritize() {
List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
Collections.sort(priorityList);
return priorityList;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
PrioritizableNode nodePri = toNodePriority(node, true, false);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(node, nodePri);
}
}
}
/**
* Add a node on each docker host with enough capacity for the requested flavor
*/
void addNewDockerNodes() {
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
for (Node node : allNodes) {
if (node.type() == NodeType.host && node.state() == Node.State.active) {
boolean conflictingCluster = false;
NodeList list = new NodeList(allNodes);
NodeList childrenWithSameApp = list.childNodes(node).owner(appId);
for (Node child : childrenWithSameApp.asList()) {
if (child.allocation().get().membership().cluster().id().equals(clusterSpec.id())) {
conflictingCluster = true;
break;
}
}
if (!conflictingCluster && capacity.hasCapacity(node, ResourceCapacity.of(getFlavor(requestedNodes)))) {
Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes);
if (ipAddresses.isEmpty()) continue;
String ipAddress = ipAddresses.stream().findFirst().get();
String hostname = lookupHostname(ipAddress);
if (hostname == null) continue;
Node newNode = Node.createDockerNode("fake-" + hostname, Collections.singleton(ipAddress),
Collections.emptySet(), hostname, Optional.of(node.hostname()), getFlavor(requestedNodes), NodeType.tenant);
PrioritizableNode nodePri = toNodePriority(newNode, false, true);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(newNode, nodePri);
}
}
}
}
}
/**
* Add existing nodes allocated to the application
*/
void addApplicationNodes() {
List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.map(node -> toNodePriority(node, false, false))
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Add nodes already provisioned, but not allocatied to any application
*/
void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> node.state().equals(Node.State.ready))
.map(node -> toNodePriority(node, false, false))
.filter(n -> !n.violatesSpares || isAllocatingForReplacement)
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Convert a list of nodes to a list of node priorities. This includes finding, calculating
* parameters to the priority sorting procedure.
*/
private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
PrioritizableNode pri = new PrioritizableNode();
pri.node = node;
pri.isSurplusNode = isSurplusNode;
pri.isNewNode = isNewNode;
pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
pri.parent = findParentNode(node);
if (pri.parent.isPresent()) {
Node parent = pri.parent.get();
pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
if (spareHosts.contains(parent)) {
pri.violatesSpares = true;
}
if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) {
ResourceCapacity neededCapacity = headroomHosts.get(parent);
if (isNewNode) {
neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node));
}
pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity);
}
}
return pri;
}
static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
NodeList list = new NodeList(nodes);
return list.childNodes(parent).asList().stream()
.sorted(NodePrioritizer::compareForRelocation)
.findFirst()
.filter(n -> n.equals(node))
.isPresent();
}
private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
if (nodeFailedNodes == 0) return false;
int wantedCount = 0;
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
wantedCount = countSpec.getCount();
}
return (wantedCount > nofNodesInCluster - nodeFailedNodes);
}
private static Flavor getFlavor(NodeSpec requestedNodes) {
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
return countSpec.getFlavor();
}
return null;
}
private boolean isDocker() {
Flavor flavor = getFlavor(requestedNodes);
return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
}
private Optional<Node> findParentNode(Node node) {
if (!node.parentHostname().isPresent()) return Optional.empty();
return allNodes.stream()
.filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE")))
.findAny();
}
} | class NodePrioritizer {
private final Map<Node, PrioritizableNode> nodes = new HashMap<>();
private final List<Node> allNodes;
private final DockerHostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId appId;
private final ClusterSpec clusterSpec;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
private final Map<Node, ResourceCapacity> headroomHosts;
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, NodeFlavors nodeFlavors, int spares) {
this.allNodes = Collections.unmodifiableList(allNodes);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.appId = appId;
this.spareHosts = findSpareHosts(allNodes, spares);
this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors);
this.capacity = new DockerHostCapacity(allNodes);
long nofFailedNodes = allNodes.stream()
.filter(node -> node.state().equals(Node.State.failed))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
long nofNodesInCluster = allNodes.stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.count();
this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes);
this.isDocker = isDocker();
}
/**
* From ipAddress - get hostname
*
* @return hostname or null if not able to do the lookup
*/
private static String lookupHostname(String ipAddress) {
try {
return InetAddress.getByName(ipAddress).getHostName();
} catch (UnknownHostException e) {
e.printStackTrace();
}
return null;
}
/**
* Spare hosts are the two hosts in the system with the most free capacity.
*
* We do not count retired or inactive nodes as used capacity (as they could have been
* moved to create space for the spare node in the first place).
*/
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
return nodes.stream()
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted(capacity::compareWithoutInactive)
.limit(spares)
.collect(Collectors.toSet());
}
/**
* Headroom hosts are the host with the least but sufficient capacity for the requested headroom.
*
* If not enough headroom - the headroom violating hosts are the once that are closest to fulfill
* a headroom request.
*/
private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
DockerHostCapacity capacity = new DockerHostCapacity(nodes);
Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();
List<Node> hostsSortedOnLeastCapacity = nodes.stream()
.filter(n -> !spareNodes.contains(n))
.filter(node -> node.type().equals(NodeType.host))
.filter(dockerHost -> dockerHost.state().equals(Node.State.active))
.filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
.sorted((a, b) -> capacity.compareWithoutInactive(b, a))
.collect(Collectors.toList());
for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
Set<Node> tempHeadroom = new HashSet<>();
Set<Node> notEnoughCapacity = new HashSet<>();
ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);
for (Node host : hostsSortedOnLeastCapacity) {
if (headroomHosts.containsKey(host)) continue;
if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
headroomHosts.put(host, headroomCapacity);
tempHeadroom.add(host);
} else {
notEnoughCapacity.add(host);
}
if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
break;
}
}
if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
List<Node> violations = notEnoughCapacity.stream()
.sorted((a, b) -> capacity.compare(b, a))
.limit(flavor.getIdealHeadroom() - tempHeadroom.size())
.collect(Collectors.toList());
for (Node hostViolatingHeadrom : violations) {
headroomHosts.put(hostViolatingHeadrom, headroomCapacity);
}
}
}
return headroomHosts;
}
/**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
List<PrioritizableNode> prioritize() {
List<PrioritizableNode> priorityList = new ArrayList<>(nodes.values());
Collections.sort(priorityList);
return priorityList;
}
/**
* Add nodes that have been previously reserved to the same application from
* an earlier downsizing of a cluster
*/
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
PrioritizableNode nodePri = toNodePriority(node, true, false);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(node, nodePri);
}
}
}
/**
* Add a node on each docker host with enough capacity for the requested flavor
*/
void addNewDockerNodes() {
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
for (Node node : allNodes) {
if (node.type() == NodeType.host && node.state() == Node.State.active) {
boolean conflictingCluster = false;
NodeList list = new NodeList(allNodes);
NodeList childrenWithSameApp = list.childNodes(node).owner(appId);
for (Node child : childrenWithSameApp.asList()) {
if (child.allocation().get().membership().cluster().id().equals(clusterSpec.id())) {
conflictingCluster = true;
break;
}
}
if (!conflictingCluster && capacity.hasCapacity(node, ResourceCapacity.of(getFlavor(requestedNodes)))) {
Set<String> ipAddresses = DockerHostCapacity.findFreeIps(node, allNodes);
if (ipAddresses.isEmpty()) continue;
String ipAddress = ipAddresses.stream().findFirst().get();
String hostname = lookupHostname(ipAddress);
if (hostname == null) continue;
Node newNode = Node.createDockerNode("fake-" + hostname, Collections.singleton(ipAddress),
Collections.emptySet(), hostname, Optional.of(node.hostname()), getFlavor(requestedNodes), NodeType.tenant);
PrioritizableNode nodePri = toNodePriority(newNode, false, true);
if (!nodePri.violatesSpares || isAllocatingForReplacement) {
nodes.put(newNode, nodePri);
}
}
}
}
}
/**
* Add existing nodes allocated to the application
*/
void addApplicationNodes() {
List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(appId))
.map(node -> toNodePriority(node, false, false))
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Add nodes already provisioned, but not allocatied to any application
*/
void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
.filter(node -> node.state().equals(Node.State.ready))
.map(node -> toNodePriority(node, false, false))
.filter(n -> !n.violatesSpares || isAllocatingForReplacement)
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
}
/**
* Convert a list of nodes to a list of node priorities. This includes finding, calculating
* parameters to the priority sorting procedure.
*/
private PrioritizableNode toNodePriority(Node node, boolean isSurplusNode, boolean isNewNode) {
PrioritizableNode pri = new PrioritizableNode();
pri.node = node;
pri.isSurplusNode = isSurplusNode;
pri.isNewNode = isNewNode;
pri.preferredOnFlavor = requestedNodes.specifiesNonStockFlavor() && node.flavor().equals(getFlavor(requestedNodes));
pri.parent = findParentNode(node);
if (pri.parent.isPresent()) {
Node parent = pri.parent.get();
pri.freeParentCapacity = capacity.freeCapacityOf(parent, false);
if (spareHosts.contains(parent)) {
pri.violatesSpares = true;
}
if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) {
ResourceCapacity neededCapacity = headroomHosts.get(parent);
if (isNewNode) {
neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node));
}
pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity);
}
}
return pri;
}
static boolean isPreferredNodeToBeReloacted(List<Node> nodes, Node node, Node parent) {
NodeList list = new NodeList(nodes);
return list.childNodes(parent).asList().stream()
.sorted(NodePrioritizer::compareForRelocation)
.findFirst()
.filter(n -> n.equals(node))
.isPresent();
}
private boolean isReplacement(long nofNodesInCluster, long nodeFailedNodes) {
if (nodeFailedNodes == 0) return false;
int wantedCount = 0;
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
wantedCount = countSpec.getCount();
}
return (wantedCount > nofNodesInCluster - nodeFailedNodes);
}
private static Flavor getFlavor(NodeSpec requestedNodes) {
if (requestedNodes instanceof NodeSpec.CountNodeSpec) {
NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) requestedNodes;
return countSpec.getFlavor();
}
return null;
}
private boolean isDocker() {
Flavor flavor = getFlavor(requestedNodes);
return (flavor != null) && flavor.getType().equals(Flavor.Type.DOCKER_CONTAINER);
}
private Optional<Node> findParentNode(Node node) {
if (!node.parentHostname().isPresent()) return Optional.empty();
return allNodes.stream()
.filter(n -> n.hostname().equals(node.parentHostname().orElse(" NOT A NODE")))
.findAny();
}
} |
No need to call `.stream()`; `Collection` already provides `forEach()`. | private TopologicalDocumentTypeSorter(Collection<NewDocumentType> documentTypes) {
// Iterable.forEach suffices here — no need to create a stream first.
documentTypes.forEach(docType -> unsortedTypes.put(docType.getName(), docType));
unsortedTypes.values().forEach(docType -> depthFirstTraverse(docType));
} | documentTypes.stream().forEach(docType -> unsortedTypes.put(docType.getName(), docType)); | private TopologicalDocumentTypeSorter(Collection<NewDocumentType> documentTypes) {
documentTypes.forEach(docType -> unsortedTypes.put(docType.getName(), docType));
unsortedTypes.values().forEach(docType -> depthFirstTraverse(docType));
} | class TopologicalDocumentTypeSorter {
private final Map<String, NewDocumentType> unsortedTypes = new LinkedHashMap<>();
private final Map<String, NewDocumentType> sortedTypes = new LinkedHashMap<>();
private void depthFirstTraverse(NewDocumentType docType) {
if (sortedTypes.containsKey(docType.getName())) {
return;
}
for (NewDocumentType.Name referenceDocTypeName : docType.getDocumentReferences()) {
NewDocumentType referenceDocType = unsortedTypes.get(referenceDocTypeName.getName());
assert (referenceDocType != null);
depthFirstTraverse(referenceDocType);
}
sortedTypes.put(docType.getName(), docType);
}
public static List<NewDocumentType> sort(Collection<NewDocumentType> documentTypes) {
TopologicalDocumentTypeSorter sorter = new TopologicalDocumentTypeSorter(documentTypes);
return sorter.sortedTypes.values().stream().collect(Collectors.toList());
}
} | class TopologicalDocumentTypeSorter {
private final Map<String, NewDocumentType> unsortedTypes = new LinkedHashMap<>();
private final Map<String, NewDocumentType> sortedTypes = new LinkedHashMap<>();
private void depthFirstTraverse(NewDocumentType docType) {
if (sortedTypes.containsKey(docType.getName())) {
return;
}
for (NewDocumentType.Name referenceDocTypeName : docType.getDocumentReferences()) {
NewDocumentType referenceDocType = unsortedTypes.get(referenceDocTypeName.getName());
depthFirstTraverse(referenceDocType);
}
sortedTypes.put(docType.getName(), docType);
}
public static List<NewDocumentType> sort(Collection<NewDocumentType> documentTypes) {
TopologicalDocumentTypeSorter sorter = new TopologicalDocumentTypeSorter(documentTypes);
return new ArrayList<>(sorter.sortedTypes.values());
}
} |
Same as above. | private TopologicalDocumentTypeSorter(Collection<NewDocumentType> documentTypes) {
// Iterable.forEach suffices here — no need to create a stream first.
documentTypes.forEach(docType -> unsortedTypes.put(docType.getName(), docType));
unsortedTypes.values().forEach(docType -> depthFirstTraverse(docType));
} | unsortedTypes.values().stream().forEach(docType -> depthFirstTraverse(docType)); | private TopologicalDocumentTypeSorter(Collection<NewDocumentType> documentTypes) {
documentTypes.forEach(docType -> unsortedTypes.put(docType.getName(), docType));
unsortedTypes.values().forEach(docType -> depthFirstTraverse(docType));
} | class TopologicalDocumentTypeSorter {
private final Map<String, NewDocumentType> unsortedTypes = new LinkedHashMap<>();
private final Map<String, NewDocumentType> sortedTypes = new LinkedHashMap<>();
private void depthFirstTraverse(NewDocumentType docType) {
if (sortedTypes.containsKey(docType.getName())) {
return;
}
for (NewDocumentType.Name referenceDocTypeName : docType.getDocumentReferences()) {
NewDocumentType referenceDocType = unsortedTypes.get(referenceDocTypeName.getName());
assert (referenceDocType != null);
depthFirstTraverse(referenceDocType);
}
sortedTypes.put(docType.getName(), docType);
}
public static List<NewDocumentType> sort(Collection<NewDocumentType> documentTypes) {
TopologicalDocumentTypeSorter sorter = new TopologicalDocumentTypeSorter(documentTypes);
return sorter.sortedTypes.values().stream().collect(Collectors.toList());
}
} | class TopologicalDocumentTypeSorter {
private final Map<String, NewDocumentType> unsortedTypes = new LinkedHashMap<>();
private final Map<String, NewDocumentType> sortedTypes = new LinkedHashMap<>();
private void depthFirstTraverse(NewDocumentType docType) {
if (sortedTypes.containsKey(docType.getName())) {
return;
}
for (NewDocumentType.Name referenceDocTypeName : docType.getDocumentReferences()) {
NewDocumentType referenceDocType = unsortedTypes.get(referenceDocTypeName.getName());
depthFirstTraverse(referenceDocType);
}
sortedTypes.put(docType.getName(), docType);
}
public static List<NewDocumentType> sort(Collection<NewDocumentType> documentTypes) {
TopologicalDocumentTypeSorter sorter = new TopologicalDocumentTypeSorter(documentTypes);
return new ArrayList<>(sorter.sortedTypes.values());
}
} |
If the BE comes back alive within the delay, the task won't be scheduled. | private void matchGroup() {
GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState();
ColocateTableIndex colocateIndex = globalStateMgr.getColocateTableIndex();
TabletScheduler tabletScheduler = globalStateMgr.getTabletScheduler();
// Timestamp used to stamp healthy tablets' last status check.
long checkStartTime = System.currentTimeMillis();
Set<GroupId> groupIds = colocateIndex.getAllGroupIds();
for (GroupId groupId : groupIds) {
List<Long> tableIds = colocateIndex.getAllTableIds(groupId);
Database db = globalStateMgr.getDbIncludeRecycleBin(groupId.dbId);
if (db == null) {
continue;
}
List<Set<Long>> backendBucketsSeq = colocateIndex.getBackendsPerBucketSeqSet(groupId);
if (backendBucketsSeq.isEmpty()) {
continue;
}
// Assume the group is stable until an unhealthy tablet is found.
boolean isGroupStable = true;
db.readLock();
try {
OUT:
for (Long tableId : tableIds) {
OlapTable olapTable = (OlapTable) globalStateMgr.getTableIncludeRecycleBin(db, tableId);
if (olapTable == null || !colocateIndex.isColocateTable(olapTable.getId())) {
continue;
}
for (Partition partition : globalStateMgr.getPartitionsIncludeRecycleBin(olapTable)) {
short replicationNum =
globalStateMgr.getReplicationNumIncludeRecycleBin(olapTable.getPartitionInfo(),
partition.getId());
// -1 means the replication number could not be resolved for this partition.
if (replicationNum == (short) -1) {
continue;
}
long visibleVersion = partition.getVisibleVersion();
for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) {
Preconditions.checkState(backendBucketsSeq.size() == index.getTablets().size(),
backendBucketsSeq.size() + " vs. " + index.getTablets().size());
int idx = 0;
for (Long tabletId : index.getTabletIdsInOrder()) {
LocalTablet tablet = (LocalTablet) index.getTablet(tabletId);
// Skip tablets already queued in the scheduler.
if (!tabletScheduler.containsTablet(tablet.getId())) {
Set<Long> bucketsSeq = backendBucketsSeq.get(idx);
Preconditions.checkState(bucketsSeq.size() == replicationNum,
bucketsSeq.size() + " vs. " + replicationNum);
TabletStatus st = tablet.getColocateHealthStatus(visibleVersion,
replicationNum, bucketsSeq);
if (st != TabletStatus.HEALTHY) {
isGroupStable = false;
Priority colocateUnhealthyPrio = Priority.HIGH;
// NOTE(review): when the tablet is not yet ready to be repaired
// (e.g. still inside a repair delay), no task is queued this round;
// the tablet is only reconsidered on a later pass.
if (tablet.readyToBeRepaired(colocateUnhealthyPrio)) {
LOG.debug("get unhealthy tablet {} in colocate table. status: {}",
tablet.getId(),
st);
TabletSchedCtx tabletCtx = new TabletSchedCtx(
TabletSchedCtx.Type.REPAIR,
db.getId(), tableId, partition.getId(), index.getId(),
tablet.getId(),
System.currentTimeMillis());
tabletCtx.setTabletStatus(st);
tabletCtx.setOrigPriority(colocateUnhealthyPrio);
tabletCtx.setTabletOrderIdx(idx);
AddResult res = tabletScheduler.addTablet(tabletCtx, false /* not force */);
if (res == AddResult.LIMIT_EXCEED) {
LOG.info("number of scheduling tablets in tablet scheduler"
+ " exceed to limit. stop colocate table check");
break OUT;
}
}
} else {
// Healthy tablet: record when it was last checked.
tablet.setLastStatusCheckTime(checkStartTime);
}
}
idx++;
}
}
}
}
if (isGroupStable) {
colocateIndex.markGroupStable(groupId, true);
} else {
colocateIndex.markGroupUnstable(groupId, true);
}
} finally {
db.readUnlock();
}
}
} | if (tablet.readyToBeRepaired(colocateUnhealthyPrio)) { | private void matchGroup() {
GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState();
ColocateTableIndex colocateIndex = globalStateMgr.getColocateTableIndex();
TabletScheduler tabletScheduler = globalStateMgr.getTabletScheduler();
long checkStartTime = System.currentTimeMillis();
Set<GroupId> groupIds = colocateIndex.getAllGroupIds();
for (GroupId groupId : groupIds) {
List<Long> tableIds = colocateIndex.getAllTableIds(groupId);
Database db = globalStateMgr.getDbIncludeRecycleBin(groupId.dbId);
if (db == null) {
continue;
}
List<Set<Long>> backendBucketsSeq = colocateIndex.getBackendsPerBucketSeqSet(groupId);
if (backendBucketsSeq.isEmpty()) {
continue;
}
boolean isGroupStable = true;
db.readLock();
try {
OUT:
for (Long tableId : tableIds) {
OlapTable olapTable = (OlapTable) globalStateMgr.getTableIncludeRecycleBin(db, tableId);
if (olapTable == null || !colocateIndex.isColocateTable(olapTable.getId())) {
continue;
}
for (Partition partition : globalStateMgr.getPartitionsIncludeRecycleBin(olapTable)) {
short replicationNum =
globalStateMgr.getReplicationNumIncludeRecycleBin(olapTable.getPartitionInfo(),
partition.getId());
if (replicationNum == (short) -1) {
continue;
}
long visibleVersion = partition.getVisibleVersion();
for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) {
Preconditions.checkState(backendBucketsSeq.size() == index.getTablets().size(),
backendBucketsSeq.size() + " vs. " + index.getTablets().size());
int idx = 0;
for (Long tabletId : index.getTabletIdsInOrder()) {
LocalTablet tablet = (LocalTablet) index.getTablet(tabletId);
if (!tabletScheduler.containsTablet(tablet.getId())) {
Set<Long> bucketsSeq = backendBucketsSeq.get(idx);
Preconditions.checkState(bucketsSeq.size() == replicationNum,
bucketsSeq.size() + " vs. " + replicationNum);
TabletStatus st = tablet.getColocateHealthStatus(visibleVersion,
replicationNum, bucketsSeq);
if (st != TabletStatus.HEALTHY) {
isGroupStable = false;
Priority colocateUnhealthyPrio = Priority.HIGH;
if (tablet.readyToBeRepaired(colocateUnhealthyPrio)) {
LOG.debug("get unhealthy tablet {} in colocate table. status: {}",
tablet.getId(),
st);
TabletSchedCtx tabletCtx = new TabletSchedCtx(
TabletSchedCtx.Type.REPAIR,
db.getId(), tableId, partition.getId(), index.getId(),
tablet.getId(),
System.currentTimeMillis());
tabletCtx.setTabletStatus(st);
tabletCtx.setOrigPriority(colocateUnhealthyPrio);
tabletCtx.setTabletOrderIdx(idx);
AddResult res = tabletScheduler.addTablet(tabletCtx, false /* not force */);
if (res == AddResult.LIMIT_EXCEED) {
LOG.info("number of scheduling tablets in tablet scheduler"
+ " exceed to limit. stop colocate table check");
break OUT;
}
}
} else {
tablet.setLastStatusCheckTime(checkStartTime);
}
}
idx++;
}
}
}
}
if (isGroupStable) {
colocateIndex.markGroupStable(groupId, true);
} else {
colocateIndex.markGroupUnstable(groupId, true);
}
} finally {
db.readUnlock();
}
}
} | class ColocateTableBalancer extends LeaderDaemon {
// Class-wide logger for the colocate balancer daemon.
private static final Logger LOG = LogManager.getLogger(ColocateTableBalancer.class);
// How often the daemon wakes up to relocate/balance and then match colocate groups: 20 seconds.
private static final long CHECK_INTERVAL_MS = 20 * 1000L;
// Private constructor: instances are only created through getInstance() (singleton).
private ColocateTableBalancer(long intervalMs) {
super("colocate group clone checker", intervalMs);
}
// Lazily-initialized singleton instance, created on first call to getInstance().
private static ColocateTableBalancer INSTANCE = null;
/**
 * Returns the process-wide singleton, creating it lazily on first use.
 *
 * Declared {@code synchronized} so that concurrent first calls cannot each observe
 * {@code INSTANCE == null} and create two daemons; the original unsynchronized
 * lazy initialization was not thread-safe.
 *
 * @return the shared ColocateTableBalancer instance
 */
public static synchronized ColocateTableBalancer getInstance() {
    if (INSTANCE == null) {
        INSTANCE = new ColocateTableBalancer(CHECK_INTERVAL_MS);
    }
    return INSTANCE;
}
/*
 * Each round, we do 2 steps:
 * 1. Relocate and balance group:
 *    If a backend is not available, find a new backend to replace it,
 *    and after all unavailable backends have been replaced, balance the group.
 *
 * 2. Match group:
 *    If a replica mismatches the backends in a group, that group is marked as unstable,
 *    and the tablet is passed to the TabletScheduler.
 *    Otherwise, mark the group as stable.
 */
@Override
protected void runAfterCatalogReady() {
relocateAndBalanceGroup();
matchGroup();
}
/*
* relocate and balance group
* here we just let replicas in colocate table evenly distributed in cluster, not consider the
* cluster load statistic.
* for example:
* currently there are 4 backends A B C D with following load:
*
* +-+
* | |
* +-+ +-+ +-+ | |
* | | | | | | | |
* +-+ +-+ +-+ +-+
* A B C D
*
* And colocate group balancer will still evenly distribute the replicas to all 4 backends, not
* just 3 low load backends.
*
* X
* X
* X X X +-+
* X X X | |
* +-+ +-+ +-+ | |
* | | | | | | | |
* +-+ +-+ +-+ +-+
* A B C D
*
* So After colocate balance, the cluster may still 'unbalanced' from a global perspective.
* And the LoadBalancer will balance the non-colocate table's replicas to make the
* cluster balance, eventually.
*
* X X X X
* X X X X
* +-+ +-+ +-+ +-+
* | | | | | | | |
* | | | | | | | |
* +-+ +-+ +-+ +-+
* A B C D
*/
/**
 * Step 1 of a balancer round: for every colocate group, replace bucket slots that live on
 * unavailable backends and then even out the replica distribution across backends.
 * A changed bucket sequence is persisted to the edit log so it survives an FE restart.
 */
private void relocateAndBalanceGroup() {
    // Honor the global kill-switch for colocate balancing.
    if (Config.disable_colocate_balance) {
        return;
    }
    GlobalStateMgr stateMgr = GlobalStateMgr.getCurrentState();
    ColocateTableIndex groupIndex = stateMgr.getColocateTableIndex();
    SystemInfoService systemInfo = GlobalStateMgr.getCurrentSystemInfo();
    for (GroupId groupId : groupIndex.getAllGroupIds()) {
        Database db = stateMgr.getDbIncludeRecycleBin(groupId.dbId);
        if (db == null) {
            // Database was dropped and purged since the group was created.
            continue;
        }
        // Load statistics may not be ready right after FE startup; skip this round then.
        ClusterLoadStatistic statistic = stateMgr.getTabletScheduler().getLoadStatistic();
        if (statistic == null) {
            continue;
        }
        List<List<Long>> backendsPerBucketSeq = groupIndex.getBackendsPerBucketSeq(groupId);
        if (backendsPerBucketSeq.isEmpty()) {
            continue;
        }
        Set<Long> unavailableBeIdsInGroup = getUnavailableBeIdsInGroup(systemInfo, groupIndex, groupId);
        List<Long> availableBeIds = getAvailableBeIds(systemInfo);
        List<List<Long>> balancedBackendsPerBucketSeq = Lists.newArrayList();
        boolean changed = relocateAndBalance(groupId, unavailableBeIdsInGroup, availableBeIds,
                groupIndex, systemInfo, statistic, balancedBackendsPerBucketSeq);
        if (changed) {
            // Persist the new bucket sequence and replicate it through the edit log.
            groupIndex.addBackendsPerBucketSeq(groupId, balancedBackendsPerBucketSeq);
            ColocatePersistInfo info =
                    ColocatePersistInfo.createForBackendsPerBucketSeq(groupId, balancedBackendsPerBucketSeq);
            stateMgr.getEditLog().logColocateBackendsPerBucketSeq(info);
            LOG.info("balance colocate group {}. now backends per bucket sequence is: {}", groupId,
                    balancedBackendsPerBucketSeq);
        }
    }
}
/*
* Check every tablet of a group, if replica's location does not match backends in group, relocating those
* replicas, and mark that group as unstable.
* If every replicas match the backends in group, mark that group as stable.
*/
/*
* The balance logic is as follow:
*
* All backends: A,B,C,D,E,F,G,H,I,J
*
* One group's buckets sequence:
*
* Buckets sequence: 0 1 2 3
* Backend set: A A A A
* B D F H
* C E G I
*
* Then each backend has different replica num:
*
* Backends: A B C D E F G H I J
* Replica num: 4 1 1 1 1 1 1 1 1 0
*
* The goal of balance is to evenly distribute replicas on all backends. For this example, we want the
* following result (one possible result):
*
* Backends: A B C D E F G H I J
* Replica num: 2 2 1 1 1 1 1 1 1 1
*
* Algorithm:
* 0. Generate the flat list of backends per bucket sequence:
* A B C A D E A F G A H I
* 1. Sort backends order by replication num and load score for same replication num backends, descending:
* A B C D E F G H I J
* 2. Check the diff of the first backend(A)'s replica num and last backend(J)'s replica num.
* If diff is less or equal than 1, we consider this group as balance. Jump to step 5.
* 3. Else, Replace the first occurrence of Backend A in flat list with Backend J.
* J B C A D E A F G A H I
* 4. Recalculate the replica num of each backend and go to step 1.
* 5. We should get the following flat list(one possible result):
* J B C J D E A F G A H I
* Partition this flat list by replication num:
* [J B C] [J D E] [A F G] [A H I]
* And this is our new balanced backends per bucket sequence.
*
* relocate is similar to balance, but choosing unavailable be as src, and move all bucketIds on unavailable be to
* low be
*
* Return true if backends per bucket sequence change and new sequence is saved in balancedBackendsPerBucketSeq.
* Return false if nothing changed.
*/
/*
 * Repair-then-balance pass over one group's flat bucket sequence.
 * Relocation (moving slots off unavailable backends) takes priority; once nothing is left
 * to relocate, slots are moved from the most-loaded backend to the least-loaded ones until
 * the replica-count spread is <= 1. See the algorithm walk-through in the comment above.
 *
 * Returns true and fills balancedBackendsPerBucketSeq if anything changed; returns false
 * (leaving balancedBackendsPerBucketSeq untouched) otherwise.
 */
private boolean relocateAndBalance(GroupId groupId, Set<Long> unavailableBeIds, List<Long> availableBeIds,
                                   ColocateTableIndex colocateIndex, SystemInfoService infoService,
                                   ClusterLoadStatistic statistic, List<List<Long>> balancedBackendsPerBucketSeq) {
    ColocateGroupSchema groupSchema = colocateIndex.getGroupSchema(groupId);
    int replicationNum = groupSchema.getReplicationNum();
    List<List<Long>> backendsPerBucketSeq = Lists.newArrayList(colocateIndex.getBackendsPerBucketSeq(groupId));
    // Work on the flattened sequence; each run of replicationNum entries is one bucket.
    List<Long> flatBackendsPerBucketSeq =
            backendsPerBucketSeq.stream().flatMap(List::stream).collect(Collectors.toList());
    boolean isChanged = false;
    OUT:
    while (true) {
        // Re-partitioned every round so the per-bucket views reflect the latest flat list.
        backendsPerBucketSeq = Lists.partition(flatBackendsPerBucketSeq, replicationNum);
        List<List<String>> hostsPerBucketSeq = getHostsPerBucketSeq(backendsPerBucketSeq, infoService);
        Preconditions
                .checkState(hostsPerBucketSeq != null && backendsPerBucketSeq.size() == hostsPerBucketSeq.size());
        long srcBeId = -1;
        List<Integer> srcBeSeqIndexes = null;
        boolean hasUnavailableBe = false;
        // Relocation first: pick the first unavailable backend that still owns slots.
        for (Long beId : unavailableBeIds) {
            srcBeSeqIndexes = getBeSeqIndexes(flatBackendsPerBucketSeq, beId);
            if (srcBeSeqIndexes.size() > 0) {
                srcBeId = beId;
                hasUnavailableBe = true;
                break;
            }
        }
        // Candidates sorted by replica count (desc), ties broken by load score (desc).
        List<Map.Entry<Long, Long>> backendWithReplicaNum =
                getSortedBackendReplicaNumPairs(availableBeIds, unavailableBeIds, statistic,
                        flatBackendsPerBucketSeq);
        if (srcBeSeqIndexes == null || srcBeSeqIndexes.size() <= 0) {
            // Nothing to relocate: balance instead, draining the most-loaded backend.
            if (backendWithReplicaNum.size() <= 1) {
                break;
            }
            srcBeId = backendWithReplicaNum.get(0).getKey();
            srcBeSeqIndexes = getBeSeqIndexes(flatBackendsPerBucketSeq, srcBeId);
        } else if (backendWithReplicaNum.size() <= 0) {
            // Slots need relocation but there are no destination candidates at all.
            break;
        }
        int leftBound;
        if (hasUnavailableBe) {
            // When relocating, even the top candidate (index 0) may receive slots.
            leftBound = -1;
        } else {
            // When balancing, index 0 is the source itself and must be excluded.
            leftBound = 0;
        }
        int j = backendWithReplicaNum.size() - 1;
        boolean isThisRoundChanged = false;
        INNER:
        while (j > leftBound) {
            Map.Entry<Long, Long> lowBackend = backendWithReplicaNum.get(j);
            // Balanced enough once the src/dest replica-count gap is <= 1.
            if ((!hasUnavailableBe) && (srcBeSeqIndexes.size() - lowBackend.getValue()) <= 1) {
                break OUT;
            }
            long destBeId = lowBackend.getKey();
            Backend destBe = infoService.getBackend(destBeId);
            if (destBe == null) {
                LOG.info("backend {} does not exist", destBeId);
                return false;
            }
            // Move one slot whose bucket does not already contain the destination
            // backend id or its host (a bucket must not have two replicas on one host).
            for (int seqIndex : srcBeSeqIndexes) {
                int bucketIndex = seqIndex / replicationNum;
                List<Long> backendsSet = backendsPerBucketSeq.get(bucketIndex);
                List<String> hostsSet = hostsPerBucketSeq.get(bucketIndex);
                if (!backendsSet.contains(destBeId) && !hostsSet.contains(destBe.getHost())) {
                    Preconditions.checkState(backendsSet.contains(srcBeId), srcBeId);
                    flatBackendsPerBucketSeq.set(seqIndex, destBeId);
                    LOG.info("replace backend {} with backend {} in colocate group {}", srcBeId, destBeId, groupId);
                    isChanged = true;
                    isThisRoundChanged = true;
                    break INNER;
                }
            }
            LOG.info("unable to replace backend {} with backend {} in colocate group {}",
                    srcBeId, destBeId, groupId);
            j--;
        }
        if (!isThisRoundChanged) {
            // No destination worked this round; further rounds would repeat the same choices.
            LOG.info("all backends are checked but this round is not changed, " +
                    "end outer loop in colocate group {}", groupId);
            break;
        }
    }
    if (isChanged) {
        balancedBackendsPerBucketSeq.addAll(Lists.partition(flatBackendsPerBucketSeq, replicationNum));
    }
    return isChanged;
}
/**
 * Maps every bucket's backend-id list to the corresponding list of backend hosts.
 * Ids whose backend no longer exists are logged and skipped, so an inner list may be
 * shorter than its id list; the outer list always matches backendsPerBucketSeq in size.
 */
private List<List<String>> getHostsPerBucketSeq(List<List<Long>> backendsPerBucketSeq,
                                                SystemInfoService infoService) {
    List<List<String>> result = Lists.newArrayList();
    for (List<Long> bucketBackendIds : backendsPerBucketSeq) {
        List<String> bucketHosts = Lists.newArrayList();
        for (Long backendId : bucketBackendIds) {
            Backend backend = infoService.getBackend(backendId);
            if (backend != null) {
                bucketHosts.add(backend.getHost());
            } else {
                LOG.info("backend {} does not exist", backendId);
            }
        }
        result.add(bucketHosts);
    }
    return result;
}
/**
 * Pairs each candidate backend with the number of replicas it currently holds in this
 * group's flat bucket sequence, sorted by replica count descending; ties are broken by
 * mixed load score descending (the more-loaded backend sorts first). Unavailable backends
 * are excluded; available backends holding no replicas are included with a count of 0.
 *
 * Fix: the original comparator used {@code (int) (entry2.getValue() - entry1.getValue())},
 * which can overflow when narrowing the long difference to int; Long.compare (and
 * Double.compare for the tie-break) is overflow-safe and expresses the same ordering.
 */
private List<Map.Entry<Long, Long>> getSortedBackendReplicaNumPairs(List<Long> allAvailBackendIds,
                                                                    Set<Long> unavailBackendIds,
                                                                    ClusterLoadStatistic statistic,
                                                                    List<Long> flatBackendsPerBucketSeq) {
    // Count replicas per backend over the flat bucket sequence.
    Map<Long, Long> backendToReplicaNum = flatBackendsPerBucketSeq.stream()
            .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
    // Unavailable backends must not be chosen as balance destinations.
    for (Long backendId : unavailBackendIds) {
        backendToReplicaNum.remove(backendId);
    }
    // Backends that hold nothing yet are still valid destinations; give them count 0.
    for (Long backendId : allAvailBackendIds) {
        if (!backendToReplicaNum.containsKey(backendId)) {
            backendToReplicaNum.put(backendId, 0L);
        }
    }
    return backendToReplicaNum
            .entrySet()
            .stream()
            .sorted((entry1, entry2) -> {
                if (!entry1.getValue().equals(entry2.getValue())) {
                    // Descending by replica count; Long.compare avoids int-cast overflow.
                    return Long.compare(entry2.getValue(), entry1.getValue());
                }
                BackendLoadStatistic beStat1 = statistic.getBackendLoadStatistic(entry1.getKey());
                BackendLoadStatistic beStat2 = statistic.getBackendLoadStatistic(entry2.getKey());
                if (beStat1 == null || beStat2 == null) {
                    return 0;
                }
                double loadScore1 = beStat1.getMixLoadScore();
                double loadScore2 = beStat2.getMixLoadScore();
                // Scores closer than 1e-6 are treated as equal.
                if (Math.abs(loadScore1 - loadScore2) < 1e-6) {
                    return 0;
                }
                // Descending by load score.
                return Double.compare(loadScore2, loadScore1);
            })
            .collect(Collectors.toList());
}
/*
* get the array indexes of elements in flatBackendsPerBucketSeq which equals to beId
* eg:
* flatBackendsPerBucketSeq:
* A B C A D E A F G A H I
* and srcBeId is A.
* so seqIndexes is:
* 0 3 6 9
*/
/*
 * Returns the positions in flatBackendsPerBucketSeq whose element equals beId.
 * e.g. for [A B C A D E A F G A H I] and beId A, the result is [0, 3, 6, 9].
 */
private List<Integer> getBeSeqIndexes(List<Long> flatBackendsPerBucketSeq, long beId) {
    List<Integer> seqIndexes = Lists.newArrayList();
    for (int idx = 0; idx < flatBackendsPerBucketSeq.size(); idx++) {
        if (flatBackendsPerBucketSeq.get(idx).equals(beId)) {
            seqIndexes.add(idx);
        }
    }
    return seqIndexes;
}
/**
 * Collects the ids of all backends referenced by the given colocate group that are
 * currently considered unavailable (see checkBackendAvailable).
 */
private Set<Long> getUnavailableBeIdsInGroup(SystemInfoService infoService, ColocateTableIndex colocateIndex,
                                             GroupId groupId) {
    Set<Long> unavailableBeIds = Sets.newHashSet();
    for (Long backendId : colocateIndex.getBackendsByGroup(groupId)) {
        if (checkBackendAvailable(backendId, infoService)) {
            continue;
        }
        unavailableBeIds.add(backendId);
    }
    return unavailableBeIds;
}
/**
 * Returns every backend id in the cluster that is currently available for replica
 * placement (see checkBackendAvailable).
 */
private List<Long> getAvailableBeIds(SystemInfoService infoService) {
    List<Long> availableBeIds = Lists.newArrayList();
    for (Long backendId : infoService.getBackendIds(false)) {
        if (checkBackendAvailable(backendId, infoService)) {
            availableBeIds.add(backendId);
        }
    }
    return availableBeIds;
}
/**
 * Decides whether a backend can host colocate replicas.
 * A backend that has been down for only a short period (no more than twice
 * Config.tablet_repair_delay_factor_second) is still treated as available, so transient
 * restarts do not trigger relocation; decommissioned backends are always unavailable.
 */
private boolean checkBackendAvailable(Long backendId, SystemInfoService infoService) {
    Backend be = infoService.getBackend(backendId);
    if (be == null) {
        return false;
    }
    if (be.isAvailable()) {
        return true;
    }
    if (be.isDecommissioned()) {
        return false;
    }
    // Down but not decommissioned: tolerate a short outage window.
    long downMs = System.currentTimeMillis() - be.getLastUpdateMs();
    return be.isAlive() || downMs <= Config.tablet_repair_delay_factor_second * 1000 * 2;
}
} | class ColocateTableBalancer extends LeaderDaemon {
// Class-wide logger for the colocate balancer daemon.
private static final Logger LOG = LogManager.getLogger(ColocateTableBalancer.class);
// How often the daemon wakes up to relocate/balance and then match colocate groups: 20 seconds.
private static final long CHECK_INTERVAL_MS = 20 * 1000L;
// Private constructor: instances are only created through getInstance() (singleton).
private ColocateTableBalancer(long intervalMs) {
super("colocate group clone checker", intervalMs);
}
// Lazily-initialized singleton instance, created on first call to getInstance().
private static ColocateTableBalancer INSTANCE = null;
/**
 * Returns the process-wide singleton, creating it lazily on first use.
 *
 * Declared {@code synchronized} so that concurrent first calls cannot each observe
 * {@code INSTANCE == null} and create two daemons; the original unsynchronized
 * lazy initialization was not thread-safe.
 *
 * @return the shared ColocateTableBalancer instance
 */
public static synchronized ColocateTableBalancer getInstance() {
    if (INSTANCE == null) {
        INSTANCE = new ColocateTableBalancer(CHECK_INTERVAL_MS);
    }
    return INSTANCE;
}
/*
 * Each round, we do 2 steps:
 * 1. Relocate and balance group:
 *    If a backend is not available, find a new backend to replace it,
 *    and after all unavailable backends have been replaced, balance the group.
 *
 * 2. Match group:
 *    If a replica mismatches the backends in a group, that group is marked as unstable,
 *    and the tablet is passed to the TabletScheduler.
 *    Otherwise, mark the group as stable.
 */
@Override
protected void runAfterCatalogReady() {
relocateAndBalanceGroup();
matchGroup();
}
/*
* relocate and balance group
* here we just let replicas in colocate table evenly distributed in cluster, not consider the
* cluster load statistic.
* for example:
* currently there are 4 backends A B C D with following load:
*
* +-+
* | |
* +-+ +-+ +-+ | |
* | | | | | | | |
* +-+ +-+ +-+ +-+
* A B C D
*
* And colocate group balancer will still evenly distribute the replicas to all 4 backends, not
* just 3 low load backends.
*
* X
* X
* X X X +-+
* X X X | |
* +-+ +-+ +-+ | |
* | | | | | | | |
* +-+ +-+ +-+ +-+
* A B C D
*
* So After colocate balance, the cluster may still 'unbalanced' from a global perspective.
* And the LoadBalancer will balance the non-colocate table's replicas to make the
* cluster balance, eventually.
*
* X X X X
* X X X X
* +-+ +-+ +-+ +-+
* | | | | | | | |
* | | | | | | | |
* +-+ +-+ +-+ +-+
* A B C D
*/
/**
 * Step 1 of a balancer round: for every colocate group, replace bucket slots that live on
 * unavailable backends and then even out the replica distribution across backends.
 * A changed bucket sequence is persisted to the edit log so it survives an FE restart.
 */
private void relocateAndBalanceGroup() {
    // Honor the global kill-switch for colocate balancing.
    if (Config.disable_colocate_balance) {
        return;
    }
    GlobalStateMgr stateMgr = GlobalStateMgr.getCurrentState();
    ColocateTableIndex groupIndex = stateMgr.getColocateTableIndex();
    SystemInfoService systemInfo = GlobalStateMgr.getCurrentSystemInfo();
    for (GroupId groupId : groupIndex.getAllGroupIds()) {
        Database db = stateMgr.getDbIncludeRecycleBin(groupId.dbId);
        if (db == null) {
            // Database was dropped and purged since the group was created.
            continue;
        }
        // Load statistics may not be ready right after FE startup; skip this round then.
        ClusterLoadStatistic statistic = stateMgr.getTabletScheduler().getLoadStatistic();
        if (statistic == null) {
            continue;
        }
        List<List<Long>> backendsPerBucketSeq = groupIndex.getBackendsPerBucketSeq(groupId);
        if (backendsPerBucketSeq.isEmpty()) {
            continue;
        }
        Set<Long> unavailableBeIdsInGroup = getUnavailableBeIdsInGroup(systemInfo, groupIndex, groupId);
        List<Long> availableBeIds = getAvailableBeIds(systemInfo);
        List<List<Long>> balancedBackendsPerBucketSeq = Lists.newArrayList();
        boolean changed = relocateAndBalance(groupId, unavailableBeIdsInGroup, availableBeIds,
                groupIndex, systemInfo, statistic, balancedBackendsPerBucketSeq);
        if (changed) {
            // Persist the new bucket sequence and replicate it through the edit log.
            groupIndex.addBackendsPerBucketSeq(groupId, balancedBackendsPerBucketSeq);
            ColocatePersistInfo info =
                    ColocatePersistInfo.createForBackendsPerBucketSeq(groupId, balancedBackendsPerBucketSeq);
            stateMgr.getEditLog().logColocateBackendsPerBucketSeq(info);
            LOG.info("balance colocate group {}. now backends per bucket sequence is: {}", groupId,
                    balancedBackendsPerBucketSeq);
        }
    }
}
/*
* Check every tablet of a group, if replica's location does not match backends in group, relocating those
* replicas, and mark that group as unstable.
* If every replicas match the backends in group, mark that group as stable.
*/
/*
* The balance logic is as follow:
*
* All backends: A,B,C,D,E,F,G,H,I,J
*
* One group's buckets sequence:
*
* Buckets sequence: 0 1 2 3
* Backend set: A A A A
* B D F H
* C E G I
*
* Then each backend has different replica num:
*
* Backends: A B C D E F G H I J
* Replica num: 4 1 1 1 1 1 1 1 1 0
*
* The goal of balance is to evenly distribute replicas on all backends. For this example, we want the
* following result (one possible result):
*
* Backends: A B C D E F G H I J
* Replica num: 2 2 1 1 1 1 1 1 1 1
*
* Algorithm:
* 0. Generate the flat list of backends per bucket sequence:
* A B C A D E A F G A H I
* 1. Sort backends order by replication num and load score for same replication num backends, descending:
* A B C D E F G H I J
* 2. Check the diff of the first backend(A)'s replica num and last backend(J)'s replica num.
* If diff is less or equal than 1, we consider this group as balance. Jump to step 5.
* 3. Else, Replace the first occurrence of Backend A in flat list with Backend J.
* J B C A D E A F G A H I
* 4. Recalculate the replica num of each backend and go to step 1.
* 5. We should get the following flat list(one possible result):
* J B C J D E A F G A H I
* Partition this flat list by replication num:
* [J B C] [J D E] [A F G] [A H I]
* And this is our new balanced backends per bucket sequence.
*
* relocate is similar to balance, but choosing unavailable be as src, and move all bucketIds on unavailable be to
* low be
*
* Return true if backends per bucket sequence change and new sequence is saved in balancedBackendsPerBucketSeq.
* Return false if nothing changed.
*/
/*
 * Repair-then-balance pass over one group's flat bucket sequence.
 * Relocation (moving slots off unavailable backends) takes priority; once nothing is left
 * to relocate, slots are moved from the most-loaded backend to the least-loaded ones until
 * the replica-count spread is <= 1. See the algorithm walk-through in the comment above.
 *
 * Returns true and fills balancedBackendsPerBucketSeq if anything changed; returns false
 * (leaving balancedBackendsPerBucketSeq untouched) otherwise.
 */
private boolean relocateAndBalance(GroupId groupId, Set<Long> unavailableBeIds, List<Long> availableBeIds,
                                   ColocateTableIndex colocateIndex, SystemInfoService infoService,
                                   ClusterLoadStatistic statistic, List<List<Long>> balancedBackendsPerBucketSeq) {
    ColocateGroupSchema groupSchema = colocateIndex.getGroupSchema(groupId);
    int replicationNum = groupSchema.getReplicationNum();
    List<List<Long>> backendsPerBucketSeq = Lists.newArrayList(colocateIndex.getBackendsPerBucketSeq(groupId));
    // Work on the flattened sequence; each run of replicationNum entries is one bucket.
    List<Long> flatBackendsPerBucketSeq =
            backendsPerBucketSeq.stream().flatMap(List::stream).collect(Collectors.toList());
    boolean isChanged = false;
    OUT:
    while (true) {
        // Re-partitioned every round so the per-bucket views reflect the latest flat list.
        backendsPerBucketSeq = Lists.partition(flatBackendsPerBucketSeq, replicationNum);
        List<List<String>> hostsPerBucketSeq = getHostsPerBucketSeq(backendsPerBucketSeq, infoService);
        Preconditions
                .checkState(hostsPerBucketSeq != null && backendsPerBucketSeq.size() == hostsPerBucketSeq.size());
        long srcBeId = -1;
        List<Integer> srcBeSeqIndexes = null;
        boolean hasUnavailableBe = false;
        // Relocation first: pick the first unavailable backend that still owns slots.
        for (Long beId : unavailableBeIds) {
            srcBeSeqIndexes = getBeSeqIndexes(flatBackendsPerBucketSeq, beId);
            if (srcBeSeqIndexes.size() > 0) {
                srcBeId = beId;
                hasUnavailableBe = true;
                break;
            }
        }
        // Candidates sorted by replica count (desc), ties broken by load score (desc).
        List<Map.Entry<Long, Long>> backendWithReplicaNum =
                getSortedBackendReplicaNumPairs(availableBeIds, unavailableBeIds, statistic,
                        flatBackendsPerBucketSeq);
        if (srcBeSeqIndexes == null || srcBeSeqIndexes.size() <= 0) {
            // Nothing to relocate: balance instead, draining the most-loaded backend.
            if (backendWithReplicaNum.size() <= 1) {
                break;
            }
            srcBeId = backendWithReplicaNum.get(0).getKey();
            srcBeSeqIndexes = getBeSeqIndexes(flatBackendsPerBucketSeq, srcBeId);
        } else if (backendWithReplicaNum.size() <= 0) {
            // Slots need relocation but there are no destination candidates at all.
            break;
        }
        int leftBound;
        if (hasUnavailableBe) {
            // When relocating, even the top candidate (index 0) may receive slots.
            leftBound = -1;
        } else {
            // When balancing, index 0 is the source itself and must be excluded.
            leftBound = 0;
        }
        int j = backendWithReplicaNum.size() - 1;
        boolean isThisRoundChanged = false;
        INNER:
        while (j > leftBound) {
            Map.Entry<Long, Long> lowBackend = backendWithReplicaNum.get(j);
            // Balanced enough once the src/dest replica-count gap is <= 1.
            if ((!hasUnavailableBe) && (srcBeSeqIndexes.size() - lowBackend.getValue()) <= 1) {
                break OUT;
            }
            long destBeId = lowBackend.getKey();
            Backend destBe = infoService.getBackend(destBeId);
            if (destBe == null) {
                LOG.info("backend {} does not exist", destBeId);
                return false;
            }
            // Move one slot whose bucket does not already contain the destination
            // backend id or its host (a bucket must not have two replicas on one host).
            for (int seqIndex : srcBeSeqIndexes) {
                int bucketIndex = seqIndex / replicationNum;
                List<Long> backendsSet = backendsPerBucketSeq.get(bucketIndex);
                List<String> hostsSet = hostsPerBucketSeq.get(bucketIndex);
                if (!backendsSet.contains(destBeId) && !hostsSet.contains(destBe.getHost())) {
                    Preconditions.checkState(backendsSet.contains(srcBeId), srcBeId);
                    flatBackendsPerBucketSeq.set(seqIndex, destBeId);
                    LOG.info("replace backend {} with backend {} in colocate group {}", srcBeId, destBeId, groupId);
                    isChanged = true;
                    isThisRoundChanged = true;
                    break INNER;
                }
            }
            LOG.info("unable to replace backend {} with backend {} in colocate group {}",
                    srcBeId, destBeId, groupId);
            j--;
        }
        if (!isThisRoundChanged) {
            // No destination worked this round; further rounds would repeat the same choices.
            LOG.info("all backends are checked but this round is not changed, " +
                    "end outer loop in colocate group {}", groupId);
            break;
        }
    }
    if (isChanged) {
        balancedBackendsPerBucketSeq.addAll(Lists.partition(flatBackendsPerBucketSeq, replicationNum));
    }
    return isChanged;
}
/**
 * Maps every bucket's backend-id list to the corresponding list of backend hosts.
 * Ids whose backend no longer exists are logged and skipped, so an inner list may be
 * shorter than its id list; the outer list always matches backendsPerBucketSeq in size.
 */
private List<List<String>> getHostsPerBucketSeq(List<List<Long>> backendsPerBucketSeq,
                                                SystemInfoService infoService) {
    List<List<String>> result = Lists.newArrayList();
    for (List<Long> bucketBackendIds : backendsPerBucketSeq) {
        List<String> bucketHosts = Lists.newArrayList();
        for (Long backendId : bucketBackendIds) {
            Backend backend = infoService.getBackend(backendId);
            if (backend != null) {
                bucketHosts.add(backend.getHost());
            } else {
                LOG.info("backend {} does not exist", backendId);
            }
        }
        result.add(bucketHosts);
    }
    return result;
}
/**
 * Pairs each candidate backend with the number of replicas it currently holds in this
 * group's flat bucket sequence, sorted by replica count descending; ties are broken by
 * mixed load score descending (the more-loaded backend sorts first). Unavailable backends
 * are excluded; available backends holding no replicas are included with a count of 0.
 *
 * Fix: the original comparator used {@code (int) (entry2.getValue() - entry1.getValue())},
 * which can overflow when narrowing the long difference to int; Long.compare (and
 * Double.compare for the tie-break) is overflow-safe and expresses the same ordering.
 */
private List<Map.Entry<Long, Long>> getSortedBackendReplicaNumPairs(List<Long> allAvailBackendIds,
                                                                    Set<Long> unavailBackendIds,
                                                                    ClusterLoadStatistic statistic,
                                                                    List<Long> flatBackendsPerBucketSeq) {
    // Count replicas per backend over the flat bucket sequence.
    Map<Long, Long> backendToReplicaNum = flatBackendsPerBucketSeq.stream()
            .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
    // Unavailable backends must not be chosen as balance destinations.
    for (Long backendId : unavailBackendIds) {
        backendToReplicaNum.remove(backendId);
    }
    // Backends that hold nothing yet are still valid destinations; give them count 0.
    for (Long backendId : allAvailBackendIds) {
        if (!backendToReplicaNum.containsKey(backendId)) {
            backendToReplicaNum.put(backendId, 0L);
        }
    }
    return backendToReplicaNum
            .entrySet()
            .stream()
            .sorted((entry1, entry2) -> {
                if (!entry1.getValue().equals(entry2.getValue())) {
                    // Descending by replica count; Long.compare avoids int-cast overflow.
                    return Long.compare(entry2.getValue(), entry1.getValue());
                }
                BackendLoadStatistic beStat1 = statistic.getBackendLoadStatistic(entry1.getKey());
                BackendLoadStatistic beStat2 = statistic.getBackendLoadStatistic(entry2.getKey());
                if (beStat1 == null || beStat2 == null) {
                    return 0;
                }
                double loadScore1 = beStat1.getMixLoadScore();
                double loadScore2 = beStat2.getMixLoadScore();
                // Scores closer than 1e-6 are treated as equal.
                if (Math.abs(loadScore1 - loadScore2) < 1e-6) {
                    return 0;
                }
                // Descending by load score.
                return Double.compare(loadScore2, loadScore1);
            })
            .collect(Collectors.toList());
}
/*
* get the array indexes of elements in flatBackendsPerBucketSeq which equals to beId
* eg:
* flatBackendsPerBucketSeq:
* A B C A D E A F G A H I
* and srcBeId is A.
* so seqIndexes is:
* 0 3 6 9
*/
/*
 * Returns the positions in flatBackendsPerBucketSeq whose element equals beId.
 * e.g. for [A B C A D E A F G A H I] and beId A, the result is [0, 3, 6, 9].
 */
private List<Integer> getBeSeqIndexes(List<Long> flatBackendsPerBucketSeq, long beId) {
    List<Integer> seqIndexes = Lists.newArrayList();
    for (int idx = 0; idx < flatBackendsPerBucketSeq.size(); idx++) {
        if (flatBackendsPerBucketSeq.get(idx).equals(beId)) {
            seqIndexes.add(idx);
        }
    }
    return seqIndexes;
}
/**
 * Collects the ids of all backends referenced by the given colocate group that are
 * currently considered unavailable (see checkBackendAvailable).
 */
private Set<Long> getUnavailableBeIdsInGroup(SystemInfoService infoService, ColocateTableIndex colocateIndex,
                                             GroupId groupId) {
    Set<Long> unavailableBeIds = Sets.newHashSet();
    for (Long backendId : colocateIndex.getBackendsByGroup(groupId)) {
        if (checkBackendAvailable(backendId, infoService)) {
            continue;
        }
        unavailableBeIds.add(backendId);
    }
    return unavailableBeIds;
}
/**
 * Returns every backend id in the cluster that is currently available for replica
 * placement (see checkBackendAvailable).
 */
private List<Long> getAvailableBeIds(SystemInfoService infoService) {
    List<Long> availableBeIds = Lists.newArrayList();
    for (Long backendId : infoService.getBackendIds(false)) {
        if (checkBackendAvailable(backendId, infoService)) {
            availableBeIds.add(backendId);
        }
    }
    return availableBeIds;
}
/**
 * Decides whether a backend can host colocate replicas.
 * A backend that has been down for only a short period (no more than twice
 * Config.tablet_repair_delay_factor_second) is still treated as available, so transient
 * restarts do not trigger relocation; decommissioned backends are always unavailable.
 */
private boolean checkBackendAvailable(Long backendId, SystemInfoService infoService) {
    Backend be = infoService.getBackend(backendId);
    if (be == null) {
        return false;
    }
    if (be.isAvailable()) {
        return true;
    }
    if (be.isDecommissioned()) {
        return false;
    }
    // Down but not decommissioned: tolerate a short outage window.
    long downMs = System.currentTimeMillis() - be.getLastUpdateMs();
    return be.isAlive() || downMs <= Config.tablet_repair_delay_factor_second * 1000 * 2;
}
} |
Why not use Set<TNetworkAddress>? In that way, hosts is not needed. | private void computeFragmentHosts() throws Exception {
for (int i = fragments.size() - 1; i >= 0; --i) {
PlanFragment fragment = fragments.get(i);
FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
if (fragment.getDataPartition() == DataPartition.UNPARTITIONED) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
LOG.warn("DataPartition UNPARTITIONED, no scanNode Backend");
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
continue;
}
PlanNode leftMostNode = findLeftmostNode(fragment.getPlanRoot());
boolean hasUnionNode = containsUnionNode(fragment.getPlanRoot());
if (!(leftMostNode instanceof ScanNode) && !hasUnionNode) {
PlanFragmentId inputFragmentIdx =
fragments.get(i).getChild(0).getFragmentId();
int doris_exchange_instances= -1;
if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable() != null) {
doris_exchange_instances = ConnectContext.get().getSessionVariable().getDorisExchangeInstances();
}
if (doris_exchange_instances > 0 && fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams.size() > doris_exchange_instances) {
List<TNetworkAddress> hosts = Lists.newArrayList();
Set<String> cache = new HashSet<String>();
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
String hostPort = execParams.host.getHostname() + execParams.host.getPort();
if (!cache.contains(hostPort)) {
hosts.add(execParams.host);
cache.add(hostPort);
}
}
Collections.shuffle(hosts, instanceRandom);
for (int index = 0; index < doris_exchange_instances; index++) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, hosts.get(index % hosts.size()),0, params);
params.instanceExecParams.add(instanceParam);
}
} else {
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execParams.host,0, params);
params.instanceExecParams.add(instanceParam);
}
}
Collections.shuffle(params.instanceExecParams, instanceRandom);
continue;
}
if (bucketSeqToAddress.size() > 0 && isColocateJoin(fragment.getPlanRoot())) {
for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> scanRanges : bucketSeqToScanRange.entrySet()) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, bucketSeqToAddress.get(scanRanges.getKey()), 0, params);
Map<Integer, List<TScanRangeParams>> nodeScanRanges = scanRanges.getValue();
for (Map.Entry<Integer, List<TScanRangeParams>> nodeScanRange : nodeScanRanges.entrySet()) {
instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), nodeScanRange.getValue());
}
params.instanceExecParams.add(instanceParam);
}
} else {
Iterator iter = fragmentExecParamsMap.get(fragment.getFragmentId()).scanRangeAssignment.entrySet().iterator();
int parallelExecInstanceNum = fragment.getParallel_exec_num();
while (iter.hasNext()) {
Map.Entry entry = (Map.Entry) iter.next();
TNetworkAddress key = (TNetworkAddress) entry.getKey();
Map<Integer, List<TScanRangeParams>> value = (Map<Integer, List<TScanRangeParams>>) entry.getValue();
for (Integer planNodeId : value.keySet()) {
List<TScanRangeParams> perNodeScanRanges = value.get(planNodeId);
int expectedInstanceNum = 1;
if (parallelExecInstanceNum > 1) {
expectedInstanceNum = Math.min(perNodeScanRanges.size(), parallelExecInstanceNum);
}
List<List<TScanRangeParams>> perInstanceScanRanges = ListUtil.splitBySize(perNodeScanRanges,
expectedInstanceNum);
for (List<TScanRangeParams> scanRangeParams : perInstanceScanRanges) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, key, 0, params);
instanceParam.perNodeScanRanges.put(planNodeId, scanRangeParams);
params.instanceExecParams.add(instanceParam);
}
}
}
}
if (params.instanceExecParams.isEmpty()) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
}
}
} | Set<String> cache = new HashSet<String>(); | private void computeFragmentHosts() throws Exception {
for (int i = fragments.size() - 1; i >= 0; --i) {
PlanFragment fragment = fragments.get(i);
FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
if (fragment.getDataPartition() == DataPartition.UNPARTITIONED) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
LOG.warn("DataPartition UNPARTITIONED, no scanNode Backend");
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
continue;
}
PlanNode leftMostNode = findLeftmostNode(fragment.getPlanRoot());
boolean hasUnionNode = containsUnionNode(fragment.getPlanRoot());
if (!(leftMostNode instanceof ScanNode) && !hasUnionNode) {
PlanFragmentId inputFragmentIdx =
fragments.get(i).getChild(0).getFragmentId();
int exchangeInstances = -1;
if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable() != null) {
exchangeInstances = ConnectContext.get().getSessionVariable().getExchangeInstanceParallel();
}
if (exchangeInstances > 0 && fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams.size() > exchangeInstances) {
Set<TNetworkAddress> hostSet = Sets.newHashSet();
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
hostSet.add(execParams.host);
}
List<TNetworkAddress> hosts = Lists.newArrayList(hostSet);
Collections.shuffle(hosts, instanceRandom);
for (int index = 0; index < exchangeInstances; index++) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, hosts.get(index % hosts.size()), 0, params);
params.instanceExecParams.add(instanceParam);
}
} else {
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execParams.host, 0, params);
params.instanceExecParams.add(instanceParam);
}
}
Collections.shuffle(params.instanceExecParams, instanceRandom);
continue;
}
if (bucketSeqToAddress.size() > 0 && isColocateJoin(fragment.getPlanRoot())) {
for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> scanRanges : bucketSeqToScanRange.entrySet()) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, bucketSeqToAddress.get(scanRanges.getKey()), 0, params);
Map<Integer, List<TScanRangeParams>> nodeScanRanges = scanRanges.getValue();
for (Map.Entry<Integer, List<TScanRangeParams>> nodeScanRange : nodeScanRanges.entrySet()) {
instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), nodeScanRange.getValue());
}
params.instanceExecParams.add(instanceParam);
}
} else {
Iterator iter = fragmentExecParamsMap.get(fragment.getFragmentId()).scanRangeAssignment.entrySet().iterator();
int parallelExecInstanceNum = fragment.getParallel_exec_num();
while (iter.hasNext()) {
Map.Entry entry = (Map.Entry) iter.next();
TNetworkAddress key = (TNetworkAddress) entry.getKey();
Map<Integer, List<TScanRangeParams>> value = (Map<Integer, List<TScanRangeParams>>) entry.getValue();
for (Integer planNodeId : value.keySet()) {
List<TScanRangeParams> perNodeScanRanges = value.get(planNodeId);
int expectedInstanceNum = 1;
if (parallelExecInstanceNum > 1) {
expectedInstanceNum = Math.min(perNodeScanRanges.size(), parallelExecInstanceNum);
}
List<List<TScanRangeParams>> perInstanceScanRanges = ListUtil.splitBySize(perNodeScanRanges,
expectedInstanceNum);
for (List<TScanRangeParams> scanRangeParams : perInstanceScanRanges) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, key, 0, params);
instanceParam.perNodeScanRanges.put(planNodeId, scanRangeParams);
params.instanceExecParams.add(instanceParam);
}
}
}
}
if (params.instanceExecParams.isEmpty()) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
}
}
} | class Coordinator {
    private static final Logger LOG = LogManager.getLogger(Coordinator.class);
    // NOTE(review): SimpleDateFormat is not thread-safe; this shared static
    // formatter is only used from the constructors here — confirm before
    // adding concurrent callers.
    private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    // This frontend's address, advertised to backends as the coordinator.
    private static String localIP = FrontendOptions.getLocalHostAddress();
    // Shared randomness for shuffling exchange hosts/instances.
    private static Random instanceRandom = new Random();
    // Overall query status; the first failure wins (see updateStatus()).
    Status queryStatus = new Status();
    // Execution host -> backend id, filled during scan-range/host assignment.
    Map<TNetworkAddress, Long> addressToBackendID = Maps.newHashMap();
    // Snapshot of live backends in this cluster, taken in prepare().
    private ImmutableMap<Long, Backend> idToBackend = ImmutableMap.of();
    // Thrift descriptor table shipped with every fragment.
    private TDescriptorTable descTable;
    // Per-query globals (now string, timestamp, time zone) sent to backends.
    private TQueryGlobals queryGlobals = new TQueryGlobals();
    // Session-derived query options (mem limit, timeout, ...).
    private TQueryOptions queryOptions;
    // Address backends report status back to (this FE's rpc port).
    private TNetworkAddress coordAddress;
    // Guards the mutable coordinator state below.
    private Lock lock = new ReentrantLock();
    // Set once the last (EOS) result batch has been handed to the caller.
    private boolean returnedAllResults;
    // Profile tree: queryProfile is the root, one child per fragment.
    private RuntimeProfile queryProfile;
    private List<RuntimeProfile> fragmentProfile;
    // Scheduling state per plan fragment.
    private Map<PlanFragmentId, FragmentExecParams> fragmentExecParamsMap = Maps.newHashMap();
    private List<PlanFragment> fragments;
    // One BackendExecState per launched fragment instance (indexed by backend_num).
    private List<BackendExecState> backendExecStates = Lists.newArrayList();
    // Receiver for the root fragment's results (queries with a ResultSink only).
    private ResultReceiver receiver;
    private ConcurrentMap<TUniqueId, BackendExecState> backendExecStateMap =
            Maps.newConcurrentMap();
    private List<ScanNode> scanNodes;
    // Ids of all fragment instances; also sizes the profile latch.
    private Set<TUniqueId> instanceIds = Sets.newHashSet();
    // Counted down as each instance reports done (or forced open on cancel).
    private MarkedCountDownLatch<TUniqueId, Long> profileDoneSignal;
    private boolean isBlockQuery;
    // Rows returned so far; used for early cancel once a LIMIT is satisfied.
    private int numReceivedRows = 0;
    // Load/export side outputs collected from instance reports.
    private List<String> deltaUrls;
    private Map<String, String> loadCounters;
    private String trackingUrl;
    private List<String> exportFiles;
    private List<TTabletCommitInfo> commitInfos = Lists.newArrayList();
    // Load/export job id; -1 for plain queries.
    private long jobId = -1;
    private TUniqueId queryId;
    private TResourceInfo tResourceInfo;
    // Whether to wait briefly for success reports before finishing the profile.
    private boolean needReport;
    private String clusterName;
    // Base for deriving per-instance ids from the query id.
    private final TUniqueId nextInstanceId;
    /**
     * Builds a coordinator for an interactive query: fragments and scan nodes
     * come from the planner, options and time zone from the session.
     */
    public Coordinator(ConnectContext context, Analyzer analyzer, Planner planner) {
        this.isBlockQuery = planner.isBlockQuery();
        this.queryId = context.queryId();
        this.fragments = planner.getFragments();
        this.scanNodes = planner.getScanNodes();
        this.descTable = analyzer.getDescTbl().toThrift();
        this.returnedAllResults = false;
        this.queryOptions = context.getSessionVariable().toThrift();
        this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
        this.queryGlobals.setTimestamp_ms(new Date().getTime());
        // A session zone of "CST" is special-cased to the catalog default zone.
        if (context.getSessionVariable().getTimeZone().equals("CST")) {
            this.queryGlobals.setTime_zone(TimeUtils.DEFAULT_TIME_ZONE);
        } else {
            this.queryGlobals.setTime_zone(context.getSessionVariable().getTimeZone());
        }
        this.tResourceInfo = new TResourceInfo(context.getQualifiedUser(),
                context.getSessionVariable().getResourceGroup());
        this.needReport = context.getSessionVariable().isReportSucc();
        this.clusterName = context.getClusterName();
        // Instance ids share the query id's hi bits; lo bits are offset per
        // instance (see computeFragmentExecParams()).
        this.nextInstanceId = new TUniqueId();
        nextInstanceId.setHi(queryId.hi);
        nextInstanceId.setLo(queryId.lo + 1);
    }
    /**
     * Builds a coordinator for an internal job (load/export): the caller
     * supplies fragments, scan nodes and time zone directly; reporting is
     * always on and the coordinator behaves as a block query.
     */
    public Coordinator(Long jobId, TUniqueId queryId, DescriptorTable descTable,
            List<PlanFragment> fragments, List<ScanNode> scanNodes, String cluster, String timezone) {
        this.isBlockQuery = true;
        this.jobId = jobId;
        this.queryId = queryId;
        this.descTable = descTable.toThrift();
        this.fragments = fragments;
        this.scanNodes = scanNodes;
        this.queryOptions = new TQueryOptions();
        this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
        this.queryGlobals.setTimestamp_ms(new Date().getTime());
        this.queryGlobals.setTime_zone(timezone);
        this.tResourceInfo = new TResourceInfo("", "");
        this.needReport = true;
        this.clusterName = cluster;
        // Same instance-id derivation as the query constructor.
        this.nextInstanceId = new TUniqueId();
        nextInstanceId.setHi(queryId.hi);
        nextInstanceId.setLo(queryId.lo + 1);
    }
    /** Load/export job id; -1 for plain queries. */
    public long getJobId() {
        return jobId;
    }
    /** Unique id of the query this coordinator drives. */
    public TUniqueId getQueryId() {
        return queryId;
    }
    public void setQueryId(TUniqueId queryId) {
        this.queryId = queryId;
    }
    /** Marks the query type (e.g. LOAD) in the thrift query options. */
    public void setQueryType(TQueryType type) {
        this.queryOptions.setQuery_type(type);
    }
    /** Current overall execution status; non-OK once any instance failed. */
    public Status getExecStatus() {
        return queryStatus;
    }
    public RuntimeProfile getQueryProfile() {
        return queryProfile;
    }
    public List<String> getDeltaUrls() {
        return deltaUrls;
    }
    public Map<String, String> getLoadCounters() {
        return loadCounters;
    }
    public String getTrackingUrl() {
        return trackingUrl;
    }
    /** Per-query memory limit in bytes, forwarded to every backend. */
    public void setExecMemoryLimit(long execMemoryLimit) {
        this.queryOptions.setMem_limit(execMemoryLimit);
    }
    /** Query timeout in seconds, forwarded to every backend. */
    public void setTimeout(int timeout) {
        this.queryOptions.setQuery_timeout(timeout);
    }
    /**
     * Resets execution bookkeeping so the same coordinator can re-run an
     * export: clears backend exec states, resets the status and empties the
     * exported-file list.
     */
    public void clearExportStatus() {
        lock.lock();
        try {
            this.backendExecStates.clear();
            this.backendExecStateMap.clear();
            this.queryStatus.setStatus(new Status());
            if (this.exportFiles == null) {
                this.exportFiles = Lists.newArrayList();
            }
            this.exportFiles.clear();
        } finally {
            lock.unlock();
        }
    }
    /** Tablet commit infos reported by backends for load transactions. */
    public List<TTabletCommitInfo> getCommitInfos() {
        return commitInfos;
    }
    /**
     * One-time setup before scheduling: builds a FragmentExecParams for every
     * fragment, links each DataStreamSink fragment into its consumer's input
     * list, creates the profile tree and snapshots the cluster's backends.
     */
    private void prepare() {
        for (PlanFragment fragment : fragments) {
            fragmentExecParamsMap.put(fragment.getFragmentId(), new FragmentExecParams(fragment));
        }
        // Only fragments ending in a DataStreamSink feed another fragment.
        for (PlanFragment fragment : fragments) {
            if (!(fragment.getSink() instanceof DataStreamSink)) {
                continue;
            }
            FragmentExecParams params = fragmentExecParamsMap.get(fragment.getDestFragment().getFragmentId());
            params.inputFragments.add(fragment.getFragmentId());
        }
        coordAddress = new TNetworkAddress(localIP, Config.rpc_port);
        // Profile tree: one child profile per fragment under the query root.
        int fragmentSize = fragments.size();
        queryProfile = new RuntimeProfile("Execution Profile " + DebugUtil.printId(queryId));
        fragmentProfile = new ArrayList<RuntimeProfile>();
        for (int i = 0; i < fragmentSize; i ++) {
            fragmentProfile.add(new RuntimeProfile("Fragment " + i));
            queryProfile.addChild(fragmentProfile.get(i));
        }
        // Snapshot the backends once; all later host choices use this map.
        this.idToBackend = Catalog.getCurrentSystemInfo().getBackendsInCluster(clusterName);
        if (LOG.isDebugEnabled()) {
            LOG.debug("idToBackend size={}", idToBackend.size());
            for (Map.Entry<Long, Backend> entry : idToBackend.entrySet()) {
                Long backendID = entry.getKey();
                Backend backend = entry.getValue();
                LOG.debug("backend: {}-{}-{}", backendID, backend.getHost(), backend.getBePort());
            }
        }
    }
    // Convenience wrappers around the coordinator lock.
    private void lock() {
        lock.lock();
    }
    private void unlock() {
        lock.unlock();
    }
    /** Debug-logs the fragment -> instance/host/scan-range layout. */
    private void traceInstance() {
        if (LOG.isDebugEnabled()) {
            // Built only when debug logging is enabled — this is expensive.
            StringBuilder sb = new StringBuilder();
            int idx = 0;
            sb.append("query id=").append(DebugUtil.printId(queryId)).append(",");
            sb.append("fragment=[");
            for (Map.Entry<PlanFragmentId, FragmentExecParams> entry : fragmentExecParamsMap.entrySet()) {
                if (idx++ != 0) {
                    sb.append(",");
                }
                sb.append(entry.getKey());
                entry.getValue().appendTo(sb);
            }
            sb.append("]");
            LOG.debug(sb.toString());
        }
    }
    /**
     * Schedules and launches the query: prepares exec params, assigns scan
     * ranges and hosts, then sends every fragment instance to its backend via
     * async RPC and fails fast if any launch RPC fails.
     *
     * Must be called once, before getNext()/join().
     */
    public void exec() throws Exception {
        if (!scanNodes.isEmpty()) {
            LOG.debug("debug: in Coordinator::exec. query id: {}, planNode: {}",
                    DebugUtil.printId(queryId), scanNodes.get(0).treeToThrift());
        }
        if (!fragments.isEmpty()) {
            LOG.debug("debug: in Coordinator::exec. query id: {}, fragment: {}",
                    DebugUtil.printId(queryId), fragments.get(0).toThrift());
        }
        // Phase 1: planning-side preparation, no RPCs yet.
        prepare();
        computeScanRangeAssignment();
        computeFragmentExecParams();
        traceInstance();
        // The root fragment either streams rows back to this FE (ResultSink)
        // or writes elsewhere (load/export); in the latter case force success
        // reporting so counters/urls get collected.
        PlanFragmentId topId = fragments.get(0).getFragmentId();
        FragmentExecParams topParams = fragmentExecParamsMap.get(topId);
        if (topParams.fragment.getSink() instanceof ResultSink) {
            receiver = new ResultReceiver(
                    topParams.instanceExecParams.get(0).instanceId,
                    addressToBackendID.get(topParams.instanceExecParams.get(0).host),
                    toBrpcHost(topParams.instanceExecParams.get(0).host),
                    queryOptions.query_timeout * 1000);
        } else {
            this.queryOptions.setIs_report_success(true);
            deltaUrls = Lists.newArrayList();
            loadCounters = Maps.newHashMap();
        }
        // One latch mark per instance, counted down as instances report done.
        profileDoneSignal = new MarkedCountDownLatch<TUniqueId, Long>(instanceIds.size());
        for (TUniqueId instanceId : instanceIds) {
            profileDoneSignal.addMark(instanceId, -1L /* value is meaningless */);
        }
        lock();
        try {
            int backendId = 0;
            int profileFragmentId = 0;
            long memoryLimit = queryOptions.getMem_limit();
            for (PlanFragment fragment : fragments) {
                FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
                int instanceNum = params.instanceExecParams.size();
                Preconditions.checkState(instanceNum > 0);
                List<TExecPlanFragmentParams> tParams = params.toThrift(backendId);
                List<Pair<BackendExecState, Future<PExecPlanFragmentResult>>> futures = Lists.newArrayList();
                // Colocate fragments run several instances on one host, so
                // split the per-query memory limit across them.
                if (colocateFragmentIds.contains(fragment.getFragmentId().asInt())) {
                    int rate = Math.min(Config.query_colocate_join_memory_limit_penalty_factor, instanceNum);
                    long newmemory = memoryLimit / rate;
                    for (TExecPlanFragmentParams tParam : tParams) {
                        tParam.query_options.setMem_limit(newmemory);
                    }
                }
                // Launch every instance of this fragment asynchronously.
                int instanceId = 0;
                for (TExecPlanFragmentParams tParam : tParams) {
                    BackendExecState execState =
                            new BackendExecState(fragment.getFragmentId(), instanceId++,
                                    profileFragmentId, tParam, this.addressToBackendID);
                    backendExecStates.add(execState);
                    backendExecStateMap.put(tParam.params.getFragment_instance_id(), execState);
                    futures.add(Pair.create(execState, execState.execRemoteFragmentAsync()));
                    backendId++;
                }
                // Wait for each launch RPC; any failure cancels the whole
                // query and surfaces the most specific exception available.
                for (Pair<BackendExecState, Future<PExecPlanFragmentResult>> pair : futures) {
                    TStatusCode code = TStatusCode.INTERNAL_ERROR;
                    String errMsg = null;
                    try {
                        PExecPlanFragmentResult result = pair.second.get(Config.remote_fragment_exec_timeout_ms,
                                TimeUnit.MILLISECONDS);
                        code = TStatusCode.findByValue(result.status.status_code);
                        if (result.status.error_msgs != null && !result.status.error_msgs.isEmpty()) {
                            errMsg = result.status.error_msgs.get(0);
                        }
                    } catch (ExecutionException e) {
                        LOG.warn("catch a execute exception", e);
                        code = TStatusCode.THRIFT_RPC_ERROR;
                    } catch (InterruptedException e) {
                        LOG.warn("catch a interrupt exception", e);
                        code = TStatusCode.INTERNAL_ERROR;
                    } catch (TimeoutException e) {
                        LOG.warn("catch a timeout exception", e);
                        code = TStatusCode.TIMEOUT;
                    }
                    if (code != TStatusCode.OK) {
                        if (errMsg == null) {
                            errMsg = "exec rpc error. backend id: " + pair.first.backendId;
                        }
                        queryStatus.setStatus(errMsg);
                        LOG.warn("exec plan fragment failed, errmsg={}, fragmentId={}, backend={}:{}",
                                errMsg, fragment.getFragmentId(),
                                pair.first.address.hostname, pair.first.address.port);
                        cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
                        switch (code) {
                            case TIMEOUT:
                                throw new UserException("query timeout. backend id: " + pair.first.backendId);
                            case THRIFT_RPC_ERROR:
                                SimpleScheduler.updateBlacklistBackends(pair.first.backendId);
                                throw new RpcException("rpc failed. backend id: " + pair.first.backendId);
                            default:
                                throw new UserException(errMsg);
                        }
                    }
                }
                profileFragmentId += 1;
            }
            attachInstanceProfileToFragmentProfile();
        } finally {
            unlock();
        }
    }
    /** Files produced by a finished export job. */
    public List<String> getExportFiles() {
        return exportFiles;
    }
    /** Accumulates exported file paths reported by backend instances. */
    void updateExportFiles(List<String> files) {
        lock.lock();
        try {
            if (exportFiles == null) {
                exportFiles = Lists.newArrayList();
            }
            exportFiles.addAll(files);
        } finally {
            lock.unlock();
        }
    }
    /** Accumulates delta urls reported by load instances. */
    void updateDeltas(List<String> urls) {
        lock.lock();
        try {
            deltaUrls.addAll(urls);
        } finally {
            lock.unlock();
        }
    }
    /**
     * Folds one instance's DPP row counters (normal/abnormal) into the
     * query-wide running totals under the coordinator lock.
     */
    private void updateLoadCounters(Map<String, String> newLoadCounters) {
        lock.lock();
        try {
            // Read the stored totals (absent means 0).
            long numRowsNormal = 0L;
            String value = this.loadCounters.get(LoadEtlTask.DPP_NORMAL_ALL);
            if (value != null) {
                numRowsNormal = Long.valueOf(value);
            }
            long numRowsAbnormal = 0L;
            value = this.loadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL);
            if (value != null) {
                numRowsAbnormal = Long.valueOf(value);
            }
            // Add the newly reported counts on top.
            value = newLoadCounters.get(LoadEtlTask.DPP_NORMAL_ALL);
            if (value != null) {
                numRowsNormal += Long.valueOf(value);
            }
            value = newLoadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL);
            if (value != null) {
                numRowsAbnormal += Long.valueOf(value);
            }
            this.loadCounters.put(LoadEtlTask.DPP_NORMAL_ALL, "" + numRowsNormal);
            this.loadCounters.put(LoadEtlTask.DPP_ABNORMAL_ALL, "" + numRowsAbnormal);
        } finally {
            lock.unlock();
        }
    }
    /** Accumulates tablet commit infos from finished load instances. */
    private void updateCommitInfos(List<TTabletCommitInfo> commitInfos) {
        lock.lock();
        try {
            this.commitInfos.addAll(commitInfos);
        } finally {
            lock.unlock();
        }
    }
    /**
     * Records the first failure for the query and cancels all remaining work.
     * OK statuses, failures arriving after a failure was already recorded,
     * and cancellations after all results were returned are ignored.
     */
    private void updateStatus(Status status, TUniqueId instanceId) {
        lock.lock();
        try {
            if (returnedAllResults && status.isCancelled()) {
                return;
            }
            if (status.ok()) {
                return;
            }
            // Keep only the first error; later ones are usually side effects
            // of the cancellation triggered below.
            if (!queryStatus.ok()) {
                return;
            }
            queryStatus.setStatus(status);
            LOG.warn("one instance report fail throw updateStatus(), need cancel. job id: {}, query id: {}, instance id: {}",
                    jobId, DebugUtil.printId(queryId), instanceId != null ? DebugUtil.printId(instanceId) : "NaN");
            cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
        } finally {
            lock.unlock();
        }
    }
    /**
     * Fetches the next row batch from the root instance's receiver. Throws if
     * the query has failed; on EOS may proactively cancel the remaining
     * instances once the query's LIMIT has been satisfied.
     */
    public RowBatch getNext() throws Exception {
        if (receiver == null) {
            throw new UserException("There is no receiver.");
        }
        RowBatch resultBatch;
        Status status = new Status();
        resultBatch = receiver.getNext(status);
        if (!status.ok()) {
            LOG.warn("get next fail, need cancel. query id: {}", DebugUtil.printId(queryId));
        }
        updateStatus(status, null /* no instance id */);
        // Re-read the status under the lock: updateStatus() above (or a
        // concurrent instance report) may have recorded a failure.
        Status copyStatus = null;
        lock();
        try {
            copyStatus = new Status(queryStatus);
        } finally {
            unlock();
        }
        if (!copyStatus.ok()) {
            if (Strings.isNullOrEmpty(copyStatus.getErrorMsg())) {
                copyStatus.rewriteErrorMsg();
            }
            if (copyStatus.isRpcError()) {
                throw new RpcException(copyStatus.getErrorMsg());
            } else {
                String errMsg = copyStatus.getErrorMsg();
                LOG.warn("query failed: {}", errMsg);
                // Strip the trailing "host..." detail before surfacing the
                // message to the user.
                int hostIndex = errMsg.indexOf("host");
                if (hostIndex != -1) {
                    errMsg = errMsg.substring(0, hostIndex);
                }
                throw new UserException(errMsg);
            }
        }
        if (resultBatch.isEos()) {
            this.returnedAllResults = true;
            // If enough rows satisfied the root LIMIT, cancel remaining
            // instances instead of letting them run to completion.
            // NOTE(review): numLimitRows is unboxed below — assumes
            // getLimit() never returns null; confirm.
            Long numLimitRows = fragments.get(0).getPlanRoot().getLimit();
            boolean hasLimit = numLimitRows > 0;
            if (!isBlockQuery && instanceIds.size() > 1 && hasLimit && numReceivedRows >= numLimitRows) {
                LOG.debug("no block query, return num >= limit rows, need cancel");
                cancelInternal(PPlanFragmentCancelReason.LIMIT_REACH);
            }
        } else {
            numReceivedRows += resultBatch.getBatch().getRowsSize();
        }
        return resultBatch;
    }
    /** Externally initiated cancel; a no-op if the query already failed. */
    public void cancel() {
        lock();
        try {
            if (!queryStatus.ok()) {
                // Already failed: cancellation was triggered internally.
                return;
            } else {
                queryStatus.setStatus(Status.CANCELLED);
            }
            LOG.warn("cancel execution of query, this is outside invoke");
            cancelInternal(PPlanFragmentCancelReason.USER_CANCEL);
        } finally {
            unlock();
        }
    }
    /**
     * Cancels the result receiver and all remote fragments, then forces the
     * profile latch open so join()/endProfile() don't hang on instances that
     * will never report.
     */
    private void cancelInternal(PPlanFragmentCancelReason cancelReason) {
        if (null != receiver) {
            receiver.cancel();
        }
        cancelRemoteFragmentsAsync(cancelReason);
        if (profileDoneSignal != null) {
            profileDoneSignal.countDownToZero(new Status());
            LOG.info("unfinished instance: {}", profileDoneSignal.getLeftMarks().stream().map(e->DebugUtil.printId(e.getKey())).toArray());
        }
    }
    /**
     * Best-effort async cancel of every launched, unfinished, not yet
     * cancelled fragment instance. An rpc failure blacklists the backend but
     * does not abort the loop.
     */
    private void cancelRemoteFragmentsAsync(PPlanFragmentCancelReason cancelReason) {
        for (BackendExecState backendExecState : backendExecStates) {
            TNetworkAddress address = backendExecState.getBackendAddress();
            LOG.debug("cancelRemoteFragments initiated={} done={} hasCanceled={} ip={} port={} fragment instance id={}, reason: {}",
                    backendExecState.initiated, backendExecState.done, backendExecState.hasCanceled,
                    address.hostname, address.port, DebugUtil.printId(backendExecState.getFragmentInstanceId()),
                    cancelReason.name());
            backendExecState.lock();
            try {
                if (!backendExecState.initiated) {
                    // The launch rpc never went out; nothing to cancel remotely.
                    continue;
                }
                if (backendExecState.done) {
                    continue;
                }
                if (backendExecState.hasCanceled) {
                    continue;
                }
                TNetworkAddress brpcAddress = toBrpcHost(address);
                try {
                    BackendServiceProxy.getInstance().cancelPlanFragmentAsync(
                            brpcAddress, backendExecState.getFragmentInstanceId(), cancelReason);
                } catch (RpcException e) {
                    LOG.warn("cancel plan fragment get a exception, address={}:{}",
                            brpcAddress.getHostname(), brpcAddress.getPort());
                    SimpleScheduler.updateBlacklistBackends(addressToBackendID.get(brpcAddress));
                }
                backendExecState.hasCanceled = true;
            } catch (Exception e) {
                LOG.warn("catch a exception", e);
            } finally {
                backendExecState.unlock();
            }
        }
    }
    /**
     * Second scheduling phase: decides hosts (computeFragmentHosts), assigns
     * a unique instance id to every fragment instance, then wires up sender
     * counts and destinations between producer and consumer fragments.
     */
    private void computeFragmentExecParams() throws Exception {
        computeFragmentHosts();
        instanceIds.clear();
        for (FragmentExecParams params : fragmentExecParamsMap.values()) {
            LOG.debug("fragment {} has instances {}", params.fragment.getFragmentId(), params.instanceExecParams.size());
            for (int j = 0; j < params.instanceExecParams.size(); ++j) {
                // Instance id = query id hi bits + a per-instance lo offset.
                TUniqueId instanceId = new TUniqueId();
                instanceId.setHi(queryId.hi);
                instanceId.setLo(queryId.lo + instanceIds.size() + 1);
                params.instanceExecParams.get(j).instanceId = instanceId;
                instanceIds.add(instanceId);
            }
        }
        for (FragmentExecParams params : fragmentExecParamsMap.values()) {
            PlanFragment destFragment = params.fragment.getDestFragment();
            if (destFragment == null) {
                // Root fragment: output goes to the coordinator, not a fragment.
                continue;
            }
            FragmentExecParams destParams = fragmentExecParamsMap.get(destFragment.getFragmentId());
            // Tell the consumer how many senders feed each exchange node.
            DataSink sink = params.fragment.getSink();
            PlanNodeId exchId = sink.getExchNodeId();
            if (destParams.perExchNumSenders.get(exchId.asInt()) == null) {
                destParams.perExchNumSenders.put(exchId.asInt(), params.instanceExecParams.size());
            } else {
                destParams.perExchNumSenders.put(exchId.asInt(),
                        params.instanceExecParams.size() + destParams.perExchNumSenders.get(exchId.asInt()));
            }
            // Every producer instance sends to every consumer instance.
            for (int j = 0; j < destParams.instanceExecParams.size(); ++j) {
                TPlanFragmentDestination dest = new TPlanFragmentDestination();
                dest.fragment_instance_id = destParams.instanceExecParams.get(j).instanceId;
                dest.server = toRpcHost(destParams.instanceExecParams.get(j).host);
                dest.setBrpc_server(toBrpcHost(destParams.instanceExecParams.get(j).host));
                params.destinations.add(dest);
            }
        }
    }
    /**
     * Resolves an exec (be-port) address to the same backend's thrift rpc
     * address.
     *
     * @throws UserException if no live backend matches the address
     */
    private TNetworkAddress toRpcHost(TNetworkAddress host) throws Exception {
        Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort(
                host.getHostname(), host.getPort());
        if (backend == null) {
            throw new UserException("there is no scanNode Backend");
        }
        TNetworkAddress dest = new TNetworkAddress(backend.getHost(), backend.getBeRpcPort());
        return dest;
    }
    /**
     * Resolves an exec address to the backend's brpc address. Returns null
     * when the backend exposes no brpc port; callers must handle that.
     *
     * @throws UserException if no live backend matches the address
     */
    private TNetworkAddress toBrpcHost(TNetworkAddress host) throws Exception {
        Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort(
                host.getHostname(), host.getPort());
        if (backend == null) {
            throw new UserException("there is no scanNode Backend");
        }
        if (backend.getBrpcPort() < 0) {
            return null;
        }
        return new TNetworkAddress(backend.getHost(), backend.getBrpcPort());
    }
private boolean containsUnionNode(PlanNode node) {
if (node instanceof UnionNode) {
return true;
}
for (PlanNode child : node.getChildren()) {
if (child instanceof ExchangeNode) {
continue;
} else if (child instanceof UnionNode) {
return true;
} else {
return containsUnionNode(child);
}
}
return false;
}
private boolean isColocateJoin(PlanNode node) {
if (Config.disable_colocate_join) {
return false;
}
if (ConnectContext.get() != null) {
if (ConnectContext.get().getSessionVariable().isDisableColocateJoin()) {
return false;
}
}
if (colocateFragmentIds.contains(node.getFragmentId().asInt())) {
return true;
}
if (node instanceof HashJoinNode) {
HashJoinNode joinNode = (HashJoinNode) node;
if (joinNode.isColocate()) {
colocateFragmentIds.add(joinNode.getFragmentId().asInt());
return true;
}
}
for (PlanNode childNode : node.getChildren()) {
return isColocateJoin(childNode);
}
return false;
}
private PlanNode findLeftmostNode(PlanNode plan) {
PlanNode newPlan = plan;
while (newPlan.getChildren().size() != 0 && !(newPlan instanceof ExchangeNode)) {
newPlan = newPlan.getChild(0);
}
return newPlan;
}
private <K, V> V findOrInsert(HashMap<K, V> m, final K key, final V defaultVal) {
V value = m.get(key);
if (value == null) {
m.put(key, defaultVal);
value = defaultVal;
}
return value;
}
private List<TScanRangeParams> findOrInsert(Map<Integer, List<TScanRangeParams>> m, Integer key,
ArrayList<TScanRangeParams> defaultVal) {
List<TScanRangeParams> value = m.get(key);
if (value == null) {
m.put(key, defaultVal);
value = defaultVal;
}
return value;
}
    /**
     * Weight of one scan range for load balancing. Currently a constant, so
     * assignment balances by range count rather than bytes.
     */
    private long getScanRangeLength(final TScanRange scanRange) {
        return 1;
    }
    /**
     * First scheduling phase: maps every scan node's ranges to concrete
     * backends — bucket-wise for colocate joins, otherwise via the balancing
     * scheduler.
     */
    private void computeScanRangeAssignment() throws Exception {
        for (ScanNode scanNode : scanNodes) {
            List<TScanRangeLocations> locations = scanNode.getScanRangeLocations(0);
            if (locations == null) {
                // Nothing to assign for this scan node.
                continue;
            }
            FragmentScanRangeAssignment assignment =
                    fragmentExecParamsMap.get(scanNode.getFragmentId()).scanRangeAssignment;
            if (isColocateJoin(scanNode.getFragment().getPlanRoot())) {
                computeScanRangeAssignmentByColocate((OlapScanNode) scanNode, assignment);
            } else {
                computeScanRangeAssignmentByScheduler(scanNode, locations, assignment);
            }
        }
    }
    /**
     * Colocate assignment: each bucket sequence is pinned to one host (chosen
     * once in getExecHostPortForBucketSeq) and all of its tablet ranges are
     * grouped per scan node under that bucket in bucketSeqToScanRange.
     */
    private void computeScanRangeAssignmentByColocate(
            final OlapScanNode scanNode,
            FragmentScanRangeAssignment assignment) throws Exception {
        for(Integer bucketSeq: scanNode.bucketSeq2locations.keySet()) {
            List<TScanRangeLocations> locations = scanNode.bucketSeq2locations.get(bucketSeq);
            if (!bucketSeqToAddress.containsKey(bucketSeq)) {
                // The first scan node to see a bucket picks its host.
                getExecHostPortForBucketSeq(locations.get(0), bucketSeq);
            }
            for(TScanRangeLocations location: locations) {
                Map<Integer, List<TScanRangeParams>> scanRanges =
                        findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<Integer, List<TScanRangeParams>>());
                List<TScanRangeParams> scanRangeParamsList =
                        findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>());
                TScanRangeParams scanRangeParams = new TScanRangeParams();
                scanRangeParams.scan_range = location.scan_range;
                scanRangeParamsList.add(scanRangeParams);
            }
        }
    }
private void getExecHostPortForBucketSeq(TScanRangeLocations seqLocation, Integer bucketSeq) throws Exception {
int randomLocation = new Random().nextInt(seqLocation.locations.size());
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostPort = SimpleScheduler.getHost(seqLocation.locations.get(randomLocation).backend_id, seqLocation.locations, this.idToBackend, backendIdRef);
if (execHostPort == null) {
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
this.bucketSeqToAddress.put(bucketSeq, execHostPort);
}
    /**
     * Greedy balanced assignment: each scan range goes to the replica whose
     * host currently has the least assigned weight (see getScanRangeLength),
     * and the chosen location is then resolved to a live backend.
     */
    private void computeScanRangeAssignmentByScheduler(
            final ScanNode scanNode,
            final List<TScanRangeLocations> locations,
            FragmentScanRangeAssignment assignment) throws Exception {
        HashMap<TNetworkAddress, Long> assignedBytesPerHost = Maps.newHashMap();
        for (TScanRangeLocations scanRangeLocations : locations) {
            // Pick the least-loaded replica host for this range.
            Long minAssignedBytes = Long.MAX_VALUE;
            TScanRangeLocation minLocation = null;
            for (final TScanRangeLocation location : scanRangeLocations.getLocations()) {
                Long assignedBytes = findOrInsert(assignedBytesPerHost, location.server, 0L);
                if (assignedBytes < minAssignedBytes) {
                    minAssignedBytes = assignedBytes;
                    minLocation = location;
                }
            }
            // NOTE(review): assumes every scan range has at least one
            // location; minLocation would be null otherwise — confirm.
            Long scanRangeLength = getScanRangeLength(scanRangeLocations.scan_range);
            assignedBytesPerHost.put(minLocation.server,
                    assignedBytesPerHost.get(minLocation.server) + scanRangeLength);
            Reference<Long> backendIdRef = new Reference<Long>();
            TNetworkAddress execHostPort = SimpleScheduler.getHost(minLocation.backend_id,
                    scanRangeLocations.getLocations(), this.idToBackend, backendIdRef);
            if (execHostPort == null) {
                throw new UserException("there is no scanNode Backend");
            }
            this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
            // Register the range under (host -> scan node id -> ranges).
            Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(assignment, execHostPort,
                    new HashMap<Integer, List<TScanRangeParams>>());
            List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(),
                    new ArrayList<TScanRangeParams>());
            TScanRangeParams scanRangeParams = new TScanRangeParams();
            scanRangeParams.scan_range = scanRangeLocations.scan_range;
            scanRangeParams.setVolume_id(minLocation.volume_id);
            scanRangeParamsList.add(scanRangeParams);
        }
    }
    /**
     * Callback for a backend's exec-status report: merges the instance
     * profile, records failures, collects load/export side data and counts
     * down the profile latch when an instance finishes.
     */
    public void updateFragmentExecStatus(TReportExecStatusParams params) {
        if (params.backend_num >= backendExecStates.size()) {
            LOG.warn("unknown backend number: {}, expected less than: {}",
                    params.backend_num, backendExecStates.size());
            return;
        }
        boolean done = false;
        BackendExecState execState = backendExecStates.get(params.backend_num);
        execState.lock();
        try {
            if (execState.done) {
                // Duplicate report after completion; ignore it.
                return;
            }
            if (params.isSetProfile()) {
                execState.profile.update(params.profile);
            }
            done = params.done;
            execState.done = params.done;
        } finally {
            execState.unlock();
        }
        if (LOG.isDebugEnabled()) {
            StringBuilder builder = new StringBuilder();
            execState.profile().prettyPrint(builder, "");
            LOG.debug("profile for query_id={} instance_id={}\n{}",
                    DebugUtil.printId(queryId),
                    DebugUtil.printId(params.getFragment_instance_id()),
                    builder.toString());
        }
        // A cancel arriving after all results were returned is expected.
        Status status = new Status(params.status);
        if (!(returnedAllResults && status.isCancelled()) && !status.ok()) {
            LOG.warn("one instance report fail, query_id={} instance_id={}",
                    DebugUtil.printId(queryId), DebugUtil.printId(params.getFragment_instance_id()));
            updateStatus(status, params.getFragment_instance_id());
        }
        if (done) {
            // Collect load/export side outputs carried by the final report.
            if (params.isSetDelta_urls()) {
                updateDeltas(params.getDelta_urls());
            }
            if (params.isSetLoad_counters()) {
                updateLoadCounters(params.getLoad_counters());
            }
            if (params.isSetTracking_url()) {
                trackingUrl = params.tracking_url;
            }
            if (params.isSetExport_files()) {
                updateExportFiles(params.export_files);
            }
            if (params.isSetCommitInfos()) {
                updateCommitInfos(params.getCommitInfos());
            }
            profileDoneSignal.markedCountDown(params.getFragment_instance_id(), -1L);
        }
        if (params.isSetLoaded_rows()) {
            Catalog.getCurrentCatalog().getLoadManager().updateJobLoadedRows(jobId, params.query_id, params.loaded_rows);
        }
        return;
    }
    /**
     * Waits briefly for outstanding profile reports (when reporting is on)
     * and then sorts the children of each fragment profile.
     */
    public void endProfile() {
        if (backendExecStates.isEmpty()) {
            return;
        }
        if (needReport) {
            try {
                // Bounded wait: stale instances must not block query teardown.
                profileDoneSignal.await(2, TimeUnit.SECONDS);
            } catch (InterruptedException e1) {
                LOG.warn("signal await error", e1);
            }
        }
        // NOTE(review): loop starts at 1, so fragment 0's profile children are
        // never sorted — confirm whether the root fragment is skipped on purpose.
        for (int i = 1; i < fragmentProfile.size(); ++i) {
            fragmentProfile.get(i).sortChildren();
        }
    }
public boolean join(int seconds) {
try {
return profileDoneSignal.await(seconds, TimeUnit.SECONDS);
} catch (InterruptedException e) {
}
return false;
}
public boolean isDone() {
return profileDoneSignal.getCount() == 0;
}
    // Execution host -> (scan node id -> scan ranges assigned to that host).
    class FragmentScanRangeAssignment
            extends HashMap<TNetworkAddress, Map<Integer, List<TScanRangeParams>>> {
    }
    // Bucket seq -> (scan node id -> scan ranges); used for colocate joins.
    class BucketSeqToScanRange extends HashMap<Integer, Map<Integer, List<TScanRangeParams>>> {
    }
    private BucketSeqToScanRange bucketSeqToScanRange = new BucketSeqToScanRange();
    // Bucket seq -> chosen execution host (colocate joins).
    private Map<Integer, TNetworkAddress> bucketSeqToAddress = Maps.newHashMap();
    // Fragment ids (asInt) that execute as colocate joins.
    private Set<Integer> colocateFragmentIds = new HashSet<>();
    /**
     * Tracks the lifecycle of a single fragment instance on one backend:
     * launch rpc, completion/cancel flags and the instance's runtime profile.
     */
    public class BackendExecState {
        TExecPlanFragmentParams rpcParams;
        private PlanFragmentId fragmentId;
        private int instanceId;
        // True once the launch rpc has been issued (cancel only after this).
        private boolean initiated;
        // True once the instance reported completion.
        private boolean done;
        private boolean hasCanceled;
        // Guards the flags above against concurrent report/cancel callers.
        private Lock lock = new ReentrantLock();
        private int profileFragmentId;
        RuntimeProfile profile;
        TNetworkAddress address;
        Long backendId;
        public int profileFragmentId() {
            return profileFragmentId;
        }
        public boolean initiated() {
            return initiated;
        }
        public RuntimeProfile profile() {
            return profile;
        }
        public void lock() {
            lock.lock();
        }
        public void unlock() {
            lock.unlock();
        }
        public int getInstanceId() {
            return instanceId;
        }
        public PlanFragmentId getFragmentId() {
            return fragmentId;
        }
        public BackendExecState(PlanFragmentId fragmentId, int instanceId, int profileFragmentId,
                                TExecPlanFragmentParams rpcParams, Map<TNetworkAddress, Long> addressToBackendID) {
            this.profileFragmentId = profileFragmentId;
            this.fragmentId = fragmentId;
            this.instanceId = instanceId;
            this.rpcParams = rpcParams;
            this.initiated = false;
            this.done = false;
            // Host/backend were fixed earlier by the scheduling phases.
            this.address = fragmentExecParamsMap.get(fragmentId).instanceExecParams.get(instanceId).host;
            this.backendId = addressToBackendID.get(address);
            String name = "Instance " + DebugUtil.printId(fragmentExecParamsMap.get(fragmentId)
                    .instanceExecParams.get(instanceId).instanceId) + " (host=" + address + ")";
            this.profile = new RuntimeProfile(name);
            this.hasCanceled = false;
        }
        public TNetworkAddress getBackendAddress() {
            return address;
        }
        public TUniqueId getFragmentInstanceId() {
            return this.rpcParams.params.getFragment_instance_id();
        }
        /**
         * Issues the async launch rpc for this instance. The initiated flag is
         * set before the rpc so a concurrent cancel can see the attempt; a
         * failing rpc blacklists the backend.
         */
        public Future<PExecPlanFragmentResult> execRemoteFragmentAsync() throws TException, RpcException {
            TNetworkAddress brpcAddress = null;
            try {
                brpcAddress = toBrpcHost(address);
            } catch (Exception e) {
                throw new TException(e.getMessage());
            }
            initiated = true;
            try {
                return BackendServiceProxy.getInstance().execPlanFragmentAsync(brpcAddress, rpcParams);
            } catch (RpcException e) {
                SimpleScheduler.updateBlacklistBackends(backendId);
                throw e;
            }
        }
    }
protected class FragmentExecParams {
        // The fragment these params describe.
        public PlanFragment fragment;
        // Receivers of this fragment's output (one per consumer instance).
        public List<TPlanFragmentDestination> destinations = Lists.newArrayList();
        // Exchange node id -> number of senders feeding it.
        public Map<Integer, Integer> perExchNumSenders = Maps.newHashMap();
        // Fragments whose output streams into this one.
        public List<PlanFragmentId> inputFragments = Lists.newArrayList();
        // One entry per execution instance of this fragment.
        public List<FInstanceExecParam> instanceExecParams = Lists.newArrayList();
        // Host -> (scan node id -> ranges) produced by scan assignment.
        public FragmentScanRangeAssignment scanRangeAssignment = new FragmentScanRangeAssignment();
        public FragmentExecParams(PlanFragment fragment) {
            this.fragment = fragment;
        }
        /**
         * Builds one TExecPlanFragmentParams per instance. backendNum keeps a
         * monotonically increasing backend_num across fragments; it is used
         * by updateFragmentExecStatus() to locate the exec state.
         */
        List<TExecPlanFragmentParams> toThrift(int backendNum) {
            List<TExecPlanFragmentParams> paramsList = Lists.newArrayList();
            for (int i = 0; i < instanceExecParams.size(); ++i) {
                final FInstanceExecParam instanceExecParam = instanceExecParams.get(i);
                TExecPlanFragmentParams params = new TExecPlanFragmentParams();
                params.setProtocol_version(PaloInternalServiceVersion.V1);
                params.setFragment(fragment.toThrift());
                params.setDesc_tbl(descTable);
                params.setParams(new TPlanFragmentExecParams());
                params.setResource_info(tResourceInfo);
                params.params.setQuery_id(queryId);
                params.params.setFragment_instance_id(instanceExecParam.instanceId);
                // Instances without scan work still need a (possibly empty) map.
                Map<Integer, List<TScanRangeParams>> scanRanges = instanceExecParam.perNodeScanRanges;
                if (scanRanges == null) {
                    scanRanges = Maps.newHashMap();
                }
                params.params.setPer_node_scan_ranges(scanRanges);
                params.params.setPer_exch_num_senders(perExchNumSenders);
                params.params.setDestinations(destinations);
                params.params.setSender_id(i);
                params.params.setNum_senders(instanceExecParams.size());
                params.setCoord(coordAddress);
                params.setBackend_num(backendNum++);
                params.setQuery_globals(queryGlobals);
                params.setQuery_options(queryOptions);
                params.params.setSend_query_statistics_with_every_batch(
                        fragment.isTransferQueryStatisticsWithEveryBatch());
                // Loads additionally carry the error-hub configuration.
                if (queryOptions.getQuery_type() == TQueryType.LOAD) {
                    LoadErrorHub.Param param = Catalog.getCurrentCatalog().getLoadInstance().getLoadErrorHubInfo();
                    if (param != null) {
                        TLoadErrorHubInfo info = param.toThrift();
                        if (info != null) {
                            params.setLoad_error_hub_info(info);
                        }
                    }
                }
                paramsList.add(params);
            }
            return paramsList;
        }
        /** Appends a compact trace of the given scan ranges (palo/es) to sb. */
        public void appendScanRange(StringBuilder sb, List<TScanRangeParams> params) {
            sb.append("range=[");
            int idx = 0;
            for (TScanRangeParams range : params) {
                TPaloScanRange paloScanRange = range.getScan_range().getPalo_scan_range();
                if (paloScanRange != null) {
                    if (idx++ != 0) {
                        sb.append(",");
                    }
                    sb.append("{tid=").append(paloScanRange.getTablet_id())
                            .append(",ver=").append(paloScanRange.getVersion()).append("}");
                }
                TEsScanRange esScanRange = range.getScan_range().getEs_scan_range();
                if (esScanRange != null) {
                    sb.append("{ index=").append(esScanRange.getIndex())
                            .append(", shardid=").append(esScanRange.getShard_id())
                            .append("}");
                }
            }
            sb.append("]");
        }
// Appends a trace description of this fragment and each of its instances
// (instance id, host, assigned scan ranges) to sb. Used by traceInstance().
public void appendTo(StringBuilder sb) {
    sb.append("{plan=");
    fragment.getPlanRoot().appendTrace(sb);
    sb.append(",instance=[");
    for (int i = 0; i < instanceExecParams.size(); ++i) {
        if (i != 0) {
            sb.append(",");
        }
        TNetworkAddress address = instanceExecParams.get(i).host;
        Map<Integer, List<TScanRangeParams>> scanRanges =
                scanRangeAssignment.get(address);
        sb.append("{");
        sb.append("id=").append(DebugUtil.printId(instanceExecParams.get(i).instanceId));
        sb.append(",host=").append(instanceExecParams.get(i).host);
        if (scanRanges == null) {
            // No scan ranges assigned to this host (e.g. a pure exchange fragment).
            sb.append("}");
            continue;
        }
        sb.append(",range=[");
        int eIdx = 0;
        for (Map.Entry<Integer, List<TScanRangeParams>> entry : scanRanges.entrySet()) {
            if (eIdx++ != 0) {
                sb.append(",");
            }
            // Key is the scan node id; value is that node's ranges on this host.
            sb.append("id").append(entry.getKey()).append(",");
            appendScanRange(sb, entry.getValue());
        }
        sb.append("]");
        sb.append("}");
    }
    sb.append("]");
    sb.append("}");
}
} | class Coordinator {
private static final Logger LOG = LogManager.getLogger(Coordinator.class);
// NOTE(review): SimpleDateFormat is not thread-safe; this shared static instance
// is only safe if Coordinator constructors never run concurrently — confirm.
private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private static String localIP = FrontendOptions.getLocalHostAddress();
// Shared RNG used for placement decisions (java.util.Random is thread-safe).
private static Random instanceRandom = new Random();
// Aggregated query status; the first reported error wins (see updateStatus()).
Status queryStatus = new Status();
// Execution address -> backend id, filled during scan range assignment.
Map<TNetworkAddress, Long> addressToBackendID = Maps.newHashMap();
// Snapshot of the cluster's backends, taken in prepare().
private ImmutableMap<Long, Backend> idToBackend = ImmutableMap.of();
private TDescriptorTable descTable;
private TQueryGlobals queryGlobals = new TQueryGlobals();
private TQueryOptions queryOptions;
private TNetworkAddress coordAddress;
// Guards the mutable coordination state (exec states, counters, urls, status).
private Lock lock = new ReentrantLock();
private boolean returnedAllResults;
private RuntimeProfile queryProfile;
private List<RuntimeProfile> fragmentProfile;
// Per-fragment execution parameters, keyed by fragment id.
private Map<PlanFragmentId, FragmentExecParams> fragmentExecParamsMap = Maps.newHashMap();
private List<PlanFragment> fragments;
// One BackendExecState per launched fragment instance, indexed by backend_num.
private List<BackendExecState> backendExecStates = Lists.newArrayList();
private ResultReceiver receiver;
private ConcurrentMap<TUniqueId, BackendExecState> backendExecStateMap =
        Maps.newConcurrentMap();
private List<ScanNode> scanNodes;
// All fragment instance ids belonging to this query.
private Set<TUniqueId> instanceIds = Sets.newHashSet();
// Counted down once per instance on its final report (or forced on cancel).
private MarkedCountDownLatch<TUniqueId, Long> profileDoneSignal;
private boolean isBlockQuery;
private int numReceivedRows = 0;
// Load-job results collected from backend reports.
private List<String> deltaUrls;
private Map<String, String> loadCounters;
private String trackingUrl;
private List<String> exportFiles;
private List<TTabletCommitInfo> commitInfos = Lists.newArrayList();
// -1 means this coordinator is not attached to a load job.
private long jobId = -1;
private TUniqueId queryId;
private TResourceInfo tResourceInfo;
private boolean needReport;
private String clusterName;
private final TUniqueId nextInstanceId;
// Query/insert constructor: derives all options from the session's connect context.
public Coordinator(ConnectContext context, Analyzer analyzer, Planner planner) {
    this.isBlockQuery = planner.isBlockQuery();
    this.queryId = context.queryId();
    this.fragments = planner.getFragments();
    this.scanNodes = planner.getScanNodes();
    this.descTable = analyzer.getDescTbl().toThrift();
    this.returnedAllResults = false;
    this.queryOptions = context.getSessionVariable().toThrift();
    this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
    this.queryGlobals.setTimestamp_ms(new Date().getTime());
    // Legacy alias: the session value "CST" is rewritten to the default time zone.
    if (context.getSessionVariable().getTimeZone().equals("CST")) {
        this.queryGlobals.setTime_zone(TimeUtils.DEFAULT_TIME_ZONE);
    } else {
        this.queryGlobals.setTime_zone(context.getSessionVariable().getTimeZone());
    }
    this.tResourceInfo = new TResourceInfo(context.getQualifiedUser(),
            context.getSessionVariable().getResourceGroup());
    this.needReport = context.getSessionVariable().isReportSucc();
    this.clusterName = context.getClusterName();
    // Instance ids start right after the query id (same hi, lo + 1).
    this.nextInstanceId = new TUniqueId();
    nextInstanceId.setHi(queryId.hi);
    nextInstanceId.setLo(queryId.lo + 1);
}
// Load/export job constructor: no connect context; always treated as a block query.
public Coordinator(Long jobId, TUniqueId queryId, DescriptorTable descTable,
        List<PlanFragment> fragments, List<ScanNode> scanNodes, String cluster, String timezone) {
    this.isBlockQuery = true;
    this.jobId = jobId;
    this.queryId = queryId;
    this.descTable = descTable.toThrift();
    this.fragments = fragments;
    this.scanNodes = scanNodes;
    this.queryOptions = new TQueryOptions();
    this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
    this.queryGlobals.setTimestamp_ms(new Date().getTime());
    this.queryGlobals.setTime_zone(timezone);
    // Jobs carry no user/resource-group information.
    this.tResourceInfo = new TResourceInfo("", "");
    this.needReport = true;
    this.clusterName = cluster;
    // Instance ids start right after the query id (same hi, lo + 1).
    this.nextInstanceId = new TUniqueId();
    nextInstanceId.setHi(queryId.hi);
    nextInstanceId.setLo(queryId.lo + 1);
}
public long getJobId() {
    return jobId;
}
public TUniqueId getQueryId() {
    return queryId;
}
public void setQueryId(TUniqueId queryId) {
    this.queryId = queryId;
}
public void setQueryType(TQueryType type) {
    this.queryOptions.setQuery_type(type);
}
// Aggregated status of the query; the first reported error wins.
public Status getExecStatus() {
    return queryStatus;
}
public RuntimeProfile getQueryProfile() {
    return queryProfile;
}
public List<String> getDeltaUrls() {
    return deltaUrls;
}
public Map<String, String> getLoadCounters() {
    return loadCounters;
}
public String getTrackingUrl() {
    return trackingUrl;
}
// Per-instance memory limit, in bytes.
public void setExecMemoryLimit(long execMemoryLimit) {
    this.queryOptions.setMem_limit(execMemoryLimit);
}
// Query timeout, in seconds.
public void setTimeout(int timeout) {
    this.queryOptions.setQuery_timeout(timeout);
}
// Resets per-attempt execution state so an export job can be retried cleanly.
public void clearExportStatus() {
    lock.lock();
    try {
        this.backendExecStates.clear();
        this.backendExecStateMap.clear();
        this.queryStatus.setStatus(new Status());
        if (this.exportFiles == null) {
            this.exportFiles = Lists.newArrayList();
        }
        this.exportFiles.clear();
    } finally {
        lock.unlock();
    }
}
// Tablet commit infos collected from finished load instances.
public List<TTabletCommitInfo> getCommitInfos() {
    return commitInfos;
}
// Builds FragmentExecParams for every fragment, records each exchange consumer's
// input fragments, creates per-fragment runtime profiles, and snapshots the
// cluster's backends into idToBackend.
private void prepare() {
    for (PlanFragment fragment : fragments) {
        fragmentExecParamsMap.put(fragment.getFragmentId(), new FragmentExecParams(fragment));
    }
    // A DataStreamSink means this fragment feeds another fragment's exchange.
    for (PlanFragment fragment : fragments) {
        if (!(fragment.getSink() instanceof DataStreamSink)) {
            continue;
        }
        FragmentExecParams params = fragmentExecParamsMap.get(fragment.getDestFragment().getFragmentId());
        params.inputFragments.add(fragment.getFragmentId());
    }
    coordAddress = new TNetworkAddress(localIP, Config.rpc_port);
    int fragmentSize = fragments.size();
    queryProfile = new RuntimeProfile("Execution Profile " + DebugUtil.printId(queryId));
    fragmentProfile = new ArrayList<RuntimeProfile>();
    for (int i = 0; i < fragmentSize; i ++) {
        fragmentProfile.add(new RuntimeProfile("Fragment " + i));
        queryProfile.addChild(fragmentProfile.get(i));
    }
    this.idToBackend = Catalog.getCurrentSystemInfo().getBackendsInCluster(clusterName);
    if (LOG.isDebugEnabled()) {
        LOG.debug("idToBackend size={}", idToBackend.size());
        for (Map.Entry<Long, Backend> entry : idToBackend.entrySet()) {
            Long backendID = entry.getKey();
            Backend backend = entry.getValue();
            LOG.debug("backend: {}-{}-{}", backendID, backend.getHost(), backend.getBePort());
        }
    }
}
// Convenience wrappers around the coordinator-wide lock.
private void lock() {
    lock.lock();
}
private void unlock() {
    lock.unlock();
}
// Logs (debug level) the fragment/instance/scan-range layout of this query.
private void traceInstance() {
    if (LOG.isDebugEnabled()) {
        StringBuilder sb = new StringBuilder();
        int idx = 0;
        sb.append("query id=").append(DebugUtil.printId(queryId)).append(",");
        sb.append("fragment=[");
        for (Map.Entry<PlanFragmentId, FragmentExecParams> entry : fragmentExecParamsMap.entrySet()) {
            if (idx++ != 0) {
                sb.append(",");
            }
            sb.append(entry.getKey());
            entry.getValue().appendTo(sb);
        }
        sb.append("]");
        LOG.debug(sb.toString());
    }
}
// Plans and launches the query: computes scan-range assignment and per-fragment
// execution parameters, then asynchronously sends every fragment instance to its
// backend and fails fast (with full cancellation) if any backend rejects its fragment.
public void exec() throws Exception {
    if (!scanNodes.isEmpty()) {
        LOG.debug("debug: in Coordinator::exec. query id: {}, planNode: {}",
                DebugUtil.printId(queryId), scanNodes.get(0).treeToThrift());
    }
    if (!fragments.isEmpty()) {
        LOG.debug("debug: in Coordinator::exec. query id: {}, fragment: {}",
                DebugUtil.printId(queryId), fragments.get(0).toThrift());
    }
    prepare();
    computeScanRangeAssignment();
    computeFragmentExecParams();
    traceInstance();
    // fragments.get(0) is the root fragment; a ResultSink means results flow back here.
    PlanFragmentId topId = fragments.get(0).getFragmentId();
    FragmentExecParams topParams = fragmentExecParamsMap.get(topId);
    if (topParams.fragment.getSink() instanceof ResultSink) {
        receiver = new ResultReceiver(
                topParams.instanceExecParams.get(0).instanceId,
                addressToBackendID.get(topParams.instanceExecParams.get(0).host),
                toBrpcHost(topParams.instanceExecParams.get(0).host),
                queryOptions.query_timeout * 1000);
    } else {
        // Load/insert path: no receiver, but reports are required to collect results.
        this.queryOptions.setIs_report_success(true);
        deltaUrls = Lists.newArrayList();
        loadCounters = Maps.newHashMap();
    }
    // One countdown mark per instance; used to wait for all final reports.
    profileDoneSignal = new MarkedCountDownLatch<TUniqueId, Long>(instanceIds.size());
    for (TUniqueId instanceId : instanceIds) {
        profileDoneSignal.addMark(instanceId, -1L /* value is meaningless */);
    }
    lock();
    try {
        int backendId = 0;
        int profileFragmentId = 0;
        long memoryLimit = queryOptions.getMem_limit();
        for (PlanFragment fragment : fragments) {
            FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
            int instanceNum = params.instanceExecParams.size();
            Preconditions.checkState(instanceNum > 0);
            List<TExecPlanFragmentParams> tParams = params.toThrift(backendId);
            List<Pair<BackendExecState, Future<PExecPlanFragmentResult>>> futures = Lists.newArrayList();
            if (colocateFragmentIds.contains(fragment.getFragmentId().asInt())) {
                // Colocate fragments may run several instances per host: shrink each
                // instance's memory limit so the per-host total stays bounded.
                int rate = Math.min(Config.query_colocate_join_memory_limit_penalty_factor, instanceNum);
                long newmemory = memoryLimit / rate;
                for (TExecPlanFragmentParams tParam : tParams) {
                    tParam.query_options.setMem_limit(newmemory);
                }
            }
            int instanceId = 0;
            for (TExecPlanFragmentParams tParam : tParams) {
                // Send every instance of this fragment before waiting on any result.
                BackendExecState execState =
                        new BackendExecState(fragment.getFragmentId(), instanceId++,
                                profileFragmentId, tParam, this.addressToBackendID);
                backendExecStates.add(execState);
                backendExecStateMap.put(tParam.params.getFragment_instance_id(), execState);
                futures.add(Pair.create(execState, execState.execRemoteFragmentAsync()));
                backendId++;
            }
            for (Pair<BackendExecState, Future<PExecPlanFragmentResult>> pair : futures) {
                TStatusCode code = TStatusCode.INTERNAL_ERROR;
                String errMsg = null;
                try {
                    PExecPlanFragmentResult result = pair.second.get(Config.remote_fragment_exec_timeout_ms,
                            TimeUnit.MILLISECONDS);
                    code = TStatusCode.findByValue(result.status.status_code);
                    if (result.status.error_msgs != null && !result.status.error_msgs.isEmpty()) {
                        errMsg = result.status.error_msgs.get(0);
                    }
                } catch (ExecutionException e) {
                    LOG.warn("catch a execute exception", e);
                    code = TStatusCode.THRIFT_RPC_ERROR;
                } catch (InterruptedException e) {
                    LOG.warn("catch a interrupt exception", e);
                    code = TStatusCode.INTERNAL_ERROR;
                } catch (TimeoutException e) {
                    LOG.warn("catch a timeout exception", e);
                    code = TStatusCode.TIMEOUT;
                }
                if (code != TStatusCode.OK) {
                    // Any failed instance aborts the whole query.
                    if (errMsg == null) {
                        errMsg = "exec rpc error. backend id: " + pair.first.backendId;
                    }
                    queryStatus.setStatus(errMsg);
                    LOG.warn("exec plan fragment failed, errmsg={}, fragmentId={}, backend={}:{}",
                            errMsg, fragment.getFragmentId(),
                            pair.first.address.hostname, pair.first.address.port);
                    cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
                    switch (code) {
                        case TIMEOUT:
                            throw new UserException("query timeout. backend id: " + pair.first.backendId);
                        case THRIFT_RPC_ERROR:
                            // An rpc-level failure suggests the backend is unhealthy.
                            SimpleScheduler.updateBlacklistBackends(pair.first.backendId);
                            throw new RpcException("rpc failed. backend id: " + pair.first.backendId);
                        default:
                            throw new UserException(errMsg);
                    }
                }
            }
            profileFragmentId += 1;
        }
        attachInstanceProfileToFragmentProfile();
    } finally {
        unlock();
    }
}
public List<String> getExportFiles() {
    return exportFiles;
}
// Appends export result files reported by a finished instance (thread-safe).
void updateExportFiles(List<String> files) {
    lock.lock();
    try {
        if (exportFiles == null) {
            exportFiles = Lists.newArrayList();
        }
        exportFiles.addAll(files);
    } finally {
        lock.unlock();
    }
}
// Appends delta urls reported by a finished load instance (thread-safe).
void updateDeltas(List<String> urls) {
    lock.lock();
    try {
        deltaUrls.addAll(urls);
    } finally {
        lock.unlock();
    }
}
// Merges a finished instance's load counters into the coordinator-wide totals
// for DPP_NORMAL_ALL and DPP_ABNORMAL_ALL (thread-safe).
private void updateLoadCounters(Map<String, String> newLoadCounters) {
    lock.lock();
    try {
        long numRowsNormal = counterValue(this.loadCounters, LoadEtlTask.DPP_NORMAL_ALL)
                + counterValue(newLoadCounters, LoadEtlTask.DPP_NORMAL_ALL);
        long numRowsAbnormal = counterValue(this.loadCounters, LoadEtlTask.DPP_ABNORMAL_ALL)
                + counterValue(newLoadCounters, LoadEtlTask.DPP_ABNORMAL_ALL);
        this.loadCounters.put(LoadEtlTask.DPP_NORMAL_ALL, "" + numRowsNormal);
        this.loadCounters.put(LoadEtlTask.DPP_ABNORMAL_ALL, "" + numRowsAbnormal);
    } finally {
        lock.unlock();
    }
}

// Numeric value of the given counter, or 0 when the counter is absent.
// Uses parseLong to avoid the boxing done by Long.valueOf.
private static long counterValue(Map<String, String> counters, String key) {
    String value = counters.get(key);
    return value == null ? 0L : Long.parseLong(value);
}
// Collects tablet commit infos reported by finished load instances (thread-safe).
private void updateCommitInfos(List<TTabletCommitInfo> commitInfos) {
    lock.lock();
    try {
        this.commitInfos.addAll(commitInfos);
    } finally {
        lock.unlock();
    }
}
// Records the first non-OK instance status as the query status and triggers
// cancellation. Later errors, and cancellations arriving after all results were
// already returned to the client, are ignored.
private void updateStatus(Status status, TUniqueId instanceId) {
    lock.lock();
    try {
        // A cancellation after the client has all results is expected; ignore it.
        if (returnedAllResults && status.isCancelled()) {
            return;
        }
        if (status.ok()) {
            return;
        }
        // Keep the first error — it is the most meaningful one.
        if (!queryStatus.ok()) {
            return;
        }
        queryStatus.setStatus(status);
        LOG.warn("one instance report fail throw updateStatus(), need cancel. job id: {}, query id: {}, instance id: {}",
                jobId, DebugUtil.printId(queryId), instanceId != null ? DebugUtil.printId(instanceId) : "NaN");
        cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
    } finally {
        lock.unlock();
    }
}
// Returns the next batch of results from the root fragment's receiver.
// Throws when the query has failed; once EOS is seen, may cancel remaining
// instances if a limit has already been satisfied.
public RowBatch getNext() throws Exception {
    if (receiver == null) {
        throw new UserException("There is no receiver.");
    }
    RowBatch resultBatch;
    Status status = new Status();
    resultBatch = receiver.getNext(status);
    if (!status.ok()) {
        LOG.warn("get next fail, need cancel. query id: {}", DebugUtil.printId(queryId));
    }
    updateStatus(status, null /* no instance id */);
    // Snapshot the query status under the lock so a consistent error is reported.
    Status copyStatus = null;
    lock();
    try {
        copyStatus = new Status(queryStatus);
    } finally {
        unlock();
    }
    if (!copyStatus.ok()) {
        if (Strings.isNullOrEmpty(copyStatus.getErrorMsg())) {
            copyStatus.rewriteErrorMsg();
        }
        if (copyStatus.isRpcError()) {
            throw new RpcException(copyStatus.getErrorMsg());
        } else {
            String errMsg = copyStatus.getErrorMsg();
            LOG.warn("query failed: {}", errMsg);
            // Strip the trailing "host ..." detail from backend messages before
            // surfacing the error to the user.
            int hostIndex = errMsg.indexOf("host");
            if (hostIndex != -1) {
                errMsg = errMsg.substring(0, hostIndex);
            }
            throw new UserException(errMsg);
        }
    }
    if (resultBatch.isEos()) {
        this.returnedAllResults = true;
        // If enough rows satisfied the limit, cancel the remaining instances
        // instead of letting them run to completion.
        Long numLimitRows = fragments.get(0).getPlanRoot().getLimit();
        boolean hasLimit = numLimitRows > 0;
        if (!isBlockQuery && instanceIds.size() > 1 && hasLimit && numReceivedRows >= numLimitRows) {
            LOG.debug("no block query, return num >= limit rows, need cancel");
            cancelInternal(PPlanFragmentCancelReason.LIMIT_REACH);
        }
    } else {
        numReceivedRows += resultBatch.getBatch().getRowsSize();
    }
    return resultBatch;
}
// Cancels the query on behalf of an external caller. No-op if the query has
// already failed (cancellation was performed at that point).
public void cancel() {
    lock();
    try {
        if (!queryStatus.ok()) {
            return;
        } else {
            queryStatus.setStatus(Status.CANCELLED);
        }
        LOG.warn("cancel execution of query, this is outside invoke");
        cancelInternal(PPlanFragmentCancelReason.USER_CANCEL);
    } finally {
        unlock();
    }
}
// Cancels the result receiver and every remote fragment, then releases any
// waiter on profileDoneSignal so join()/endProfile() cannot block forever.
private void cancelInternal(PPlanFragmentCancelReason cancelReason) {
    if (null != receiver) {
        receiver.cancel();
    }
    cancelRemoteFragmentsAsync(cancelReason);
    if (profileDoneSignal != null) {
        // Force the latch to zero; remaining marks are the unfinished instances.
        profileDoneSignal.countDownToZero(new Status());
        LOG.info("unfinished instance: {}", profileDoneSignal.getLeftMarks().stream().map(e->DebugUtil.printId(e.getKey())).toArray());
    }
}
// Best-effort asynchronous cancellation of every launched-but-unfinished
// fragment instance. Instances never initiated, already done, or already
// cancelled are skipped; rpc failures blacklist the backend but do not abort.
private void cancelRemoteFragmentsAsync(PPlanFragmentCancelReason cancelReason) {
    for (BackendExecState backendExecState : backendExecStates) {
        TNetworkAddress address = backendExecState.getBackendAddress();
        LOG.debug("cancelRemoteFragments initiated={} done={} hasCanceled={} ip={} port={} fragment instance id={}, reason: {}",
                backendExecState.initiated, backendExecState.done, backendExecState.hasCanceled,
                address.hostname, address.port, DebugUtil.printId(backendExecState.getFragmentInstanceId()),
                cancelReason.name());
        backendExecState.lock();
        try {
            if (!backendExecState.initiated) {
                continue;
            }
            if (backendExecState.done) {
                continue;
            }
            if (backendExecState.hasCanceled) {
                continue;
            }
            TNetworkAddress brpcAddress = toBrpcHost(address);
            try {
                BackendServiceProxy.getInstance().cancelPlanFragmentAsync(
                        brpcAddress, backendExecState.getFragmentInstanceId(), cancelReason);
            } catch (RpcException e) {
                LOG.warn("cancel plan fragment get a exception, address={}:{}",
                        brpcAddress.getHostname(), brpcAddress.getPort());
                // A failed cancel rpc implies the backend is unreachable.
                SimpleScheduler.updateBlacklistBackends(addressToBackendID.get(brpcAddress));
            }
            backendExecState.hasCanceled = true;
        } catch (Exception e) {
            LOG.warn("catch a exception", e);
        } finally {
            backendExecState.unlock();
        }
    }
}
// Assigns a unique instance id to every fragment instance, then wires sinks to
// their destinations: accumulates per-exchange sender counts and records the
// (instance id, rpc host, brpc host) of every destination instance.
private void computeFragmentExecParams() throws Exception {
    computeFragmentHosts();
    instanceIds.clear();
    for (FragmentExecParams params : fragmentExecParamsMap.values()) {
        LOG.debug("fragment {} has instances {}", params.fragment.getFragmentId(), params.instanceExecParams.size());
        for (int j = 0; j < params.instanceExecParams.size(); ++j) {
            // Instance ids share the query id's hi part; lo is sequential after it.
            TUniqueId instanceId = new TUniqueId();
            instanceId.setHi(queryId.hi);
            instanceId.setLo(queryId.lo + instanceIds.size() + 1);
            params.instanceExecParams.get(j).instanceId = instanceId;
            instanceIds.add(instanceId);
        }
    }
    for (FragmentExecParams params : fragmentExecParamsMap.values()) {
        PlanFragment destFragment = params.fragment.getDestFragment();
        if (destFragment == null) {
            // Root fragment: nothing downstream.
            continue;
        }
        FragmentExecParams destParams = fragmentExecParamsMap.get(destFragment.getFragmentId());
        DataSink sink = params.fragment.getSink();
        PlanNodeId exchId = sink.getExchNodeId();
        // Sum up the number of sender instances feeding each exchange node.
        if (destParams.perExchNumSenders.get(exchId.asInt()) == null) {
            destParams.perExchNumSenders.put(exchId.asInt(), params.instanceExecParams.size());
        } else {
            destParams.perExchNumSenders.put(exchId.asInt(),
                    params.instanceExecParams.size() + destParams.perExchNumSenders.get(exchId.asInt()));
        }
        for (int j = 0; j < destParams.instanceExecParams.size(); ++j) {
            TPlanFragmentDestination dest = new TPlanFragmentDestination();
            dest.fragment_instance_id = destParams.instanceExecParams.get(j).instanceId;
            dest.server = toRpcHost(destParams.instanceExecParams.get(j).host);
            dest.setBrpc_server(toBrpcHost(destParams.instanceExecParams.get(j).host));
            params.destinations.add(dest);
        }
    }
}
// Resolves the backend serving the given be-port endpoint and returns its
// thrift RPC address. Throws UserException when no such backend is alive.
private TNetworkAddress toRpcHost(TNetworkAddress host) throws Exception {
    Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort(
            host.getHostname(), host.getPort());
    if (backend == null) {
        throw new UserException("there is no scanNode Backend");
    }
    return new TNetworkAddress(backend.getHost(), backend.getBeRpcPort());
}
// Resolves the backend serving the given be-port endpoint and returns its brpc
// address, or null when the backend does not expose brpc (negative port).
// Throws UserException when no such backend is alive.
private TNetworkAddress toBrpcHost(TNetworkAddress host) throws Exception {
    Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort(
            host.getHostname(), host.getPort());
    if (backend == null) {
        throw new UserException("there is no scanNode Backend");
    }
    int brpcPort = backend.getBrpcPort();
    return brpcPort < 0 ? null : new TNetworkAddress(backend.getHost(), brpcPort);
}
// Returns true if the plan tree rooted at node contains a UnionNode, without
// crossing fragment boundaries (ExchangeNode children are not descended into).
//
// Fix: the original returned the recursion result of the FIRST non-exchange
// child from inside the loop, so a union node located under any later sibling
// was never found. Now every child is examined.
private boolean containsUnionNode(PlanNode node) {
    if (node instanceof UnionNode) {
        return true;
    }
    for (PlanNode child : node.getChildren()) {
        if (child instanceof ExchangeNode) {
            // Exchange marks another fragment; do not descend.
            continue;
        }
        // The recursion's base case also covers "child instanceof UnionNode".
        if (containsUnionNode(child)) {
            return true;
        }
    }
    return false;
}
// Returns true if the fragment containing node takes part in a colocate join
// (and remembers its fragment id in colocateFragmentIds). Colocate joins can be
// disabled globally (Config) or per-session.
//
// Fix: the original returned the recursion result of the FIRST child from
// inside the loop, so colocate hash-join nodes under later siblings were never
// detected. Now every child is examined.
private boolean isColocateJoin(PlanNode node) {
    if (Config.disable_colocate_join) {
        return false;
    }
    if (ConnectContext.get() != null) {
        if (ConnectContext.get().getSessionVariable().isDisableColocateJoin()) {
            return false;
        }
    }
    // Already classified as colocate earlier in this query.
    if (colocateFragmentIds.contains(node.getFragmentId().asInt())) {
        return true;
    }
    if (node instanceof HashJoinNode) {
        HashJoinNode joinNode = (HashJoinNode) node;
        if (joinNode.isColocate()) {
            colocateFragmentIds.add(joinNode.getFragmentId().asInt());
            return true;
        }
    }
    for (PlanNode childNode : node.getChildren()) {
        if (isColocateJoin(childNode)) {
            return true;
        }
    }
    return false;
}
// Walks down first children until reaching a leaf or a fragment boundary
// (ExchangeNode) and returns that node.
private PlanNode findLeftmostNode(PlanNode plan) {
    PlanNode current = plan;
    while (!(current instanceof ExchangeNode) && current.getChildren().size() != 0) {
        current = current.getChild(0);
    }
    return current;
}
// Returns the value mapped to key, first inserting defaultVal when the key is
// absent (callers always pass a non-null default). Expressed via the Java 8
// Map.computeIfAbsent idiom, which the file already uses lambdas alongside.
private <K, V> V findOrInsert(HashMap<K, V> m, final K key, final V defaultVal) {
    return m.computeIfAbsent(key, k -> defaultVal);
}
// Overload of findOrInsert specialized for scan-range maps, so callers can pass
// an ArrayList default without generic-inference friction.
private List<TScanRangeParams> findOrInsert(Map<Integer, List<TScanRangeParams>> m, Integer key,
        ArrayList<TScanRangeParams> defaultVal) {
    List<TScanRangeParams> value = m.get(key);
    if (value == null) {
        m.put(key, defaultVal);
        value = defaultVal;
    }
    return value;
}
// Weight of a scan range for load balancing. Every range currently counts as 1,
// i.e. ranges are balanced by count rather than by actual data size.
private long getScanRangeLength(final TScanRange scanRange) {
    return 1;
}
// Decides which backend executes every scan range: colocate-join fragments are
// assigned bucket-by-bucket, all other scans go through the greedy scheduler.
private void computeScanRangeAssignment() throws Exception {
    for (ScanNode scanNode : scanNodes) {
        List<TScanRangeLocations> locations = scanNode.getScanRangeLocations(0);
        if (locations == null) {
            // The scan node may have no ranges (e.g. nothing to scan).
            continue;
        }
        FragmentScanRangeAssignment assignment =
                fragmentExecParamsMap.get(scanNode.getFragmentId()).scanRangeAssignment;
        if (isColocateJoin(scanNode.getFragment().getPlanRoot())) {
            computeScanRangeAssignmentByColocate((OlapScanNode) scanNode, assignment);
        } else {
            computeScanRangeAssignmentByScheduler(scanNode, locations, assignment);
        }
    }
}
// Colocate-join assignment: every bucket sequence is pinned to a single backend
// (chosen once per bucket), and that bucket's scan ranges are grouped by scan
// node id in bucketSeqToScanRange.
private void computeScanRangeAssignmentByColocate(
        final OlapScanNode scanNode,
        FragmentScanRangeAssignment assignment) throws Exception {
    for(Integer bucketSeq: scanNode.bucketSeq2locations.keySet()) {
        List<TScanRangeLocations> locations = scanNode.bucketSeq2locations.get(bucketSeq);
        // Choose the executing host once per bucket so all colocated tables'
        // ranges for the same bucket land on the same backend.
        if (!bucketSeqToAddress.containsKey(bucketSeq)) {
            getExecHostPortForBucketSeq(locations.get(0), bucketSeq);
        }
        for(TScanRangeLocations location: locations) {
            Map<Integer, List<TScanRangeParams>> scanRanges =
                    findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<Integer, List<TScanRangeParams>>());
            List<TScanRangeParams> scanRangeParamsList =
                    findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>());
            TScanRangeParams scanRangeParams = new TScanRangeParams();
            scanRangeParams.scan_range = location.scan_range;
            scanRangeParamsList.add(scanRangeParams);
        }
    }
}
// Picks a random replica of the bucket's first scan range, resolves it to a
// live backend via the scheduler, and records the bucket -> host and
// host -> backend-id mappings.
//
// Fix: reuse the class-level instanceRandom instead of allocating a new Random
// on every call (consistent with the rest of the class; Random is thread-safe).
private void getExecHostPortForBucketSeq(TScanRangeLocations seqLocation, Integer bucketSeq) throws Exception {
    int randomLocation = instanceRandom.nextInt(seqLocation.locations.size());
    Reference<Long> backendIdRef = new Reference<Long>();
    TNetworkAddress execHostPort = SimpleScheduler.getHost(seqLocation.locations.get(randomLocation).backend_id, seqLocation.locations, this.idToBackend, backendIdRef);
    if (execHostPort == null) {
        throw new UserException("there is no scanNode Backend");
    }
    this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
    this.bucketSeqToAddress.put(bucketSeq, execHostPort);
}
// Greedy balancing: each scan range is assigned to the replica whose host
// currently has the fewest assigned "bytes" (each range weighs 1 — see
// getScanRangeLength), then resolved to a live backend by the scheduler.
private void computeScanRangeAssignmentByScheduler(
        final ScanNode scanNode,
        final List<TScanRangeLocations> locations,
        FragmentScanRangeAssignment assignment) throws Exception {
    HashMap<TNetworkAddress, Long> assignedBytesPerHost = Maps.newHashMap();
    for (TScanRangeLocations scanRangeLocations : locations) {
        // Pick the replica on the least-loaded host so far.
        Long minAssignedBytes = Long.MAX_VALUE;
        TScanRangeLocation minLocation = null;
        for (final TScanRangeLocation location : scanRangeLocations.getLocations()) {
            Long assignedBytes = findOrInsert(assignedBytesPerHost, location.server, 0L);
            if (assignedBytes < minAssignedBytes) {
                minAssignedBytes = assignedBytes;
                minLocation = location;
            }
        }
        Long scanRangeLength = getScanRangeLength(scanRangeLocations.scan_range);
        assignedBytesPerHost.put(minLocation.server,
                assignedBytesPerHost.get(minLocation.server) + scanRangeLength);
        // The scheduler may fall back to a different replica if the chosen
        // backend is not alive.
        Reference<Long> backendIdRef = new Reference<Long>();
        TNetworkAddress execHostPort = SimpleScheduler.getHost(minLocation.backend_id,
                scanRangeLocations.getLocations(), this.idToBackend, backendIdRef);
        if (execHostPort == null) {
            throw new UserException("there is no scanNode Backend");
        }
        this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
        Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(assignment, execHostPort,
                new HashMap<Integer, List<TScanRangeParams>>());
        List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(),
                new ArrayList<TScanRangeParams>());
        TScanRangeParams scanRangeParams = new TScanRangeParams();
        scanRangeParams.scan_range = scanRangeLocations.scan_range;
        scanRangeParams.setVolume_id(minLocation.volume_id);
        scanRangeParamsList.add(scanRangeParams);
    }
}
// Callback for a backend's exec-status report. Updates the instance's profile,
// propagates failures, collects load/export artifacts from the instance's final
// report, and counts down profileDoneSignal when the instance is done.
public void updateFragmentExecStatus(TReportExecStatusParams params) {
    if (params.backend_num >= backendExecStates.size()) {
        LOG.warn("unknown backend number: {}, expected less than: {}",
                params.backend_num, backendExecStates.size());
        return;
    }
    boolean done = false;
    BackendExecState execState = backendExecStates.get(params.backend_num);
    execState.lock();
    try {
        if (execState.done) {
            // Duplicate or late report after this instance already finished.
            return;
        }
        if (params.isSetProfile()) {
            execState.profile.update(params.profile);
        }
        done = params.done;
        execState.done = params.done;
    } finally {
        execState.unlock();
    }
    if (LOG.isDebugEnabled()) {
        StringBuilder builder = new StringBuilder();
        execState.profile().prettyPrint(builder, "");
        LOG.debug("profile for query_id={} instance_id={}\n{}",
                DebugUtil.printId(queryId),
                DebugUtil.printId(params.getFragment_instance_id()),
                builder.toString());
    }
    Status status = new Status(params.status);
    // Post-EOS cancellations are expected and are not treated as failures.
    if (!(returnedAllResults && status.isCancelled()) && !status.ok()) {
        LOG.warn("one instance report fail, query_id={} instance_id={}",
                DebugUtil.printId(queryId), DebugUtil.printId(params.getFragment_instance_id()));
        updateStatus(status, params.getFragment_instance_id());
    }
    if (done) {
        // Artifacts are only collected from an instance's final report.
        if (params.isSetDelta_urls()) {
            updateDeltas(params.getDelta_urls());
        }
        if (params.isSetLoad_counters()) {
            updateLoadCounters(params.getLoad_counters());
        }
        if (params.isSetTracking_url()) {
            trackingUrl = params.tracking_url;
        }
        if (params.isSetExport_files()) {
            updateExportFiles(params.export_files);
        }
        if (params.isSetCommitInfos()) {
            updateCommitInfos(params.getCommitInfos());
        }
        profileDoneSignal.markedCountDown(params.getFragment_instance_id(), -1L);
    }
    if (params.isSetLoaded_rows()) {
        Catalog.getCurrentCatalog().getLoadManager().updateJobLoadedRows(jobId, params.query_id, params.loaded_rows);
    }
    return;
}
// Waits briefly for all instance reports (when reporting is enabled) and then
// sorts the children of the fragment profiles for stable output.
public void endProfile() {
    if (backendExecStates.isEmpty()) {
        return;
    }
    if (needReport) {
        try {
            // Bounded wait: stale/lost reports must not block profile collection.
            profileDoneSignal.await(2, TimeUnit.SECONDS);
        } catch (InterruptedException e1) {
            LOG.warn("signal await error", e1);
        }
    }
    // NOTE(review): the loop starts at index 1, so fragment 0 (the root) is never
    // sorted — presumably to preserve result order; confirm before changing.
    for (int i = 1; i < fragmentProfile.size(); ++i) {
        fragmentProfile.get(i).sortChildren();
    }
}
// Waits up to the given number of seconds for every instance to deliver its
// final report. Returns true when all instances finished within the timeout.
//
// Fix: the original silently swallowed InterruptedException, losing the
// thread's interrupt status; the flag is now restored so callers can observe it.
public boolean join(int seconds) {
    try {
        return profileDoneSignal.await(seconds, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // Preserve the interrupt for the caller instead of discarding it.
        Thread.currentThread().interrupt();
    }
    return false;
}
// True once every instance has delivered its final report (or the latch was
// forced to zero by cancellation).
public boolean isDone() {
    return profileDoneSignal.getCount() == 0;
}
// Backend address -> (scan node id -> scan ranges) for one fragment.
class FragmentScanRangeAssignment
        extends HashMap<TNetworkAddress, Map<Integer, List<TScanRangeParams>>> {
}
// Colocate joins: bucket sequence -> (scan node id -> scan ranges).
class BucketSeqToScanRange extends HashMap<Integer, Map<Integer, List<TScanRangeParams>>> {
}
private BucketSeqToScanRange bucketSeqToScanRange = new BucketSeqToScanRange();
// Bucket sequence -> backend chosen to execute that bucket (colocate joins only).
private Map<Integer, TNetworkAddress> bucketSeqToAddress = Maps.newHashMap();
// Ids of fragments that take part in a colocate join.
private Set<Integer> colocateFragmentIds = new HashSet<>();
// Tracks one fragment instance sent to a backend: its rpc params, runtime
// profile, target address and the initiated/done/cancelled state. The
// per-state lock serializes report handling against cancellation.
public class BackendExecState {
    TExecPlanFragmentParams rpcParams;
    private PlanFragmentId fragmentId;
    // Index of this instance within its fragment (not the global instance id).
    private int instanceId;
    private boolean initiated;
    private boolean done;
    private boolean hasCanceled;
    private Lock lock = new ReentrantLock();
    private int profileFragmentId;
    RuntimeProfile profile;
    TNetworkAddress address;
    Long backendId;
    public int profileFragmentId() {
        return profileFragmentId;
    }
    public boolean initiated() {
        return initiated;
    }
    public RuntimeProfile profile() {
        return profile;
    }
    public void lock() {
        lock.lock();
    }
    public void unlock() {
        lock.unlock();
    }
    public int getInstanceId() {
        return instanceId;
    }
    public PlanFragmentId getFragmentId() {
        return fragmentId;
    }
    public BackendExecState(PlanFragmentId fragmentId, int instanceId, int profileFragmentId,
            TExecPlanFragmentParams rpcParams, Map<TNetworkAddress, Long> addressToBackendID) {
        this.profileFragmentId = profileFragmentId;
        this.fragmentId = fragmentId;
        this.instanceId = instanceId;
        this.rpcParams = rpcParams;
        this.initiated = false;
        this.done = false;
        // Resolve host and backend id from the already-computed placement.
        this.address = fragmentExecParamsMap.get(fragmentId).instanceExecParams.get(instanceId).host;
        this.backendId = addressToBackendID.get(address);
        String name = "Instance " + DebugUtil.printId(fragmentExecParamsMap.get(fragmentId)
                .instanceExecParams.get(instanceId).instanceId) + " (host=" + address + ")";
        this.profile = new RuntimeProfile(name);
        this.hasCanceled = false;
    }
    public TNetworkAddress getBackendAddress() {
        return address;
    }
    public TUniqueId getFragmentInstanceId() {
        return this.rpcParams.params.getFragment_instance_id();
    }
    // Sends this fragment instance to its backend over brpc without waiting for
    // the result; a failed rpc blacklists the backend before rethrowing.
    public Future<PExecPlanFragmentResult> execRemoteFragmentAsync() throws TException, RpcException {
        TNetworkAddress brpcAddress = null;
        try {
            brpcAddress = toBrpcHost(address);
        } catch (Exception e) {
            throw new TException(e.getMessage());
        }
        initiated = true;
        try {
            return BackendServiceProxy.getInstance().execPlanFragmentAsync(brpcAddress, rpcParams);
        } catch (RpcException e) {
            SimpleScheduler.updateBlacklistBackends(backendId);
            throw e;
        }
    }
}
protected class FragmentExecParams {
public PlanFragment fragment;
// Where this fragment's sink sends data (one entry per destination instance).
public List<TPlanFragmentDestination> destinations = Lists.newArrayList();
// Exchange node id -> number of sender instances feeding it.
public Map<Integer, Integer> perExchNumSenders = Maps.newHashMap();
// Fragments whose DataStreamSink targets this fragment.
public List<PlanFragmentId> inputFragments = Lists.newArrayList();
public List<FInstanceExecParam> instanceExecParams = Lists.newArrayList();
// Host -> (scan node id -> scan ranges) assignment for this fragment.
public FragmentScanRangeAssignment scanRangeAssignment = new FragmentScanRangeAssignment();
public FragmentExecParams(PlanFragment fragment) {
    this.fragment = fragment;
}
// Serializes this fragment into one TExecPlanFragmentParams per instance.
// backendNum is the first global backend_num to use; consecutive instances get
// consecutive numbers (the number indexes backendExecStates on report).
List<TExecPlanFragmentParams> toThrift(int backendNum) {
    List<TExecPlanFragmentParams> paramsList = Lists.newArrayList();
    for (int i = 0; i < instanceExecParams.size(); ++i) {
        final FInstanceExecParam instanceExecParam = instanceExecParams.get(i);
        TExecPlanFragmentParams params = new TExecPlanFragmentParams();
        params.setProtocol_version(PaloInternalServiceVersion.V1);
        params.setFragment(fragment.toThrift());
        params.setDesc_tbl(descTable);
        params.setParams(new TPlanFragmentExecParams());
        params.setResource_info(tResourceInfo);
        params.params.setQuery_id(queryId);
        params.params.setFragment_instance_id(instanceExecParam.instanceId);
        Map<Integer, List<TScanRangeParams>> scanRanges = instanceExecParam.perNodeScanRanges;
        if (scanRanges == null) {
            // Fragments without scan nodes still need a (possibly empty) map.
            scanRanges = Maps.newHashMap();
        }
        params.params.setPer_node_scan_ranges(scanRanges);
        params.params.setPer_exch_num_senders(perExchNumSenders);
        params.params.setDestinations(destinations);
        params.params.setSender_id(i);
        params.params.setNum_senders(instanceExecParams.size());
        params.setCoord(coordAddress);
        params.setBackend_num(backendNum++);
        params.setQuery_globals(queryGlobals);
        params.setQuery_options(queryOptions);
        params.params.setSend_query_statistics_with_every_batch(
                fragment.isTransferQueryStatisticsWithEveryBatch());
        if (queryOptions.getQuery_type() == TQueryType.LOAD) {
            // Load queries additionally carry the error-hub configuration.
            LoadErrorHub.Param param = Catalog.getCurrentCatalog().getLoadInstance().getLoadErrorHubInfo();
            if (param != null) {
                TLoadErrorHubInfo info = param.toThrift();
                if (info != null) {
                    params.setLoad_error_hub_info(info);
                }
            }
        }
        paramsList.add(params);
    }
    return paramsList;
}
// Appends a compact, human-readable description of the given scan ranges to sb,
// e.g. "range=[{tid=1,ver=2}{ index=idx, shardid=0}]". Used only for trace logging.
public void appendScanRange(StringBuilder sb, List<TScanRangeParams> params) {
    sb.append("range=[");
    int idx = 0;
    for (TScanRangeParams range : params) {
        TPaloScanRange paloScanRange = range.getScan_range().getPalo_scan_range();
        if (paloScanRange != null) {
            if (idx++ != 0) {
                sb.append(",");
            }
            // Olap tablet scan range: print tablet id and version.
            sb.append("{tid=").append(paloScanRange.getTablet_id())
                    .append(",ver=").append(paloScanRange.getVersion()).append("}");
        }
        TEsScanRange esScanRange = range.getScan_range().getEs_scan_range();
        if (esScanRange != null) {
            // Elasticsearch scan range: print index name and shard id.
            // NOTE(review): es entries are not comma-separated like palo entries — confirm intended.
            sb.append("{ index=").append(esScanRange.getIndex())
                    .append(", shardid=").append(esScanRange.getShard_id())
                    .append("}");
        }
    }
    sb.append("]");
}
// Appends a trace description of this fragment and each of its instances
// (instance id, host, assigned scan ranges) to sb. Used by traceInstance().
public void appendTo(StringBuilder sb) {
    sb.append("{plan=");
    fragment.getPlanRoot().appendTrace(sb);
    sb.append(",instance=[");
    for (int i = 0; i < instanceExecParams.size(); ++i) {
        if (i != 0) {
            sb.append(",");
        }
        TNetworkAddress address = instanceExecParams.get(i).host;
        Map<Integer, List<TScanRangeParams>> scanRanges =
                scanRangeAssignment.get(address);
        sb.append("{");
        sb.append("id=").append(DebugUtil.printId(instanceExecParams.get(i).instanceId));
        sb.append(",host=").append(instanceExecParams.get(i).host);
        if (scanRanges == null) {
            // No scan ranges assigned to this host (e.g. a pure exchange fragment).
            sb.append("}");
            continue;
        }
        sb.append(",range=[");
        int eIdx = 0;
        for (Map.Entry<Integer, List<TScanRangeParams>> entry : scanRanges.entrySet()) {
            if (eIdx++ != 0) {
                sb.append(",");
            }
            // Key is the scan node id; value is that node's ranges on this host.
            sb.append("id").append(entry.getKey()).append(",");
            appendScanRange(sb, entry.getValue());
        }
        sb.append("]");
        sb.append("}");
    }
    sb.append("]");
    sb.append("}");
}
} |
Note that Java assert statements are only executed when the JVM is started with -ea (as is typical when running unit tests); they are silently skipped in production. | private void depthFirstTraverse(NewDocumentType docType) {
if (sortedTypes.containsKey(docType.getName())) {
return;
}
for (NewDocumentType.Name referenceDocTypeName : docType.getDocumentReferences()) {
NewDocumentType referenceDocType = unsortedTypes.get(referenceDocTypeName.getName());
assert (referenceDocType != null);
depthFirstTraverse(referenceDocType);
}
sortedTypes.put(docType.getName(), docType);
} | assert (referenceDocType != null); | private void depthFirstTraverse(NewDocumentType docType) {
if (sortedTypes.containsKey(docType.getName())) {
return;
}
for (NewDocumentType.Name referenceDocTypeName : docType.getDocumentReferences()) {
NewDocumentType referenceDocType = unsortedTypes.get(referenceDocTypeName.getName());
depthFirstTraverse(referenceDocType);
}
sortedTypes.put(docType.getName(), docType);
} | class TopologicalDocumentTypeSorter {
private final Map<String, NewDocumentType> unsortedTypes = new LinkedHashMap<>();
private final Map<String, NewDocumentType> sortedTypes = new LinkedHashMap<>();
/**
 * Indexes the given document types by name and topologically traverses them so
 * that every type ends up in sortedTypes after the types it references.
 */
private TopologicalDocumentTypeSorter(Collection<NewDocumentType> documentTypes) {
    // Iterable.forEach directly; wrapping the collections in a stream added nothing.
    documentTypes.forEach(docType -> unsortedTypes.put(docType.getName(), docType));
    unsortedTypes.values().forEach(this::depthFirstTraverse);
}
/**
 * Returns the given document types in topological order: referenced types
 * appear before the types that reference them.
 */
public static List<NewDocumentType> sort(Collection<NewDocumentType> documentTypes) {
    TopologicalDocumentTypeSorter sorter = new TopologicalDocumentTypeSorter(documentTypes);
    // The ArrayList copy constructor is simpler and cheaper than
    // stream().collect(Collectors.toList()) for a plain copy.
    return new ArrayList<>(sorter.sortedTypes.values());
}
} | class TopologicalDocumentTypeSorter {
private final Map<String, NewDocumentType> unsortedTypes = new LinkedHashMap<>();
private final Map<String, NewDocumentType> sortedTypes = new LinkedHashMap<>();
/**
 * Builds the name-to-type index from the given types, then topologically
 * traverses every known type so referenced types are sorted first.
 */
private TopologicalDocumentTypeSorter(Collection<NewDocumentType> documentTypes) {
    for (NewDocumentType documentType : documentTypes) {
        unsortedTypes.put(documentType.getName(), documentType);
    }
    for (NewDocumentType documentType : unsortedTypes.values()) {
        depthFirstTraverse(documentType);
    }
}
/**
 * Topologically sorts the given document types; referenced types come before
 * the types that reference them.
 */
public static List<NewDocumentType> sort(Collection<NewDocumentType> documentTypes) {
    TopologicalDocumentTypeSorter sorter = new TopologicalDocumentTypeSorter(documentTypes);
    List<NewDocumentType> ordered = new ArrayList<>(sorter.sortedTypes.values());
    return ordered;
}
} |
`return new ArrayList<>(sortedTypes.values());` | public static List<NewDocumentType> sort(Collection<NewDocumentType> documentTypes) {
TopologicalDocumentTypeSorter sorter = new TopologicalDocumentTypeSorter(documentTypes);
return sorter.sortedTypes.values().stream().collect(Collectors.toList());
} | return sorter.sortedTypes.values().stream().collect(Collectors.toList()); | public static List<NewDocumentType> sort(Collection<NewDocumentType> documentTypes) {
TopologicalDocumentTypeSorter sorter = new TopologicalDocumentTypeSorter(documentTypes);
return new ArrayList<>(sorter.sortedTypes.values());
} | class TopologicalDocumentTypeSorter {
private final Map<String, NewDocumentType> unsortedTypes = new LinkedHashMap<>();
private final Map<String, NewDocumentType> sortedTypes = new LinkedHashMap<>();
private TopologicalDocumentTypeSorter(Collection<NewDocumentType> documentTypes) {
documentTypes.stream().forEach(docType -> unsortedTypes.put(docType.getName(), docType));
unsortedTypes.values().stream().forEach(docType -> depthFirstTraverse(docType));
}
/**
 * Depth-first post-order visit: emits every type this type references into
 * sortedTypes before the type itself. Already-emitted types are skipped.
 *
 * @throws IllegalStateException if a referenced document type is unknown
 */
private void depthFirstTraverse(NewDocumentType docType) {
    if (sortedTypes.containsKey(docType.getName())) {
        return; // already emitted (e.g. shared by several referrers)
    }
    for (NewDocumentType.Name referenceDocTypeName : docType.getDocumentReferences()) {
        NewDocumentType referenceDocType = unsortedTypes.get(referenceDocTypeName.getName());
        // Was a bare assert: asserts run only with -ea, so in production a
        // missing type surfaced as an opaque NPE inside the recursion.
        if (referenceDocType == null) {
            throw new IllegalStateException("Unknown document type referenced: "
                    + referenceDocTypeName.getName());
        }
        depthFirstTraverse(referenceDocType);
    }
    // NOTE(review): no cycle detection — a reference cycle would recurse until
    // StackOverflowError; presumably cycles are rejected upstream, confirm.
    sortedTypes.put(docType.getName(), docType);
}
} | class TopologicalDocumentTypeSorter {
private final Map<String, NewDocumentType> unsortedTypes = new LinkedHashMap<>();
private final Map<String, NewDocumentType> sortedTypes = new LinkedHashMap<>();
private TopologicalDocumentTypeSorter(Collection<NewDocumentType> documentTypes) {
documentTypes.forEach(docType -> unsortedTypes.put(docType.getName(), docType));
unsortedTypes.values().forEach(docType -> depthFirstTraverse(docType));
}
/**
 * Depth-first post-order visit ensuring referenced types are added to
 * sortedTypes before the types that reference them.
 *
 * @throws IllegalStateException if a referenced document type is unknown
 */
private void depthFirstTraverse(NewDocumentType docType) {
    if (sortedTypes.containsKey(docType.getName())) {
        return;
    }
    for (NewDocumentType.Name referenceDocTypeName : docType.getDocumentReferences()) {
        NewDocumentType referenceDocType = unsortedTypes.get(referenceDocTypeName.getName());
        if (referenceDocType == null) {
            // Fail fast with context instead of an NPE inside the recursive call.
            throw new IllegalStateException("Unknown document type referenced: "
                    + referenceDocTypeName.getName());
        }
        depthFirstTraverse(referenceDocType);
    }
    sortedTypes.put(docType.getName(), docType);
}
} |
Minor: Change the signature of the called method to be void as well? | public void remove(String hostname) {
Node nodeToRemove = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
List<Node.State> legalStates = dynamicAllocationEnabled() ?
Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty) :
Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked);
if (! legalStates.contains(nodeToRemove.state())) {
throw new IllegalArgumentException("Can only remove node from following states: " +
legalStates.stream().map(Node.State::name).collect(Collectors.joining(", ")));
}
if (nodeToRemove.state().equals(Node.State.dirty)) {
if (!(nodeToRemove.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))) {
throw new IllegalArgumentException("Only docker nodes can be deleted from state dirty");
}
}
try (Mutex lock = lock(nodeToRemove)) {
db.removeNode(nodeToRemove.state(), hostname);
}
} | db.removeNode(nodeToRemove.state(), hostname); | public void remove(String hostname) {
Node nodeToRemove = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
List<Node.State> legalStates = dynamicAllocationEnabled() ?
Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty) :
Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked);
if (! legalStates.contains(nodeToRemove.state())) {
throw new IllegalArgumentException("Can only remove node from following states: " +
legalStates.stream().map(Node.State::name).collect(Collectors.joining(", ")));
}
if (nodeToRemove.state().equals(Node.State.dirty)) {
if (!(nodeToRemove.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))) {
throw new IllegalArgumentException("Only docker nodes can be deleted from state dirty");
}
}
try (Mutex lock = lock(nodeToRemove)) {
db.removeNode(nodeToRemove.state(), hostname);
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Curator curator;
private final Clock clock;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final DockerImage dockerImage;
/**
* Creates a node repository form a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), new DockerImage(config.dockerImage()));
}
/**
* Creates a node repository form a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone);
this.curator = curator;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.dockerImage = dockerImage;
for (Node.State state : Node.State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this */
public DockerImage dockerImage() { return dockerImage; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
/**
 * Returns all nodes in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, all nodes are returned
 * @return a new mutable list of the matching nodes
 */
public List<Node> getNodes(Node.State ... inState) {
    // stream().collect(Collectors.toList()) was a roundabout way to copy a list.
    return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/**
* Finds and returns all nodes that are children of the given parent node
*
* @param hostname Parent hostname
* @return List of child nodes
*/
public List<Node> getChildNodes(String hostname) {
return db.getNodes().stream()
.filter(node -> node.parentHostname()
.map(parentHostname -> parentHostname.equals(hostname))
.orElse(false))
.collect(Collectors.toList());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
* Returns a set of nodes that should be trusted by the given node.
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<String> trustedNetworks = new HashSet<>();
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedNodes.addAll(getConfigNodes());
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
break;
case proxy:
break;
case host:
trustedNetworks.add("172.17.0.0/16");
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return new NodeAcl(node, trustedNodes, trustedNetworks);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = new NodeList(getNodes());
if (children) {
return candidates.childNodes(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
} else {
return Collections.singletonList(getNodeAcl(node, candidates));
}
}
/** Get config node by hostname */
public Optional<Node> getConfigNode(String hostname) {
return getConfigNodes().stream()
.filter(n -> hostname.equals(n.hostname()))
.findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
return db.getDefaultFlavorForApplication(applicationId);
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> additionalIpAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), additionalIpAddresses, hostname, parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(List<Node> nodes) {
for (Node node : nodes) {
if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
}
if (!node.allocation().isPresent()) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodesInState(nodes, Node.State.reserved);
}
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
for (Node node : nodes) {
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodes(nodes);
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes) {
for (Node node : nodes)
if (node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
try (Mutex lock = lockUnallocated()) {
return db.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
}
}
public Node setReady(String hostname) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady)).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
db.writeTo(Node.State.inactive,
db.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes) {
return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state */
public Node setDirty(Node node) {
return db.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
* Set a node dirty, which is in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(String hostname) {
Node nodeToDirty = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state"));
if (nodeToDirty.status().hardwareFailureDescription().isPresent())
throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
return setDirty(nodeToDirty);
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent) {
return move(hostname, Node.State.active, agent, Optional.empty());
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = getChildNodes(hostname).stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, toState, agent, reason));
return moved;
}
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
return move(node, toState, agent, reason);
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
throw new IllegalArgumentException("Could not move " + node + " to active:" +
"It has the same cluster and index as an existing node");
}
}
return db.writeTo(toState, node, agent, reason);
}
}
/**
* Removes a node. A node must be in a legal state before it can be removed.
*/
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
public List<Node> getConfigNodes() {
return Arrays.stream(curator.connectionSpec().split(","))
.map(hostPort -> hostPort.split(":")[0])
.map(host -> createNode(host, host, Optional.empty(),
flavors.getFlavorOrThrow("v-4-8-100"),
NodeType.config))
.collect(Collectors.toList());
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/*
* Temporary feature toggle to enable/disable dynamic docker allocation
* TODO: Remove when enabled in all zones
*/
public boolean dynamicAllocationEnabled() {
return curator.exists(Path.fromString("/provision/v1/dynamicDockerAllocation"));
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Curator curator;
private final Clock clock;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final DockerImage dockerImage;
/**
* Creates a node repository form a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), new DockerImage(config.dockerImage()));
}
/**
* Creates a node repository form a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone);
this.curator = curator;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.dockerImage = dockerImage;
for (Node.State state : Node.State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this */
public DockerImage dockerImage() { return dockerImage; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
/**
 * Returns all nodes in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, all nodes are returned
 * @return a new mutable list of the matching nodes
 */
public List<Node> getNodes(Node.State ... inState) {
    // Copy via the ArrayList constructor instead of an unnecessary stream pipeline.
    return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/**
* Finds and returns all nodes that are children of the given parent node
*
* @param hostname Parent hostname
* @return List of child nodes
*/
public List<Node> getChildNodes(String hostname) {
return db.getNodes().stream()
.filter(node -> node.parentHostname()
.map(parentHostname -> parentHostname.equals(hostname))
.orElse(false))
.collect(Collectors.toList());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
* Returns a set of nodes that should be trusted by the given node.
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<String> trustedNetworks = new HashSet<>();
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedNodes.addAll(getConfigNodes());
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
break;
case proxy:
break;
case host:
trustedNetworks.add("172.17.0.0/16");
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return new NodeAcl(node, trustedNodes, trustedNetworks);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = new NodeList(getNodes());
if (children) {
return candidates.childNodes(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
} else {
return Collections.singletonList(getNodeAcl(node, candidates));
}
}
/** Get config node by hostname */
public Optional<Node> getConfigNode(String hostname) {
return getConfigNodes().stream()
.filter(n -> hostname.equals(n.hostname()))
.findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
return db.getDefaultFlavorForApplication(applicationId);
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> additionalIpAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), additionalIpAddresses, hostname, parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(List<Node> nodes) {
for (Node node : nodes) {
if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
}
if (!node.allocation().isPresent()) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodesInState(nodes, Node.State.reserved);
}
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
    // Reject the entire batch if any hostname is already taken.
    for (Node node : nodes) {
        if (getNode(node.hostname()).isPresent())
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
    }
    try (Mutex lock = lockUnallocated()) {
        return db.addNodes(nodes);
    }
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes) {
    // Only dirty nodes may be made ready; fail fast on the first violation.
    for (Node node : nodes) {
        if (node.state() == Node.State.dirty) continue;
        throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
    }
    try (Mutex lock = lockUnallocated()) {
        return db.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
    }
}
/**
 * Sets a single node ready by hostname and returns it in its new state.
 * No-op (returns the node as-is) if it is already ready.
 *
 * @throws NoSuchNodeException if no node with this hostname exists
 */
public Node setReady(String hostname) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady)).get(0);
}
/** Reserve nodes, moving them to the reserved state. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes as part of the given transaction. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removableNodes = new ArrayList<>();
        for (Node node : nodes)
            removableNodes.add(node.with(node.allocation().get().removable()));
        write(removableNodes);
    }
}
/**
 * Deactivates all reserved and active nodes of the given application,
 * under the application lock, as part of the given transaction.
 */
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
db.writeTo(Node.State.inactive,
db.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state, taking the appropriate lock for each node via performOn */
public List<Node> setDirty(List<Node> nodes) {
return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state. Does not lock; callers are expected to hold the relevant lock. */
public Node setDirty(Node node) {
return db.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
* Set a node dirty, which is in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node is not in an allowed state or has a hardware failure
*/
public Node setDirty(String hostname) {
// Only nodes in these three states may be recycled to dirty
Node nodeToDirty = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state"));
// Nodes with a recorded hardware failure must not re-enter the provisioning pool
if (nodeToDirty.status().hardwareFailureDescription().isPresent())
throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
return setDirty(nodeToDirty);
}
/**
* Fails this node and returns it in its new state.
*
* @param hostname the node to fail
* @param agent the agent requesting the state change
* @param reason human-readable reason, recorded with the state change
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent) {
return move(hostname, Node.State.active, agent, Optional.empty());
}
/** Moves all children of the given host to the target state, then the host itself, returning every moved node. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = new ArrayList<>();
    // Children first, parent last.
    for (Node child : getChildNodes(hostname))
        moved.add(move(child, toState, agent, reason));
    moved.add(move(hostname, toState, agent, reason));
    return moved;
}
/** Looks up the node by hostname and delegates to {@link #move(Node, Node.State, Agent, Optional)}. */
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
return move(node, toState, agent, reason);
}
/**
 * Moves a node to the given state under the appropriate lock.
 * Moving to active additionally requires that the node has an allocation, and that no other
 * active node of the same application occupies the same cluster and index.
 */
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
// Precondition checked before taking the lock: an active node must be allocated
if (toState == Node.State.active && ! node.allocation().isPresent())
throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
// Guard against two active nodes holding the same cluster/index slot of one application
for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
throw new IllegalArgumentException("Could not move " + node + " to active:" +
"It has the same cluster and index as an existing node");
}
}
return db.writeTo(toState, node, agent, reason);
}
}
/**
* Removes a node. A node must be in a legal state before it can be removed.
* (NOTE(review): this javadoc is detached from any method here — the remove(String)
* implementation it describes appears elsewhere in this class. Consider deleting or moving it.)
*/
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
// Partition matching nodes by the lock that protects them: unallocated nodes share one
// lock, allocated nodes are grouped per owning application.
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
// Perform the action on each group while holding only that group's lock
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
/**
 * Synthesizes config server nodes from the curator connection spec ("host1:port,host2:port,...").
 * NOTE(review): the flavor "v-4-8-100" is hard-coded here — confirm it exists in all zones' flavor configs.
 */
public List<Node> getConfigNodes() {
return Arrays.stream(curator.connectionSpec().split(","))
.map(hostPort -> hostPort.split(":")[0])
.map(host -> createNode(host, host, Optional.empty(),
flavors.getFlavorOrThrow("v-4-8-100"),
NodeType.config))
.collect(Collectors.toList());
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node: the owning application's lock if allocated, otherwise the unallocated lock */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/*
* Temporary feature toggle to enable/disable dynamic docker allocation.
* Enabled iff the toggle path exists in ZooKeeper.
* TODO: Remove when enabled in all zones
*/
public boolean dynamicAllocationEnabled() {
return curator.exists(Path.fromString("/provision/v1/dynamicDockerAllocation"));
}
} |
Was the delete in finally not needed? | public void only_allow_to_delete_dirty_nodes_when_dynamic_allocation_feature_enabled() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "docker", NodeType.tenant);
tester.nodeRepository().setDirty("host2");
try {
tester.nodeRepository().remove("host2");
fail("Should not be able to delete tenant node in state dirty");
} catch (IllegalArgumentException ignored) {
}
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
tester.nodeRepository().remove("host2");
} | } | public void only_allow_to_delete_dirty_nodes_when_dynamic_allocation_feature_enabled() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "docker", NodeType.tenant);
tester.nodeRepository().setDirty("host2");
try {
tester.nodeRepository().remove("host2");
fail("Should not be able to delete tenant node in state dirty");
} catch (IllegalArgumentException ignored) {
}
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
tester.nodeRepository().remove("host2");
} | class NodeRepositoryTest {
@Test
public void nodeRepositoryTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertEquals(0, tester.getNodes(NodeType.tenant).size());
tester.addNode("id1", "host1", "default", NodeType.tenant);
tester.addNode("id2", "host2", "default", NodeType.tenant);
tester.addNode("id3", "host3", "default", NodeType.tenant);
assertEquals(3, tester.getNodes(NodeType.tenant).size());
// Parked nodes are in a legal state for removal
tester.nodeRepository().park("host2", Agent.system, "Parking to unit test");
tester.nodeRepository().remove("host2");
assertEquals(2, tester.getNodes(NodeType.tenant).size());
}
// Verifies that a per-application default-flavor override stored in ZooKeeper is returned,
// and that applications without an override report an empty result.
@Test
public void applicationDefaultFlavor() {
NodeRepositoryTester tester = new NodeRepositoryTester();
ApplicationId application = ApplicationId.from(TenantName.from("a"), ApplicationName.from("b"), InstanceName.from("c"));
Path path = Path.fromString("/provision/v1/defaultFlavor").append(application.serializedForm());
String flavor = "example-flavor";
tester.curator().create(path);
tester.curator().set(path, flavor.getBytes(StandardCharsets.UTF_8));
assertEquals(Optional.of(flavor), tester.nodeRepository().getDefaultFlavorOverride(application));
ApplicationId applicationWithoutDefaultFlavor =
ApplicationId.from(TenantName.from("does"), ApplicationName.from("not"), InstanceName.from("exist"));
assertFalse(tester.nodeRepository().getDefaultFlavorOverride(applicationWithoutDefaultFlavor).isPresent());
}
// The toggle is simply the existence of the ZooKeeper path; setting it flips the feature on.
@Test
public void featureToggleDynamicAllocationTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertFalse(tester.nodeRepository().dynamicAllocationEnabled());
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
assertTrue(tester.nodeRepository().dynamicAllocationEnabled());
}
@Test
} | class NodeRepositoryTest {
@Test
public void nodeRepositoryTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertEquals(0, tester.getNodes(NodeType.tenant).size());
tester.addNode("id1", "host1", "default", NodeType.tenant);
tester.addNode("id2", "host2", "default", NodeType.tenant);
tester.addNode("id3", "host3", "default", NodeType.tenant);
assertEquals(3, tester.getNodes(NodeType.tenant).size());
tester.nodeRepository().park("host2", Agent.system, "Parking to unit test");
tester.nodeRepository().remove("host2");
assertEquals(2, tester.getNodes(NodeType.tenant).size());
}
@Test
public void applicationDefaultFlavor() {
NodeRepositoryTester tester = new NodeRepositoryTester();
ApplicationId application = ApplicationId.from(TenantName.from("a"), ApplicationName.from("b"), InstanceName.from("c"));
Path path = Path.fromString("/provision/v1/defaultFlavor").append(application.serializedForm());
String flavor = "example-flavor";
tester.curator().create(path);
tester.curator().set(path, flavor.getBytes(StandardCharsets.UTF_8));
assertEquals(Optional.of(flavor), tester.nodeRepository().getDefaultFlavorOverride(application));
ApplicationId applicationWithoutDefaultFlavor =
ApplicationId.from(TenantName.from("does"), ApplicationName.from("not"), InstanceName.from("exist"));
assertFalse(tester.nodeRepository().getDefaultFlavorOverride(applicationWithoutDefaultFlavor).isPresent());
}
@Test
public void featureToggleDynamicAllocationTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertFalse(tester.nodeRepository().dynamicAllocationEnabled());
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
assertTrue(tester.nodeRepository().dynamicAllocationEnabled());
}
@Test
} |
The other tests don't use it, additionally this is the last thing that happens in the test, and a new `Curator` is created for next test as part of `@Before`, the `MockCurator` uses memory FS, so delete shouldn't be necessary. | public void only_allow_to_delete_dirty_nodes_when_dynamic_allocation_feature_enabled() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "docker", NodeType.tenant);
tester.nodeRepository().setDirty("host2");
try {
tester.nodeRepository().remove("host2");
fail("Should not be able to delete tenant node in state dirty");
} catch (IllegalArgumentException ignored) {
}
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
tester.nodeRepository().remove("host2");
} | } | public void only_allow_to_delete_dirty_nodes_when_dynamic_allocation_feature_enabled() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "docker", NodeType.tenant);
tester.nodeRepository().setDirty("host2");
try {
tester.nodeRepository().remove("host2");
fail("Should not be able to delete tenant node in state dirty");
} catch (IllegalArgumentException ignored) {
}
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
tester.nodeRepository().remove("host2");
} | class NodeRepositoryTest {
@Test
public void nodeRepositoryTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertEquals(0, tester.getNodes(NodeType.tenant).size());
tester.addNode("id1", "host1", "default", NodeType.tenant);
tester.addNode("id2", "host2", "default", NodeType.tenant);
tester.addNode("id3", "host3", "default", NodeType.tenant);
assertEquals(3, tester.getNodes(NodeType.tenant).size());
tester.nodeRepository().park("host2", Agent.system, "Parking to unit test");
tester.nodeRepository().remove("host2");
assertEquals(2, tester.getNodes(NodeType.tenant).size());
}
@Test
public void applicationDefaultFlavor() {
NodeRepositoryTester tester = new NodeRepositoryTester();
ApplicationId application = ApplicationId.from(TenantName.from("a"), ApplicationName.from("b"), InstanceName.from("c"));
Path path = Path.fromString("/provision/v1/defaultFlavor").append(application.serializedForm());
String flavor = "example-flavor";
tester.curator().create(path);
tester.curator().set(path, flavor.getBytes(StandardCharsets.UTF_8));
assertEquals(Optional.of(flavor), tester.nodeRepository().getDefaultFlavorOverride(application));
ApplicationId applicationWithoutDefaultFlavor =
ApplicationId.from(TenantName.from("does"), ApplicationName.from("not"), InstanceName.from("exist"));
assertFalse(tester.nodeRepository().getDefaultFlavorOverride(applicationWithoutDefaultFlavor).isPresent());
}
@Test
public void featureToggleDynamicAllocationTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertFalse(tester.nodeRepository().dynamicAllocationEnabled());
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
assertTrue(tester.nodeRepository().dynamicAllocationEnabled());
}
@Test
} | class NodeRepositoryTest {
@Test
public void nodeRepositoryTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertEquals(0, tester.getNodes(NodeType.tenant).size());
tester.addNode("id1", "host1", "default", NodeType.tenant);
tester.addNode("id2", "host2", "default", NodeType.tenant);
tester.addNode("id3", "host3", "default", NodeType.tenant);
assertEquals(3, tester.getNodes(NodeType.tenant).size());
tester.nodeRepository().park("host2", Agent.system, "Parking to unit test");
tester.nodeRepository().remove("host2");
assertEquals(2, tester.getNodes(NodeType.tenant).size());
}
@Test
public void applicationDefaultFlavor() {
NodeRepositoryTester tester = new NodeRepositoryTester();
ApplicationId application = ApplicationId.from(TenantName.from("a"), ApplicationName.from("b"), InstanceName.from("c"));
Path path = Path.fromString("/provision/v1/defaultFlavor").append(application.serializedForm());
String flavor = "example-flavor";
tester.curator().create(path);
tester.curator().set(path, flavor.getBytes(StandardCharsets.UTF_8));
assertEquals(Optional.of(flavor), tester.nodeRepository().getDefaultFlavorOverride(application));
ApplicationId applicationWithoutDefaultFlavor =
ApplicationId.from(TenantName.from("does"), ApplicationName.from("not"), InstanceName.from("exist"));
assertFalse(tester.nodeRepository().getDefaultFlavorOverride(applicationWithoutDefaultFlavor).isPresent());
}
@Test
public void featureToggleDynamicAllocationTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertFalse(tester.nodeRepository().dynamicAllocationEnabled());
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
assertTrue(tester.nodeRepository().dynamicAllocationEnabled());
}
@Test
} |
Changed | public void remove(String hostname) {
Node nodeToRemove = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
List<Node.State> legalStates = dynamicAllocationEnabled() ?
Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty) :
Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked);
if (! legalStates.contains(nodeToRemove.state())) {
throw new IllegalArgumentException("Can only remove node from following states: " +
legalStates.stream().map(Node.State::name).collect(Collectors.joining(", ")));
}
if (nodeToRemove.state().equals(Node.State.dirty)) {
if (!(nodeToRemove.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))) {
throw new IllegalArgumentException("Only docker nodes can be deleted from state dirty");
}
}
try (Mutex lock = lock(nodeToRemove)) {
db.removeNode(nodeToRemove.state(), hostname);
}
} | db.removeNode(nodeToRemove.state(), hostname); | public void remove(String hostname) {
Node nodeToRemove = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
List<Node.State> legalStates = dynamicAllocationEnabled() ?
Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty) :
Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked);
if (! legalStates.contains(nodeToRemove.state())) {
throw new IllegalArgumentException("Can only remove node from following states: " +
legalStates.stream().map(Node.State::name).collect(Collectors.joining(", ")));
}
if (nodeToRemove.state().equals(Node.State.dirty)) {
if (!(nodeToRemove.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))) {
throw new IllegalArgumentException("Only docker nodes can be deleted from state dirty");
}
}
try (Mutex lock = lock(nodeToRemove)) {
db.removeNode(nodeToRemove.state(), hostname);
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Curator curator;
private final Clock clock;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final DockerImage dockerImage;
/**
* Creates a node repository from a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), new DockerImage(config.dockerImage()));
}
/**
* Creates a node repository from a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone);
this.curator = curator;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.dockerImage = dockerImage;
// Rewrite all nodes in every state on startup — presumably to migrate them to the
// current serialized format. TODO(review): confirm this is the intent.
for (Node.State state : Node.State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this */
public DockerImage dockerImage() { return dockerImage; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return db.getNode(hostname, inState);
}
/**
 * Returns all nodes in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, nodes in all states are returned
 * @return a new, mutable list of the matching nodes
 */
public List<Node> getNodes(Node.State ... inState) {
    // Copy directly; the original stream().collect(toList()) round-trip was redundant.
    return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the matching nodes (possibly empty, never null)
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/**
 * Finds and returns all nodes that are children of the given parent node
 *
 * @param hostname Parent hostname
 * @return List of child nodes
 */
public List<Node> getChildNodes(String hostname) {
    List<Node> children = new ArrayList<>();
    for (Node node : db.getNodes()) {
        // A child is any node whose parentHostname equals the given hostname
        if (node.parentHostname().filter(hostname::equals).isPresent())
            children.add(node);
    }
    return children;
}
/** Returns the nodes allocated to the given application, optionally restricted to the given states */
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
/** Returns all nodes in the inactive state */
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
/** Returns all nodes in the failed state */
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
* Returns a set of nodes that should be trusted by the given node.
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
// Sorted by hostname to give deterministic ACL output
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<String> trustedNetworks = new HashSet<>();
// Every node trusts the other nodes of its own application (if allocated) and the config servers
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedNodes.addAll(getConfigNodes());
switch (node.type()) {
case tenant:
// Tenant nodes also trust the parents of already-trusted nodes and all proxy nodes;
// ready tenant nodes additionally trust all other tenant nodes
trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
// Config servers trust everything
trustedNodes.addAll(candidates.asList());
break;
case proxy:
// Proxy nodes trust only the config servers added above
break;
case host:
// 172.17.0.0/16 — presumably the Docker default bridge network; confirm against infra setup
trustedNetworks.add("172.17.0.0/16");
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return new NodeAcl(node, trustedNodes, trustedNetworks);
}
/**
 * Creates a list of node ACLs which identify which nodes the given node should trust
 *
 * @param node Node for which to generate ACLs
 * @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
 * @return List of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = new NodeList(getNodes());
    if ( ! children)
        return Collections.singletonList(getNodeAcl(node, candidates));
    // One ACL per child node, returned as an unmodifiable list
    return candidates.childNodes(node).asList().stream()
            .map(childNode -> getNodeAcl(childNode, candidates))
            .collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
/** Get config node by hostname, or empty if the hostname is not part of the curator connection spec */
public Optional<Node> getConfigNode(String hostname) {
return getConfigNodes().stream()
.filter(n -> hostname.equals(n.hostname()))
.findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
return db.getDefaultFlavorForApplication(applicationId);
}
/** Returns the flavors known to this repository */
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> additionalIpAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), additionalIpAddresses, hostname, parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(List<Node> nodes) {
for (Node node : nodes) {
if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
}
if (!node.allocation().isPresent()) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodesInState(nodes, Node.State.reserved);
}
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
for (Node node : nodes) {
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodes(nodes);
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes) {
for (Node node : nodes)
if (node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
try (Mutex lock = lockUnallocated()) {
return db.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
}
}
public Node setReady(String hostname) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady)).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
db.writeTo(Node.State.inactive,
db.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes) {
return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state */
public Node setDirty(Node node) {
return db.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
* Set a node dirty, which is in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(String hostname) {
    Optional<Node> node = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked);
    if ( ! node.isPresent())
        throw new IllegalArgumentException("Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state");
    // Nodes with a hardware failure must not be recycled back into the pool
    if (node.get().status().hardwareFailureDescription().isPresent())
        throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
    return setDirty(node.get());
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent) {
return move(hostname, Node.State.active, agent, Optional.empty());
}
/** Moves all children of the given host to the given state, then the host itself, returning all moved nodes. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = new ArrayList<>();
    for (Node child : getChildNodes(hostname))
        moved.add(move(child, toState, agent, reason));
    moved.add(move(hostname, toState, agent, reason));
    return moved;
}
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
return move(node, toState, agent, reason);
}
/**
 * Moves a node to the given state after validating the transition, while holding the appropriate lock.
 *
 * @throws IllegalArgumentException if the node is moved to active without an allocation, or if another
 *                                  active node of the same application has the same cluster and index
 */
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && ! node.allocation().isPresent())
        throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
    try (Mutex lock = lock(node)) {
        if (toState == Node.State.active) {
            // Ensure no other active node of this application already occupies the same cluster slot
            for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    // Fixed: the two message fragments used to concatenate as "active:It has" (missing space)
                    throw new IllegalArgumentException("Could not move " + node + " to active: " +
                                                       "It has the same cluster and index as an existing node");
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}
/**
* Removes a node. A node must be in a legal state before it can be removed.
*/
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
// Partition matching nodes by the lock that guards them: allocated nodes are locked per owning
// application, unallocated nodes under the single 'unallocated' lock
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
// Apply the action to each partition while holding its lock, collecting the written nodes
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
/** Returns node objects for the config servers, derived from the curator connection spec. */
public List<Node> getConfigNodes() {
    List<Node> configNodes = new ArrayList<>();
    for (String hostPort : curator.connectionSpec().split(",")) {
        String host = hostPort.split(":")[0];
        configNodes.add(createNode(host, host, Optional.empty(),
                                   flavors.getFlavorOrThrow("v-4-8-100"),
                                   NodeType.config));
    }
    return configNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
private Mutex lock(Node node) {
    // Allocated nodes are guarded by their owning application's lock; others by the unallocated lock
    if (node.allocation().isPresent())
        return lock(node.allocation().get().owner());
    return lockUnallocated();
}
/*
* Temporary feature toggle to enable/disable dynamic docker allocation
* TODO: Remove when enabled in all zones
*/
public boolean dynamicAllocationEnabled() {
return curator.exists(Path.fromString("/provision/v1/dynamicDockerAllocation"));
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Curator curator;
private final Clock clock;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final DockerImage dockerImage;
/**
* Creates a node repository form a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), new DockerImage(config.dockerImage()));
}
/**
* Creates a node repository form a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone);
this.curator = curator;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.dockerImage = dockerImage;
for (Node.State state : Node.State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this */
public DockerImage dockerImage() { return dockerImage; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(Node.State ... inState) {
return db.getNodes(inState).stream().collect(Collectors.toList());
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/**
* Finds and returns all nodes that are children of the given parent node
*
* @param hostname Parent hostname
* @return List of child nodes
*/
public List<Node> getChildNodes(String hostname) {
return db.getNodes().stream()
.filter(node -> node.parentHostname()
.map(parentHostname -> parentHostname.equals(hostname))
.orElse(false))
.collect(Collectors.toList());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
* Returns a set of nodes that should be trusted by the given node.
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<String> trustedNetworks = new HashSet<>();
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedNodes.addAll(getConfigNodes());
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
break;
case proxy:
break;
case host:
trustedNetworks.add("172.17.0.0/16");
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return new NodeAcl(node, trustedNodes, trustedNetworks);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = new NodeList(getNodes());
if (children) {
return candidates.childNodes(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
} else {
return Collections.singletonList(getNodeAcl(node, candidates));
}
}
/** Get config node by hostname */
public Optional<Node> getConfigNode(String hostname) {
return getConfigNodes().stream()
.filter(n -> hostname.equals(n.hostname()))
.findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
return db.getDefaultFlavorForApplication(applicationId);
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> additionalIpAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), additionalIpAddresses, hostname, parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(List<Node> nodes) {
for (Node node : nodes) {
if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
}
if (!node.allocation().isPresent()) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodesInState(nodes, Node.State.reserved);
}
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
for (Node node : nodes) {
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodes(nodes);
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes) {
for (Node node : nodes)
if (node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
try (Mutex lock = lockUnallocated()) {
return db.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
}
}
public Node setReady(String hostname) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady)).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
db.writeTo(Node.State.inactive,
db.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes) {
return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state */
public Node setDirty(Node node) {
return db.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
* Set a node dirty, which is in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(String hostname) {
Node nodeToDirty = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state"));
if (nodeToDirty.status().hardwareFailureDescription().isPresent())
throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
return setDirty(nodeToDirty);
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent) {
return move(hostname, Node.State.active, agent, Optional.empty());
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = getChildNodes(hostname).stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, toState, agent, reason));
return moved;
}
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
return move(node, toState, agent, reason);
}
/**
 * Moves a node to the given state after validating the transition, while holding the appropriate lock.
 *
 * @throws IllegalArgumentException if the node is moved to active without an allocation, or if another
 *                                  active node of the same application has the same cluster and index
 */
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && ! node.allocation().isPresent())
        throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
    try (Mutex lock = lock(node)) {
        if (toState == Node.State.active) {
            // Ensure no other active node of this application already occupies the same cluster slot
            for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    // Fixed: the two message fragments used to concatenate as "active:It has" (missing space)
                    throw new IllegalArgumentException("Could not move " + node + " to active: " +
                                                       "It has the same cluster and index as an existing node");
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}
/**
* Removes a node. A node must be in a legal state before it can be removed.
*/
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
public List<Node> getConfigNodes() {
return Arrays.stream(curator.connectionSpec().split(","))
.map(hostPort -> hostPort.split(":")[0])
.map(host -> createNode(host, host, Optional.empty(),
flavors.getFlavorOrThrow("v-4-8-100"),
NodeType.config))
.collect(Collectors.toList());
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/*
* Temporary feature toggle to enable/disable dynamic docker allocation
* TODO: Remove when enabled in all zones
*/
public boolean dynamicAllocationEnabled() {
return curator.exists(Path.fromString("/provision/v1/dynamicDockerAllocation"));
}
} |
OK, good | public void only_allow_to_delete_dirty_nodes_when_dynamic_allocation_feature_enabled() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "docker", NodeType.tenant);
tester.nodeRepository().setDirty("host2");
try {
tester.nodeRepository().remove("host2");
fail("Should not be able to delete tenant node in state dirty");
} catch (IllegalArgumentException ignored) {
}
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
tester.nodeRepository().remove("host2");
} | } | public void only_allow_to_delete_dirty_nodes_when_dynamic_allocation_feature_enabled() {
NodeRepositoryTester tester = new NodeRepositoryTester();
tester.addNode("id1", "host1", "default", NodeType.host);
tester.addNode("id2", "host2", "docker", NodeType.tenant);
tester.nodeRepository().setDirty("host2");
try {
tester.nodeRepository().remove("host2");
fail("Should not be able to delete tenant node in state dirty");
} catch (IllegalArgumentException ignored) {
}
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
tester.nodeRepository().remove("host2");
} | class NodeRepositoryTest {
@Test
public void nodeRepositoryTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
// Repository starts empty
assertEquals(0, tester.getNodes(NodeType.tenant).size());
tester.addNode("id1", "host1", "default", NodeType.tenant);
tester.addNode("id2", "host2", "default", NodeType.tenant);
tester.addNode("id3", "host3", "default", NodeType.tenant);
assertEquals(3, tester.getNodes(NodeType.tenant).size());
// A parked node may be removed from the repository
tester.nodeRepository().park("host2", Agent.system, "Parking to unit test");
tester.nodeRepository().remove("host2");
assertEquals(2, tester.getNodes(NodeType.tenant).size());
}
@Test
public void applicationDefaultFlavor() {
NodeRepositoryTester tester = new NodeRepositoryTester();
ApplicationId application = ApplicationId.from(TenantName.from("a"), ApplicationName.from("b"), InstanceName.from("c"));
// A default-flavor override is stored under this per-application curator path
Path path = Path.fromString("/provision/v1/defaultFlavor").append(application.serializedForm());
String flavor = "example-flavor";
tester.curator().create(path);
tester.curator().set(path, flavor.getBytes(StandardCharsets.UTF_8));
assertEquals(Optional.of(flavor), tester.nodeRepository().getDefaultFlavorOverride(application));
// An application without an override has no default flavor
ApplicationId applicationWithoutDefaultFlavor =
ApplicationId.from(TenantName.from("does"), ApplicationName.from("not"), InstanceName.from("exist"));
assertFalse(tester.nodeRepository().getDefaultFlavorOverride(applicationWithoutDefaultFlavor).isPresent());
}
@Test
public void featureToggleDynamicAllocationTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
// The toggle is off until the flag node exists in curator
assertFalse(tester.nodeRepository().dynamicAllocationEnabled());
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
assertTrue(tester.nodeRepository().dynamicAllocationEnabled());
}
@Test
} | class NodeRepositoryTest {
@Test
public void nodeRepositoryTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertEquals(0, tester.getNodes(NodeType.tenant).size());
tester.addNode("id1", "host1", "default", NodeType.tenant);
tester.addNode("id2", "host2", "default", NodeType.tenant);
tester.addNode("id3", "host3", "default", NodeType.tenant);
assertEquals(3, tester.getNodes(NodeType.tenant).size());
tester.nodeRepository().park("host2", Agent.system, "Parking to unit test");
tester.nodeRepository().remove("host2");
assertEquals(2, tester.getNodes(NodeType.tenant).size());
}
@Test
public void applicationDefaultFlavor() {
NodeRepositoryTester tester = new NodeRepositoryTester();
ApplicationId application = ApplicationId.from(TenantName.from("a"), ApplicationName.from("b"), InstanceName.from("c"));
Path path = Path.fromString("/provision/v1/defaultFlavor").append(application.serializedForm());
String flavor = "example-flavor";
tester.curator().create(path);
tester.curator().set(path, flavor.getBytes(StandardCharsets.UTF_8));
assertEquals(Optional.of(flavor), tester.nodeRepository().getDefaultFlavorOverride(application));
ApplicationId applicationWithoutDefaultFlavor =
ApplicationId.from(TenantName.from("does"), ApplicationName.from("not"), InstanceName.from("exist"));
assertFalse(tester.nodeRepository().getDefaultFlavorOverride(applicationWithoutDefaultFlavor).isPresent());
}
@Test
public void featureToggleDynamicAllocationTest() {
NodeRepositoryTester tester = new NodeRepositoryTester();
assertFalse(tester.nodeRepository().dynamicAllocationEnabled());
tester.curator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
assertTrue(tester.nodeRepository().dynamicAllocationEnabled());
}
@Test
} |
I think you meant to do int maxFieldInhit = summaries.fieldCount() and remove the slime.symbols line ... | private void fill(List<FastHit> hits, byte[] slimeBytes) {
// Decode the compressed slime payload and fill each hit with its corresponding docsum fields.
// Fixed: 'summaries.fieldCount();' was a no-op statement, and 'slime.symbols()' (the symbol-table
// size) was misused as the per-hit field count passed to hit.reserve().
Inspector summaries = new SlimeAdapter(BinaryFormat.decode(slimeBytes).get().field("docsums"));
if ( ! summaries.valid())
    throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field");
for (int i = 0; i < hits.size(); i++) {
    FastHit hit = hits.get(i);
    Inspector docsum = summaries.entry(i).field("docsum");
    hit.reserve(docsum.fieldCount()); // reserve the actual number of fields this hit will receive
    fill(hit, docsum);
}
} | summaries.fieldCount(); | private void fill(List<FastHit> hits, byte[] slimeBytes) {
Inspector summaries = new SlimeAdapter(BinaryFormat.decode(slimeBytes).get().field("docsums"));
if ( ! summaries.valid())
throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field");
for (int i = 0; i < hits.size(); i++) {
fill(hits.get(i), summaries.entry(i).field("docsum"));
}
} | class GetDocsumsResponseReceiver {
private final BlockingQueue<Client.GetDocsumsResponseOrError> responses;
private final Compressor compressor;
private final Result result;
/** Whether we have already logged/notified about an error - to avoid spamming */
private boolean hasReportedError = false;
/** The number of responses we should receive (and process) before this is complete */
private int outstandingResponses;
/**
 * Creates a receiver that will collect exactly {@code requestCount} responses.
 *
 * @param requestCount number of responses expected; also used as the queue capacity
 * @param compressor   used to decompress the Slime payload of each response
 * @param result       the result whose hits are filled (and which receives errors)
 */
public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) {
    this.responses = new LinkedBlockingQueue<>(requestCount);
    this.outstandingResponses = requestCount;
    this.compressor = compressor;
    this.result = result;
}
/**
 * Called by a thread belonging to the client when a valid response becomes available.
 * Safe to call concurrently with the dispatcher thread: {@code responses} is a
 * LinkedBlockingQueue.
 */
public void receive(Client.GetDocsumsResponseOrError response) {
responses.add(response);
}
// Aborts the wait in processResponses() with a message that records how many
// responses were still missing when the deadline passed.
private void throwTimeout() throws TimeoutException {
throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding.");
}
/**
 * Call this from the dispatcher thread to initiate and complete processing of responses.
 * This will block until all responses are available and processed, or to timeout.
 *
 * @param query the query whose remaining time budget bounds each wait
 * @throws TimeoutException if the query deadline passes before all responses arrive
 */
public void processResponses(Query query) throws TimeoutException {
    try {
        while (outstandingResponses > 0) {
            // Re-check the deadline on every iteration; poll() then waits at most
            // the remaining budget for the next response.
            long timeLeftMs = query.getTimeLeft();
            if (timeLeftMs <= 0) {
                throwTimeout();
            }
            Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
            if (response == null)
                throwTimeout();
            processResponse(response);
            outstandingResponses--;
        }
    }
    catch (InterruptedException e) {
        // Fix: the interrupt was previously swallowed silently. Restore the
        // interrupt status so the caller (or its thread pool) can observe it.
        Thread.currentThread().interrupt();
    }
}
/**
 * Processes one response: records the first backend error seen (later errors are
 * suppressed to avoid spamming the result and the log), or decompresses the Slime
 * payload and fills the hits it covers.
 */
private void processResponse(Client.GetDocsumsResponseOrError responseOrError) {
    if (responseOrError.error().isPresent()) {
        if (hasReportedError) return;
        hasReportedError = true; // fix: the flag was never set, so every error was reported
        String error = responseOrError.error().get();
        result.hits().addError(ErrorMessage.createBackendCommunicationError(error));
        log.log(Level.WARNING, "Error fetching summary data: "+ error);
    }
    else {
        Client.GetDocsumsResponse response = responseOrError.response().get();
        CompressionType compression = CompressionType.valueOf(response.compression());
        byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize());
        fill(response.hitsContext(), slimeBytes);
    }
}
// Copies every docsum field of the summary into the hit, converting Slime values
// to native Java objects via nativeTypeOf().
// NOTE(review): unlike the variant that calls hit.reserve(summary.fieldCount())
// first, this version does not pre-size the hit — confirm whether the caller
// reserves capacity elsewhere.
private void fill(FastHit hit, Inspector summary) {
summary.traverse((String name, Inspector value) -> {
hit.setField(name, nativeTypeOf(value));
});
}
/**
 * Converts a Slime value to the native Java object stored in hit fields.
 * Composite values (ARRAY, OBJECT) are kept as the Inspector itself; EMPTY maps to null.
 */
private Object nativeTypeOf(Inspector value) {
    switch (value.type()) {
        case BOOL:   return value.asBool();
        case LONG:   return value.asLong();
        case DOUBLE: return value.asDouble();
        case STRING: return value.asString();
        case DATA:   return value.asData();
        case ARRAY:  return value;
        case OBJECT: return value;
        case EMPTY:  return null;
        default: throw new IllegalArgumentException("Unexpected Slime type " + value.type());
    }
}
} | class GetDocsumsResponseReceiver {
private final BlockingQueue<Client.GetDocsumsResponseOrError> responses;
private final Compressor compressor;
private final Result result;
/** Whether we have already logged/notified about an error - to avoid spamming */
private boolean hasReportedError = false;
/** The number of responses we should receive (and process) before this is complete */
private int outstandingResponses;
// Creates a receiver expecting exactly requestCount responses; the queue is
// sized to that expected count.
public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) {
this.compressor = compressor;
responses = new LinkedBlockingQueue<>(requestCount);
outstandingResponses = requestCount;
this.result = result;
}
/**
 * Called by a thread belonging to the client when a valid response becomes available.
 * Safe to call concurrently with the dispatcher thread: {@code responses} is a
 * LinkedBlockingQueue.
 */
public void receive(Client.GetDocsumsResponseOrError response) {
responses.add(response);
}
// Aborts the wait in processResponses() with a message that records how many
// responses were still missing when the deadline passed.
private void throwTimeout() throws TimeoutException {
throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding.");
}
/**
 * Call this from the dispatcher thread to initiate and complete processing of responses.
 * This will block until all responses are available and processed, or to timeout.
 *
 * @param query the query whose remaining time budget bounds each wait
 * @throws TimeoutException if the query deadline passes before all responses arrive
 */
public void processResponses(Query query) throws TimeoutException {
    try {
        while (outstandingResponses > 0) {
            // Re-check the deadline on every iteration; poll() then waits at most
            // the remaining budget for the next response.
            long timeLeftMs = query.getTimeLeft();
            if (timeLeftMs <= 0) {
                throwTimeout();
            }
            Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
            if (response == null)
                throwTimeout();
            processResponse(response);
            outstandingResponses--;
        }
    }
    catch (InterruptedException e) {
        // Fix: the interrupt was previously swallowed silently. Restore the
        // interrupt status so the caller (or its thread pool) can observe it.
        Thread.currentThread().interrupt();
    }
}
/**
 * Processes one response: records the first backend error seen (later errors are
 * suppressed to avoid spamming the result and the log), or decompresses the Slime
 * payload and fills the hits it covers.
 */
private void processResponse(Client.GetDocsumsResponseOrError responseOrError) {
    if (responseOrError.error().isPresent()) {
        if (hasReportedError) return;
        hasReportedError = true; // fix: the flag was never set, so every error was reported
        String error = responseOrError.error().get();
        result.hits().addError(ErrorMessage.createBackendCommunicationError(error));
        log.log(Level.WARNING, "Error fetching summary data: "+ error);
    }
    else {
        Client.GetDocsumsResponse response = responseOrError.response().get();
        CompressionType compression = CompressionType.valueOf(response.compression());
        byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize());
        fill(response.hitsContext(), slimeBytes);
    }
}
// Pre-sizes the hit's field storage for all docsum fields, then copies each
// field into the hit, converting Slime values via nativeTypeOf().
private void fill(FastHit hit, Inspector summary) {
hit.reserve(summary.fieldCount());
summary.traverse((String name, Inspector value) -> {
hit.setField(name, nativeTypeOf(value));
});
}
// Converts a Slime value to the native Java object stored in hit fields.
// Composite values (ARRAY, OBJECT) are deliberately kept as the Inspector itself;
// EMPTY maps to null.
private Object nativeTypeOf(Inspector inspector) {
switch (inspector.type()) {
case ARRAY: return inspector;
case OBJECT: return inspector;
case BOOL: return inspector.asBool();
case DATA: return inspector.asData();
case DOUBLE: return inspector.asDouble();
case LONG: return inspector.asLong();
case STRING: return inspector.asString();
case EMPTY : return null;
default: throw new IllegalArgumentException("Unexpected Slime type " + inspector.type());
}
}
} |
TNetworkAddress is easier to use than a string, since a string would need to be split | private void computeFragmentHosts() throws Exception {
for (int i = fragments.size() - 1; i >= 0; --i) {
PlanFragment fragment = fragments.get(i);
FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
if (fragment.getDataPartition() == DataPartition.UNPARTITIONED) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
LOG.warn("DataPartition UNPARTITIONED, no scanNode Backend");
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
continue;
}
PlanNode leftMostNode = findLeftmostNode(fragment.getPlanRoot());
boolean hasUnionNode = containsUnionNode(fragment.getPlanRoot());
if (!(leftMostNode instanceof ScanNode) && !hasUnionNode) {
PlanFragmentId inputFragmentIdx =
fragments.get(i).getChild(0).getFragmentId();
int doris_exchange_instances= -1;
if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable() != null) {
doris_exchange_instances = ConnectContext.get().getSessionVariable().getDorisExchangeInstances();
}
if (doris_exchange_instances > 0 && fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams.size() > doris_exchange_instances) {
List<TNetworkAddress> hosts = Lists.newArrayList();
Set<String> cache = new HashSet<String>();
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
String hostPort = execParams.host.getHostname() + execParams.host.getPort();
if (!cache.contains(hostPort)) {
hosts.add(execParams.host);
cache.add(hostPort);
}
}
Collections.shuffle(hosts, instanceRandom);
for (int index = 0; index < doris_exchange_instances; index++) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, hosts.get(index % hosts.size()),0, params);
params.instanceExecParams.add(instanceParam);
}
} else {
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execParams.host,0, params);
params.instanceExecParams.add(instanceParam);
}
}
Collections.shuffle(params.instanceExecParams, instanceRandom);
continue;
}
if (bucketSeqToAddress.size() > 0 && isColocateJoin(fragment.getPlanRoot())) {
for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> scanRanges : bucketSeqToScanRange.entrySet()) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, bucketSeqToAddress.get(scanRanges.getKey()), 0, params);
Map<Integer, List<TScanRangeParams>> nodeScanRanges = scanRanges.getValue();
for (Map.Entry<Integer, List<TScanRangeParams>> nodeScanRange : nodeScanRanges.entrySet()) {
instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), nodeScanRange.getValue());
}
params.instanceExecParams.add(instanceParam);
}
} else {
Iterator iter = fragmentExecParamsMap.get(fragment.getFragmentId()).scanRangeAssignment.entrySet().iterator();
int parallelExecInstanceNum = fragment.getParallel_exec_num();
while (iter.hasNext()) {
Map.Entry entry = (Map.Entry) iter.next();
TNetworkAddress key = (TNetworkAddress) entry.getKey();
Map<Integer, List<TScanRangeParams>> value = (Map<Integer, List<TScanRangeParams>>) entry.getValue();
for (Integer planNodeId : value.keySet()) {
List<TScanRangeParams> perNodeScanRanges = value.get(planNodeId);
int expectedInstanceNum = 1;
if (parallelExecInstanceNum > 1) {
expectedInstanceNum = Math.min(perNodeScanRanges.size(), parallelExecInstanceNum);
}
List<List<TScanRangeParams>> perInstanceScanRanges = ListUtil.splitBySize(perNodeScanRanges,
expectedInstanceNum);
for (List<TScanRangeParams> scanRangeParams : perInstanceScanRanges) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, key, 0, params);
instanceParam.perNodeScanRanges.put(planNodeId, scanRangeParams);
params.instanceExecParams.add(instanceParam);
}
}
}
}
if (params.instanceExecParams.isEmpty()) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
}
}
} | Set<String> cache = new HashSet<String>(); | private void computeFragmentHosts() throws Exception {
for (int i = fragments.size() - 1; i >= 0; --i) {
PlanFragment fragment = fragments.get(i);
FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
if (fragment.getDataPartition() == DataPartition.UNPARTITIONED) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
LOG.warn("DataPartition UNPARTITIONED, no scanNode Backend");
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
continue;
}
PlanNode leftMostNode = findLeftmostNode(fragment.getPlanRoot());
boolean hasUnionNode = containsUnionNode(fragment.getPlanRoot());
if (!(leftMostNode instanceof ScanNode) && !hasUnionNode) {
PlanFragmentId inputFragmentIdx =
fragments.get(i).getChild(0).getFragmentId();
int exchangeInstances = -1;
if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable() != null) {
exchangeInstances = ConnectContext.get().getSessionVariable().getExchangeInstanceParallel();
}
if (exchangeInstances > 0 && fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams.size() > exchangeInstances) {
Set<TNetworkAddress> hostSet = Sets.newHashSet();
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
hostSet.add(execParams.host);
}
List<TNetworkAddress> hosts = Lists.newArrayList(hostSet);
Collections.shuffle(hosts, instanceRandom);
for (int index = 0; index < exchangeInstances; index++) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, hosts.get(index % hosts.size()), 0, params);
params.instanceExecParams.add(instanceParam);
}
} else {
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execParams.host, 0, params);
params.instanceExecParams.add(instanceParam);
}
}
Collections.shuffle(params.instanceExecParams, instanceRandom);
continue;
}
if (bucketSeqToAddress.size() > 0 && isColocateJoin(fragment.getPlanRoot())) {
for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> scanRanges : bucketSeqToScanRange.entrySet()) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, bucketSeqToAddress.get(scanRanges.getKey()), 0, params);
Map<Integer, List<TScanRangeParams>> nodeScanRanges = scanRanges.getValue();
for (Map.Entry<Integer, List<TScanRangeParams>> nodeScanRange : nodeScanRanges.entrySet()) {
instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), nodeScanRange.getValue());
}
params.instanceExecParams.add(instanceParam);
}
} else {
Iterator iter = fragmentExecParamsMap.get(fragment.getFragmentId()).scanRangeAssignment.entrySet().iterator();
int parallelExecInstanceNum = fragment.getParallel_exec_num();
while (iter.hasNext()) {
Map.Entry entry = (Map.Entry) iter.next();
TNetworkAddress key = (TNetworkAddress) entry.getKey();
Map<Integer, List<TScanRangeParams>> value = (Map<Integer, List<TScanRangeParams>>) entry.getValue();
for (Integer planNodeId : value.keySet()) {
List<TScanRangeParams> perNodeScanRanges = value.get(planNodeId);
int expectedInstanceNum = 1;
if (parallelExecInstanceNum > 1) {
expectedInstanceNum = Math.min(perNodeScanRanges.size(), parallelExecInstanceNum);
}
List<List<TScanRangeParams>> perInstanceScanRanges = ListUtil.splitBySize(perNodeScanRanges,
expectedInstanceNum);
for (List<TScanRangeParams> scanRangeParams : perInstanceScanRanges) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, key, 0, params);
instanceParam.perNodeScanRanges.put(planNodeId, scanRangeParams);
params.instanceExecParams.add(instanceParam);
}
}
}
}
if (params.instanceExecParams.isEmpty()) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
}
}
} | class Coordinator {
private static final Logger LOG = LogManager.getLogger(Coordinator.class);
private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private static String localIP = FrontendOptions.getLocalHostAddress();
private static Random instanceRandom = new Random();
Status queryStatus = new Status();
Map<TNetworkAddress, Long> addressToBackendID = Maps.newHashMap();
private ImmutableMap<Long, Backend> idToBackend = ImmutableMap.of();
private TDescriptorTable descTable;
private TQueryGlobals queryGlobals = new TQueryGlobals();
private TQueryOptions queryOptions;
private TNetworkAddress coordAddress;
private Lock lock = new ReentrantLock();
private boolean returnedAllResults;
private RuntimeProfile queryProfile;
private List<RuntimeProfile> fragmentProfile;
private Map<PlanFragmentId, FragmentExecParams> fragmentExecParamsMap = Maps.newHashMap();
private List<PlanFragment> fragments;
private List<BackendExecState> backendExecStates = Lists.newArrayList();
private ResultReceiver receiver;
private ConcurrentMap<TUniqueId, BackendExecState> backendExecStateMap =
Maps.newConcurrentMap();
private List<ScanNode> scanNodes;
private Set<TUniqueId> instanceIds = Sets.newHashSet();
private MarkedCountDownLatch<TUniqueId, Long> profileDoneSignal;
private boolean isBlockQuery;
private int numReceivedRows = 0;
private List<String> deltaUrls;
private Map<String, String> loadCounters;
private String trackingUrl;
private List<String> exportFiles;
private List<TTabletCommitInfo> commitInfos = Lists.newArrayList();
private long jobId = -1;
private TUniqueId queryId;
private TResourceInfo tResourceInfo;
private boolean needReport;
private String clusterName;
private final TUniqueId nextInstanceId;
public Coordinator(ConnectContext context, Analyzer analyzer, Planner planner) {
this.isBlockQuery = planner.isBlockQuery();
this.queryId = context.queryId();
this.fragments = planner.getFragments();
this.scanNodes = planner.getScanNodes();
this.descTable = analyzer.getDescTbl().toThrift();
this.returnedAllResults = false;
this.queryOptions = context.getSessionVariable().toThrift();
this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
this.queryGlobals.setTimestamp_ms(new Date().getTime());
if (context.getSessionVariable().getTimeZone().equals("CST")) {
this.queryGlobals.setTime_zone(TimeUtils.DEFAULT_TIME_ZONE);
} else {
this.queryGlobals.setTime_zone(context.getSessionVariable().getTimeZone());
}
this.tResourceInfo = new TResourceInfo(context.getQualifiedUser(),
context.getSessionVariable().getResourceGroup());
this.needReport = context.getSessionVariable().isReportSucc();
this.clusterName = context.getClusterName();
this.nextInstanceId = new TUniqueId();
nextInstanceId.setHi(queryId.hi);
nextInstanceId.setLo(queryId.lo + 1);
}
public Coordinator(Long jobId, TUniqueId queryId, DescriptorTable descTable,
List<PlanFragment> fragments, List<ScanNode> scanNodes, String cluster, String timezone) {
this.isBlockQuery = true;
this.jobId = jobId;
this.queryId = queryId;
this.descTable = descTable.toThrift();
this.fragments = fragments;
this.scanNodes = scanNodes;
this.queryOptions = new TQueryOptions();
this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
this.queryGlobals.setTimestamp_ms(new Date().getTime());
this.queryGlobals.setTime_zone(timezone);
this.tResourceInfo = new TResourceInfo("", "");
this.needReport = true;
this.clusterName = cluster;
this.nextInstanceId = new TUniqueId();
nextInstanceId.setHi(queryId.hi);
nextInstanceId.setLo(queryId.lo + 1);
}
public long getJobId() {
return jobId;
}
public TUniqueId getQueryId() {
return queryId;
}
public void setQueryId(TUniqueId queryId) {
this.queryId = queryId;
}
public void setQueryType(TQueryType type) {
this.queryOptions.setQuery_type(type);
}
public Status getExecStatus() {
return queryStatus;
}
public RuntimeProfile getQueryProfile() {
return queryProfile;
}
public List<String> getDeltaUrls() {
return deltaUrls;
}
public Map<String, String> getLoadCounters() {
return loadCounters;
}
public String getTrackingUrl() {
return trackingUrl;
}
public void setExecMemoryLimit(long execMemoryLimit) {
this.queryOptions.setMem_limit(execMemoryLimit);
}
public void setTimeout(int timeout) {
this.queryOptions.setQuery_timeout(timeout);
}
public void clearExportStatus() {
lock.lock();
try {
this.backendExecStates.clear();
this.backendExecStateMap.clear();
this.queryStatus.setStatus(new Status());
if (this.exportFiles == null) {
this.exportFiles = Lists.newArrayList();
}
this.exportFiles.clear();
} finally {
lock.unlock();
}
}
public List<TTabletCommitInfo> getCommitInfos() {
return commitInfos;
}
private void prepare() {
for (PlanFragment fragment : fragments) {
fragmentExecParamsMap.put(fragment.getFragmentId(), new FragmentExecParams(fragment));
}
for (PlanFragment fragment : fragments) {
if (!(fragment.getSink() instanceof DataStreamSink)) {
continue;
}
FragmentExecParams params = fragmentExecParamsMap.get(fragment.getDestFragment().getFragmentId());
params.inputFragments.add(fragment.getFragmentId());
}
coordAddress = new TNetworkAddress(localIP, Config.rpc_port);
int fragmentSize = fragments.size();
queryProfile = new RuntimeProfile("Execution Profile " + DebugUtil.printId(queryId));
fragmentProfile = new ArrayList<RuntimeProfile>();
for (int i = 0; i < fragmentSize; i ++) {
fragmentProfile.add(new RuntimeProfile("Fragment " + i));
queryProfile.addChild(fragmentProfile.get(i));
}
this.idToBackend = Catalog.getCurrentSystemInfo().getBackendsInCluster(clusterName);
if (LOG.isDebugEnabled()) {
LOG.debug("idToBackend size={}", idToBackend.size());
for (Map.Entry<Long, Backend> entry : idToBackend.entrySet()) {
Long backendID = entry.getKey();
Backend backend = entry.getValue();
LOG.debug("backend: {}-{}-{}", backendID, backend.getHost(), backend.getBePort());
}
}
}
private void lock() {
lock.lock();
}
private void unlock() {
lock.unlock();
}
private void traceInstance() {
if (LOG.isDebugEnabled()) {
StringBuilder sb = new StringBuilder();
int idx = 0;
sb.append("query id=").append(DebugUtil.printId(queryId)).append(",");
sb.append("fragment=[");
for (Map.Entry<PlanFragmentId, FragmentExecParams> entry : fragmentExecParamsMap.entrySet()) {
if (idx++ != 0) {
sb.append(",");
}
sb.append(entry.getKey());
entry.getValue().appendTo(sb);
}
sb.append("]");
LOG.debug(sb.toString());
}
}
public void exec() throws Exception {
if (!scanNodes.isEmpty()) {
LOG.debug("debug: in Coordinator::exec. query id: {}, planNode: {}",
DebugUtil.printId(queryId), scanNodes.get(0).treeToThrift());
}
if (!fragments.isEmpty()) {
LOG.debug("debug: in Coordinator::exec. query id: {}, fragment: {}",
DebugUtil.printId(queryId), fragments.get(0).toThrift());
}
prepare();
computeScanRangeAssignment();
computeFragmentExecParams();
traceInstance();
PlanFragmentId topId = fragments.get(0).getFragmentId();
FragmentExecParams topParams = fragmentExecParamsMap.get(topId);
if (topParams.fragment.getSink() instanceof ResultSink) {
receiver = new ResultReceiver(
topParams.instanceExecParams.get(0).instanceId,
addressToBackendID.get(topParams.instanceExecParams.get(0).host),
toBrpcHost(topParams.instanceExecParams.get(0).host),
queryOptions.query_timeout * 1000);
} else {
this.queryOptions.setIs_report_success(true);
deltaUrls = Lists.newArrayList();
loadCounters = Maps.newHashMap();
}
profileDoneSignal = new MarkedCountDownLatch<TUniqueId, Long>(instanceIds.size());
for (TUniqueId instanceId : instanceIds) {
profileDoneSignal.addMark(instanceId, -1L /* value is meaningless */);
}
lock();
try {
int backendId = 0;
int profileFragmentId = 0;
long memoryLimit = queryOptions.getMem_limit();
for (PlanFragment fragment : fragments) {
FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
int instanceNum = params.instanceExecParams.size();
Preconditions.checkState(instanceNum > 0);
List<TExecPlanFragmentParams> tParams = params.toThrift(backendId);
List<Pair<BackendExecState, Future<PExecPlanFragmentResult>>> futures = Lists.newArrayList();
if (colocateFragmentIds.contains(fragment.getFragmentId().asInt())) {
int rate = Math.min(Config.query_colocate_join_memory_limit_penalty_factor, instanceNum);
long newmemory = memoryLimit / rate;
for (TExecPlanFragmentParams tParam : tParams) {
tParam.query_options.setMem_limit(newmemory);
}
}
int instanceId = 0;
for (TExecPlanFragmentParams tParam : tParams) {
BackendExecState execState =
new BackendExecState(fragment.getFragmentId(), instanceId++,
profileFragmentId, tParam, this.addressToBackendID);
backendExecStates.add(execState);
backendExecStateMap.put(tParam.params.getFragment_instance_id(), execState);
futures.add(Pair.create(execState, execState.execRemoteFragmentAsync()));
backendId++;
}
for (Pair<BackendExecState, Future<PExecPlanFragmentResult>> pair : futures) {
TStatusCode code = TStatusCode.INTERNAL_ERROR;
String errMsg = null;
try {
PExecPlanFragmentResult result = pair.second.get(Config.remote_fragment_exec_timeout_ms,
TimeUnit.MILLISECONDS);
code = TStatusCode.findByValue(result.status.status_code);
if (result.status.error_msgs != null && !result.status.error_msgs.isEmpty()) {
errMsg = result.status.error_msgs.get(0);
}
} catch (ExecutionException e) {
LOG.warn("catch a execute exception", e);
code = TStatusCode.THRIFT_RPC_ERROR;
} catch (InterruptedException e) {
LOG.warn("catch a interrupt exception", e);
code = TStatusCode.INTERNAL_ERROR;
} catch (TimeoutException e) {
LOG.warn("catch a timeout exception", e);
code = TStatusCode.TIMEOUT;
}
if (code != TStatusCode.OK) {
if (errMsg == null) {
errMsg = "exec rpc error. backend id: " + pair.first.backendId;
}
queryStatus.setStatus(errMsg);
LOG.warn("exec plan fragment failed, errmsg={}, fragmentId={}, backend={}:{}",
errMsg, fragment.getFragmentId(),
pair.first.address.hostname, pair.first.address.port);
cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
switch (code) {
case TIMEOUT:
throw new UserException("query timeout. backend id: " + pair.first.backendId);
case THRIFT_RPC_ERROR:
SimpleScheduler.updateBlacklistBackends(pair.first.backendId);
throw new RpcException("rpc failed. backend id: " + pair.first.backendId);
default:
throw new UserException(errMsg);
}
}
}
profileFragmentId += 1;
}
attachInstanceProfileToFragmentProfile();
} finally {
unlock();
}
}
public List<String> getExportFiles() {
return exportFiles;
}
void updateExportFiles(List<String> files) {
lock.lock();
try {
if (exportFiles == null) {
exportFiles = Lists.newArrayList();
}
exportFiles.addAll(files);
} finally {
lock.unlock();
}
}
void updateDeltas(List<String> urls) {
lock.lock();
try {
deltaUrls.addAll(urls);
} finally {
lock.unlock();
}
}
private void updateLoadCounters(Map<String, String> newLoadCounters) {
lock.lock();
try {
long numRowsNormal = 0L;
String value = this.loadCounters.get(LoadEtlTask.DPP_NORMAL_ALL);
if (value != null) {
numRowsNormal = Long.valueOf(value);
}
long numRowsAbnormal = 0L;
value = this.loadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL);
if (value != null) {
numRowsAbnormal = Long.valueOf(value);
}
value = newLoadCounters.get(LoadEtlTask.DPP_NORMAL_ALL);
if (value != null) {
numRowsNormal += Long.valueOf(value);
}
value = newLoadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL);
if (value != null) {
numRowsAbnormal += Long.valueOf(value);
}
this.loadCounters.put(LoadEtlTask.DPP_NORMAL_ALL, "" + numRowsNormal);
this.loadCounters.put(LoadEtlTask.DPP_ABNORMAL_ALL, "" + numRowsAbnormal);
} finally {
lock.unlock();
}
}
private void updateCommitInfos(List<TTabletCommitInfo> commitInfos) {
lock.lock();
try {
this.commitInfos.addAll(commitInfos);
} finally {
lock.unlock();
}
}
private void updateStatus(Status status, TUniqueId instanceId) {
lock.lock();
try {
if (returnedAllResults && status.isCancelled()) {
return;
}
if (status.ok()) {
return;
}
if (!queryStatus.ok()) {
return;
}
queryStatus.setStatus(status);
LOG.warn("one instance report fail throw updateStatus(), need cancel. job id: {}, query id: {}, instance id: {}",
jobId, DebugUtil.printId(queryId), instanceId != null ? DebugUtil.printId(instanceId) : "NaN");
cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
} finally {
lock.unlock();
}
}
public RowBatch getNext() throws Exception {
if (receiver == null) {
throw new UserException("There is no receiver.");
}
RowBatch resultBatch;
Status status = new Status();
resultBatch = receiver.getNext(status);
if (!status.ok()) {
LOG.warn("get next fail, need cancel. query id: {}", DebugUtil.printId(queryId));
}
updateStatus(status, null /* no instance id */);
Status copyStatus = null;
lock();
try {
copyStatus = new Status(queryStatus);
} finally {
unlock();
}
if (!copyStatus.ok()) {
if (Strings.isNullOrEmpty(copyStatus.getErrorMsg())) {
copyStatus.rewriteErrorMsg();
}
if (copyStatus.isRpcError()) {
throw new RpcException(copyStatus.getErrorMsg());
} else {
String errMsg = copyStatus.getErrorMsg();
LOG.warn("query failed: {}", errMsg);
int hostIndex = errMsg.indexOf("host");
if (hostIndex != -1) {
errMsg = errMsg.substring(0, hostIndex);
}
throw new UserException(errMsg);
}
}
if (resultBatch.isEos()) {
this.returnedAllResults = true;
Long numLimitRows = fragments.get(0).getPlanRoot().getLimit();
boolean hasLimit = numLimitRows > 0;
if (!isBlockQuery && instanceIds.size() > 1 && hasLimit && numReceivedRows >= numLimitRows) {
LOG.debug("no block query, return num >= limit rows, need cancel");
cancelInternal(PPlanFragmentCancelReason.LIMIT_REACH);
}
} else {
numReceivedRows += resultBatch.getBatch().getRowsSize();
}
return resultBatch;
}
public void cancel() {
lock();
try {
if (!queryStatus.ok()) {
return;
} else {
queryStatus.setStatus(Status.CANCELLED);
}
LOG.warn("cancel execution of query, this is outside invoke");
cancelInternal(PPlanFragmentCancelReason.USER_CANCEL);
} finally {
unlock();
}
}
// Cancels the query on all fronts: the local result receiver, every remote
// fragment, and any threads blocked on the profile latch.
// Caller must hold the coordinator lock (see cancel() / updateStatus()).
private void cancelInternal(PPlanFragmentCancelReason cancelReason) {
if (null != receiver) {
receiver.cancel();
}
cancelRemoteFragmentsAsync(cancelReason);
if (profileDoneSignal != null) {
// Release waiters so report processing cannot block forever on a cancelled query.
profileDoneSignal.countDownToZero(new Status());
LOG.info("unfinished instance: {}", profileDoneSignal.getLeftMarks().stream().map(e->DebugUtil.printId(e.getKey())).toArray());
}
}
private void cancelRemoteFragmentsAsync(PPlanFragmentCancelReason cancelReason) {
for (BackendExecState backendExecState : backendExecStates) {
TNetworkAddress address = backendExecState.getBackendAddress();
LOG.debug("cancelRemoteFragments initiated={} done={} hasCanceled={} ip={} port={} fragment instance id={}, reason: {}",
backendExecState.initiated, backendExecState.done, backendExecState.hasCanceled,
address.hostname, address.port, DebugUtil.printId(backendExecState.getFragmentInstanceId()),
cancelReason.name());
backendExecState.lock();
try {
if (!backendExecState.initiated) {
continue;
}
if (backendExecState.done) {
continue;
}
if (backendExecState.hasCanceled) {
continue;
}
TNetworkAddress brpcAddress = toBrpcHost(address);
try {
BackendServiceProxy.getInstance().cancelPlanFragmentAsync(
brpcAddress, backendExecState.getFragmentInstanceId(), cancelReason);
} catch (RpcException e) {
LOG.warn("cancel plan fragment get a exception, address={}:{}",
brpcAddress.getHostname(), brpcAddress.getPort());
SimpleScheduler.updateBlacklistBackends(addressToBackendID.get(brpcAddress));
}
backendExecState.hasCanceled = true;
} catch (Exception e) {
LOG.warn("catch a exception", e);
} finally {
backendExecState.unlock();
}
}
}
// Two-pass fragment parameter computation. Pass 1: assign hosts (via
// computeFragmentHosts) and give every fragment instance a unique id derived
// from the query id. Pass 2: wire each fragment's data-stream sink to the
// instances of its destination fragment and count senders per exchange node.
// Order matters: instance ids must exist before destinations reference them.
private void computeFragmentExecParams() throws Exception {
computeFragmentHosts();
instanceIds.clear();
for (FragmentExecParams params : fragmentExecParamsMap.values()) {
LOG.debug("fragment {} has instances {}", params.fragment.getFragmentId(), params.instanceExecParams.size());
for (int j = 0; j < params.instanceExecParams.size(); ++j) {
// Instance ids are queryId.lo + 1, +2, ... so they never collide with the query id itself.
TUniqueId instanceId = new TUniqueId();
instanceId.setHi(queryId.hi);
instanceId.setLo(queryId.lo + instanceIds.size() + 1);
params.instanceExecParams.get(j).instanceId = instanceId;
instanceIds.add(instanceId);
}
}
for (FragmentExecParams params : fragmentExecParamsMap.values()) {
PlanFragment destFragment = params.fragment.getDestFragment();
if (destFragment == null) {
// Top fragment (result/table sink) has no downstream consumer.
continue;
}
FragmentExecParams destParams = fragmentExecParamsMap.get(destFragment.getFragmentId());
DataSink sink = params.fragment.getSink();
PlanNodeId exchId = sink.getExchNodeId();
// Accumulate the sender count per exchange node across all input fragments.
if (destParams.perExchNumSenders.get(exchId.asInt()) == null) {
destParams.perExchNumSenders.put(exchId.asInt(), params.instanceExecParams.size());
} else {
destParams.perExchNumSenders.put(exchId.asInt(),
params.instanceExecParams.size() + destParams.perExchNumSenders.get(exchId.asInt()));
}
// Every sender instance gets the full destination list (instance id + rpc + brpc address).
for (int j = 0; j < destParams.instanceExecParams.size(); ++j) {
TPlanFragmentDestination dest = new TPlanFragmentDestination();
dest.fragment_instance_id = destParams.instanceExecParams.get(j).instanceId;
dest.server = toRpcHost(destParams.instanceExecParams.get(j).host);
dest.setBrpc_server(toBrpcHost(destParams.instanceExecParams.get(j).host));
params.destinations.add(dest);
}
}
}
// Resolves a backend (identified by host + be-port) to its thrift RPC address.
// Throws if the backend is no longer registered in the current system info.
private TNetworkAddress toRpcHost(TNetworkAddress host) throws Exception {
Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort(
host.getHostname(), host.getPort());
if (backend == null) {
throw new UserException("there is no scanNode Backend");
}
TNetworkAddress dest = new TNetworkAddress(backend.getHost(), backend.getBeRpcPort());
return dest;
}
/**
 * Resolves a backend's heartbeat (be-port) address to its brpc address, or
 * returns null when the backend does not expose a valid brpc port.
 *
 * @throws UserException if no backend is registered at the given host/port
 */
private TNetworkAddress toBrpcHost(TNetworkAddress host) throws Exception {
    Backend backend = Catalog.getCurrentSystemInfo()
            .getBackendWithBePort(host.getHostname(), host.getPort());
    if (backend == null) {
        throw new UserException("there is no scanNode Backend");
    }
    return backend.getBrpcPort() < 0
            ? null
            : new TNetworkAddress(backend.getHost(), backend.getBrpcPort());
}
/**
 * Returns true if the fragment-local plan tree rooted at {@code node}
 * contains a UnionNode. Children reached through an ExchangeNode belong to
 * another fragment and are not searched.
 */
private boolean containsUnionNode(PlanNode node) {
    if (node instanceof UnionNode) {
        return true;
    }
    for (PlanNode child : node.getChildren()) {
        if (child instanceof ExchangeNode) {
            // fragment boundary: do not descend
            continue;
        }
        if (child instanceof UnionNode) {
            return true;
        }
        // BUGFIX: the old code did `return containsUnionNode(child)` here,
        // which returned after examining only the FIRST non-exchange child
        // and silently skipped all remaining siblings.
        if (containsUnionNode(child)) {
            return true;
        }
    }
    return false;
}
/**
 * Returns true if the plan tree rooted at {@code node} participates in a
 * colocate join. Side effect: fragments found to contain a colocate
 * HashJoinNode are remembered in {@code colocateFragmentIds}.
 */
private boolean isColocateJoin(PlanNode node) {
    // colocate join can be disabled globally or per-session
    if (Config.disable_colocate_join) {
        return false;
    }
    if (ConnectContext.get() != null) {
        if (ConnectContext.get().getSessionVariable().isDisableColocateJoin()) {
            return false;
        }
    }
    // fragment already known to contain a colocate join
    if (colocateFragmentIds.contains(node.getFragmentId().asInt())) {
        return true;
    }
    if (node instanceof HashJoinNode) {
        HashJoinNode joinNode = (HashJoinNode) node;
        if (joinNode.isColocate()) {
            colocateFragmentIds.add(joinNode.getFragmentId().asInt());
            return true;
        }
    }
    // BUGFIX: the old loop did `return isColocateJoin(childNode)` on the
    // first iteration, so only the first child was ever examined; now every
    // child subtree is searched.
    for (PlanNode childNode : node.getChildren()) {
        if (isColocateJoin(childNode)) {
            return true;
        }
    }
    return false;
}
// Walks the leftmost-child chain until reaching a leaf or an ExchangeNode
// (the fragment boundary) and returns that node.
private PlanNode findLeftmostNode(PlanNode plan) {
    PlanNode current = plan;
    while (!current.getChildren().isEmpty() && !(current instanceof ExchangeNode)) {
        current = current.getChild(0);
    }
    return current;
}
// Returns m[key]; when absent, inserts defaultVal and returns it.
private <K, V> V findOrInsert(HashMap<K, V> m, final K key, final V defaultVal) {
    V existing = m.get(key);
    if (existing != null) {
        return existing;
    }
    m.put(key, defaultVal);
    return defaultVal;
}
// Scan-range specialization of findOrInsert: returns the list mapped to key,
// inserting defaultVal first when the key is absent.
private List<TScanRangeParams> findOrInsert(Map<Integer, List<TScanRangeParams>> m, Integer key,
                                            ArrayList<TScanRangeParams> defaultVal) {
    List<TScanRangeParams> existing = m.get(key);
    if (existing != null) {
        return existing;
    }
    m.put(key, defaultVal);
    return defaultVal;
}
// Placeholder cost estimate: every scan range currently counts as length 1,
// so the scheduler balances by range COUNT, not by bytes. NOTE(review): if a
// real size estimate becomes available, return it here instead.
private long getScanRangeLength(final TScanRange scanRange) {
    return 1;
}
// Assigns scan ranges of every scan node to execution backends. Fragments
// containing a colocate join are bucketed by tablet bucket sequence; all
// others go through the load-balancing scheduler.
private void computeScanRangeAssignment() throws Exception {
    for (ScanNode scanNode : scanNodes) {
        // the parameter is the max scan-range length; 0 means no limit
        List<TScanRangeLocations> locations = scanNode.getScanRangeLocations(0);
        if (locations == null) {
            // scan node with no physical ranges (e.g. constant select)
            continue;
        }
        FragmentScanRangeAssignment assignment =
                fragmentExecParamsMap.get(scanNode.getFragmentId()).scanRangeAssignment;
        if (isColocateJoin(scanNode.getFragment().getPlanRoot())) {
            // colocate path assumes an OLAP scan node
            computeScanRangeAssignmentByColocate((OlapScanNode) scanNode, assignment);
        } else {
            computeScanRangeAssignmentByScheduler(scanNode, locations, assignment);
        }
    }
}
// Colocate-join assignment: all scan ranges of the same bucket sequence are
// routed to a single backend so that matching buckets of both join sides
// land on the same host. Results accumulate in bucketSeqToScanRange /
// bucketSeqToAddress rather than in 'assignment'.
private void computeScanRangeAssignmentByColocate(
        final OlapScanNode scanNode,
        FragmentScanRangeAssignment assignment) throws Exception {
    for (Integer bucketSeq : scanNode.bucketSeq2locations.keySet()) {
        List<TScanRangeLocations> locations = scanNode.bucketSeq2locations.get(bucketSeq);
        // pick the host for this bucket once; later scan nodes reuse it
        if (!bucketSeqToAddress.containsKey(bucketSeq)) {
            getExecHostPortForBucketSeq(locations.get(0), bucketSeq);
        }
        for (TScanRangeLocations location : locations) {
            Map<Integer, List<TScanRangeParams>> scanRanges =
                    findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<Integer, List<TScanRangeParams>>());
            List<TScanRangeParams> scanRangeParamsList =
                    findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>());
            TScanRangeParams scanRangeParams = new TScanRangeParams();
            scanRangeParams.scan_range = location.scan_range;
            scanRangeParamsList.add(scanRangeParams);
        }
    }
}
/**
 * Chooses an execution backend for the given bucket sequence and records the
 * bucketSeq -> address and address -> backendId mappings.
 *
 * @throws UserException if no alive backend can serve any replica location
 */
private void getExecHostPortForBucketSeq(TScanRangeLocations seqLocation, Integer bucketSeq) throws Exception {
    // Pick a random replica as the preferred host; SimpleScheduler may still
    // fall back to another location if that backend is blacklisted/down.
    // FIX: reuse the shared static 'instanceRandom' instead of allocating a
    // new Random (and reseeding) on every call.
    int randomLocation = instanceRandom.nextInt(seqLocation.locations.size());
    Reference<Long> backendIdRef = new Reference<Long>();
    TNetworkAddress execHostPort = SimpleScheduler.getHost(
            seqLocation.locations.get(randomLocation).backend_id,
            seqLocation.locations, this.idToBackend, backendIdRef);
    if (execHostPort == null) {
        throw new UserException("there is no scanNode Backend");
    }
    this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
    this.bucketSeqToAddress.put(bucketSeq, execHostPort);
}
// Greedy load-balancing assignment: each scan range goes to the replica host
// with the fewest bytes assigned so far (ties broken by iteration order).
// With getScanRangeLength() == 1 this effectively balances range counts.
private void computeScanRangeAssignmentByScheduler(
        final ScanNode scanNode,
        final List<TScanRangeLocations> locations,
        FragmentScanRangeAssignment assignment) throws Exception {
    HashMap<TNetworkAddress, Long> assignedBytesPerHost = Maps.newHashMap();
    for (TScanRangeLocations scanRangeLocations : locations) {
        // find the least-loaded replica location for this range
        Long minAssignedBytes = Long.MAX_VALUE;
        TScanRangeLocation minLocation = null;
        for (final TScanRangeLocation location : scanRangeLocations.getLocations()) {
            Long assignedBytes = findOrInsert(assignedBytesPerHost, location.server, 0L);
            if (assignedBytes < minAssignedBytes) {
                minAssignedBytes = assignedBytes;
                minLocation = location;
            }
        }
        // charge the chosen host for this range
        Long scanRangeLength = getScanRangeLength(scanRangeLocations.scan_range);
        assignedBytesPerHost.put(minLocation.server,
                assignedBytesPerHost.get(minLocation.server) + scanRangeLength);
        // resolve an actually-alive backend for the chosen location
        Reference<Long> backendIdRef = new Reference<Long>();
        TNetworkAddress execHostPort = SimpleScheduler.getHost(minLocation.backend_id,
                scanRangeLocations.getLocations(), this.idToBackend, backendIdRef);
        if (execHostPort == null) {
            throw new UserException("there is no scanNode Backend");
        }
        this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
        // record host -> (scan node id -> scan ranges)
        Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(assignment, execHostPort,
                new HashMap<Integer, List<TScanRangeParams>>());
        List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(),
                new ArrayList<TScanRangeParams>());
        TScanRangeParams scanRangeParams = new TScanRangeParams();
        scanRangeParams.scan_range = scanRangeLocations.scan_range;
        scanRangeParams.setVolume_id(minLocation.volume_id);
        scanRangeParamsList.add(scanRangeParams);
    }
}
// Callback invoked when a backend reports execution status for one fragment
// instance. Updates the per-instance profile, propagates failures into the
// query status, and on completion merges load/export side data and counts
// down the completion latch.
public void updateFragmentExecStatus(TReportExecStatusParams params) {
    if (params.backend_num >= backendExecStates.size()) {
        LOG.warn("unknown backend number: {}, expected less than: {}",
                params.backend_num, backendExecStates.size());
        return;
    }
    boolean done = false;
    BackendExecState execState = backendExecStates.get(params.backend_num);
    // per-instance lock: only profile/done are mutated under it
    execState.lock();
    try {
        if (execState.done) {
            // duplicate report after completion; ignore
            return;
        }
        if (params.isSetProfile()) {
            execState.profile.update(params.profile);
        }
        done = params.done;
        execState.done = params.done;
    } finally {
        execState.unlock();
    }
    if (LOG.isDebugEnabled()) {
        StringBuilder builder = new StringBuilder();
        execState.profile().prettyPrint(builder, "");
        LOG.debug("profile for query_id={} instance_id={}\n{}",
                DebugUtil.printId(queryId),
                DebugUtil.printId(params.getFragment_instance_id()),
                builder.toString());
    }
    // a CANCELLED status after all results were returned is expected (we
    // cancelled the remaining instances ourselves) and is not an error
    Status status = new Status(params.status);
    if (!(returnedAllResults && status.isCancelled()) && !status.ok()) {
        LOG.warn("one instance report fail, query_id={} instance_id={}",
                DebugUtil.printId(queryId), DebugUtil.printId(params.getFragment_instance_id()));
        updateStatus(status, params.getFragment_instance_id());
    }
    if (done) {
        // merge side-channel results reported by the finished instance
        if (params.isSetDelta_urls()) {
            updateDeltas(params.getDelta_urls());
        }
        if (params.isSetLoad_counters()) {
            updateLoadCounters(params.getLoad_counters());
        }
        if (params.isSetTracking_url()) {
            trackingUrl = params.tracking_url;
        }
        if (params.isSetExport_files()) {
            updateExportFiles(params.export_files);
        }
        if (params.isSetCommitInfos()) {
            updateCommitInfos(params.getCommitInfos());
        }
        profileDoneSignal.markedCountDown(params.getFragment_instance_id(), -1L);
    }
    if (params.isSetLoaded_rows()) {
        Catalog.getCurrentCatalog().getLoadManager().updateJobLoadedRows(jobId, params.query_id, params.loaded_rows);
    }
    return;
}
// Finalizes profiles after the query ends: optionally waits briefly for
// outstanding instance reports, then sorts each fragment's children.
public void endProfile() {
    if (backendExecStates.isEmpty()) {
        return;
    }
    if (needReport) {
        try {
            // best-effort: give stragglers up to 2s to report their profiles
            profileDoneSignal.await(2, TimeUnit.SECONDS);
        } catch (InterruptedException e1) {
            LOG.warn("signal await error", e1);
        }
    }
    // NOTE(review): starts at index 1, so fragment 0's profile is never
    // sorted — presumably intentional (root fragment), but worth confirming.
    for (int i = 1; i < fragmentProfile.size(); ++i) {
        fragmentProfile.get(i).sortChildren();
    }
}
/**
 * Waits up to {@code seconds} for every fragment instance to report
 * completion.
 *
 * @param seconds maximum wait time in seconds
 * @return true if all instances finished within the timeout
 */
public boolean join(int seconds) {
    try {
        return profileDoneSignal.await(seconds, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // BUGFIX: restore the interrupt flag instead of silently swallowing
        // the interruption, so callers up the stack can still observe it.
        Thread.currentThread().interrupt();
    }
    return false;
}
// The query is done once every fragment instance has counted the latch down.
public boolean isDone() {
    long remaining = profileDoneSignal.getCount();
    return remaining == 0;
}
// Per-fragment scan assignment: execution host -> (scan node id -> scan ranges).
class FragmentScanRangeAssignment
        extends HashMap<TNetworkAddress, Map<Integer, List<TScanRangeParams>>> {
}
// Colocate-join assignment: bucket sequence -> (scan node id -> scan ranges).
class BucketSeqToScanRange extends HashMap<Integer, Map<Integer, List<TScanRangeParams>>> {
}
// Scan ranges grouped by bucket sequence, filled by the colocate path.
private BucketSeqToScanRange bucketSeqToScanRange = new BucketSeqToScanRange();
// Execution backend chosen for each bucket sequence.
private Map<Integer, TNetworkAddress> bucketSeqToAddress = Maps.newHashMap();
// Ids of fragments already identified as containing a colocate join.
private Set<Integer> colocateFragmentIds = new HashSet<>();
// Tracks the execution of ONE fragment instance on one backend: its rpc
// parameters, lifecycle flags (initiated/done/hasCanceled), runtime profile
// and target address. The 'lock' guards the mutable lifecycle flags.
public class BackendExecState {
    TExecPlanFragmentParams rpcParams;
    private PlanFragmentId fragmentId;
    private int instanceId;
    private boolean initiated;   // exec rpc has been sent (or attempted)
    private boolean done;        // backend reported completion
    private boolean hasCanceled; // cancel rpc already issued
    private Lock lock = new ReentrantLock();
    private int profileFragmentId;
    RuntimeProfile profile;
    TNetworkAddress address;     // backend heartbeat address
    Long backendId;
    public int profileFragmentId() {
        return profileFragmentId;
    }
    public boolean initiated() {
        return initiated;
    }
    public RuntimeProfile profile() {
        return profile;
    }
    public void lock() {
        lock.lock();
    }
    public void unlock() {
        lock.unlock();
    }
    public int getInstanceId() {
        return instanceId;
    }
    public PlanFragmentId getFragmentId() {
        return fragmentId;
    }
    // Resolves address/backendId from the already-computed exec params; the
    // profile name embeds the instance id and host for readability.
    public BackendExecState(PlanFragmentId fragmentId, int instanceId, int profileFragmentId,
                            TExecPlanFragmentParams rpcParams, Map<TNetworkAddress, Long> addressToBackendID) {
        this.profileFragmentId = profileFragmentId;
        this.fragmentId = fragmentId;
        this.instanceId = instanceId;
        this.rpcParams = rpcParams;
        this.initiated = false;
        this.done = false;
        this.address = fragmentExecParamsMap.get(fragmentId).instanceExecParams.get(instanceId).host;
        this.backendId = addressToBackendID.get(address);
        String name = "Instance " + DebugUtil.printId(fragmentExecParamsMap.get(fragmentId)
                .instanceExecParams.get(instanceId).instanceId) + " (host=" + address + ")";
        this.profile = new RuntimeProfile(name);
        this.hasCanceled = false;
    }
    public TNetworkAddress getBackendAddress() {
        return address;
    }
    public TUniqueId getFragmentInstanceId() {
        return this.rpcParams.params.getFragment_instance_id();
    }
    // Sends the exec-plan-fragment rpc asynchronously. Marks 'initiated'
    // BEFORE sending so a failed send is still eligible for cancellation;
    // on RpcException the backend is blacklisted before rethrowing.
    public Future<PExecPlanFragmentResult> execRemoteFragmentAsync() throws TException, RpcException {
        TNetworkAddress brpcAddress = null;
        try {
            brpcAddress = toBrpcHost(address);
        } catch (Exception e) {
            throw new TException(e.getMessage());
        }
        initiated = true;
        try {
            return BackendServiceProxy.getInstance().execPlanFragmentAsync(brpcAddress, rpcParams);
        } catch (RpcException e) {
            SimpleScheduler.updateBlacklistBackends(backendId);
            throw e;
        }
    }
}
// Execution parameters of ONE plan fragment: its instances, scan range
// assignment, exchange sender counts, and downstream destinations. Also
// knows how to serialize itself to the thrift structures sent to backends.
protected class FragmentExecParams {
    public PlanFragment fragment;
    public List<TPlanFragmentDestination> destinations = Lists.newArrayList();
    // exchange node id -> number of senders feeding it
    public Map<Integer, Integer> perExchNumSenders = Maps.newHashMap();
    public List<PlanFragmentId> inputFragments = Lists.newArrayList();
    public List<FInstanceExecParam> instanceExecParams = Lists.newArrayList();
    public FragmentScanRangeAssignment scanRangeAssignment = new FragmentScanRangeAssignment();
    public FragmentExecParams(PlanFragment fragment) {
        this.fragment = fragment;
    }
    // Builds one TExecPlanFragmentParams per instance; backendNum is the
    // starting global backend number and is incremented per instance.
    List<TExecPlanFragmentParams> toThrift(int backendNum) {
        List<TExecPlanFragmentParams> paramsList = Lists.newArrayList();
        for (int i = 0; i < instanceExecParams.size(); ++i) {
            final FInstanceExecParam instanceExecParam = instanceExecParams.get(i);
            TExecPlanFragmentParams params = new TExecPlanFragmentParams();
            params.setProtocol_version(PaloInternalServiceVersion.V1);
            params.setFragment(fragment.toThrift());
            params.setDesc_tbl(descTable);
            params.setParams(new TPlanFragmentExecParams());
            params.setResource_info(tResourceInfo);
            params.params.setQuery_id(queryId);
            params.params.setFragment_instance_id(instanceExecParam.instanceId);
            Map<Integer, List<TScanRangeParams>> scanRanges = instanceExecParam.perNodeScanRanges;
            if (scanRanges == null) {
                // instance with no scan work still needs a (possibly empty) map
                scanRanges = Maps.newHashMap();
            }
            params.params.setPer_node_scan_ranges(scanRanges);
            params.params.setPer_exch_num_senders(perExchNumSenders);
            params.params.setDestinations(destinations);
            params.params.setSender_id(i);
            params.params.setNum_senders(instanceExecParams.size());
            params.setCoord(coordAddress);
            params.setBackend_num(backendNum++);
            params.setQuery_globals(queryGlobals);
            params.setQuery_options(queryOptions);
            params.params.setSend_query_statistics_with_every_batch(
                    fragment.isTransferQueryStatisticsWithEveryBatch());
            // load queries additionally carry the error-hub configuration
            if (queryOptions.getQuery_type() == TQueryType.LOAD) {
                LoadErrorHub.Param param = Catalog.getCurrentCatalog().getLoadInstance().getLoadErrorHubInfo();
                if (param != null) {
                    TLoadErrorHubInfo info = param.toThrift();
                    if (info != null) {
                        params.setLoad_error_hub_info(info);
                    }
                }
            }
            paramsList.add(params);
        }
        return paramsList;
    }
    // Appends a compact human-readable dump of scan ranges (OLAP tablets
    // and/or ES shards) to sb; used by debug tracing only.
    public void appendScanRange(StringBuilder sb, List<TScanRangeParams> params) {
        sb.append("range=[");
        int idx = 0;
        for (TScanRangeParams range : params) {
            TPaloScanRange paloScanRange = range.getScan_range().getPalo_scan_range();
            if (paloScanRange != null) {
                if (idx++ != 0) {
                    sb.append(",");
                }
                sb.append("{tid=").append(paloScanRange.getTablet_id())
                        .append(",ver=").append(paloScanRange.getVersion()).append("}");
            }
            TEsScanRange esScanRange = range.getScan_range().getEs_scan_range();
            if (esScanRange != null) {
                sb.append("{ index=").append(esScanRange.getIndex())
                        .append(", shardid=").append(esScanRange.getShard_id())
                        .append("}");
            }
        }
        sb.append("]");
    }
    // Appends a debug description of this fragment (plan + every instance
    // with its host and assigned scan ranges) to sb.
    public void appendTo(StringBuilder sb) {
        sb.append("{plan=");
        fragment.getPlanRoot().appendTrace(sb);
        sb.append(",instance=[");
        for (int i = 0; i < instanceExecParams.size(); ++i) {
            if (i != 0) {
                sb.append(",");
            }
            TNetworkAddress address = instanceExecParams.get(i).host;
            Map<Integer, List<TScanRangeParams>> scanRanges =
                    scanRangeAssignment.get(address);
            sb.append("{");
            sb.append("id=").append(DebugUtil.printId(instanceExecParams.get(i).instanceId));
            sb.append(",host=").append(instanceExecParams.get(i).host);
            if (scanRanges == null) {
                sb.append("}");
                continue;
            }
            sb.append(",range=[");
            int eIdx = 0;
            for (Map.Entry<Integer, List<TScanRangeParams>> entry : scanRanges.entrySet()) {
                if (eIdx++ != 0) {
                    sb.append(",");
                }
                sb.append("id").append(entry.getKey()).append(",");
                appendScanRange(sb, entry.getValue());
            }
            sb.append("]");
            sb.append("}");
        }
        sb.append("]");
        sb.append("}");
    }
}
}
class Coordinator {
private static final Logger LOG = LogManager.getLogger(Coordinator.class);
private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private static String localIP = FrontendOptions.getLocalHostAddress();
// Shared RNG for instance/host selection; avoids per-call Random creation.
private static Random instanceRandom = new Random();
// Overall query status; any failed instance flips it (guarded by 'lock').
Status queryStatus = new Status();
Map<TNetworkAddress, Long> addressToBackendID = Maps.newHashMap();
private ImmutableMap<Long, Backend> idToBackend = ImmutableMap.of();
private TDescriptorTable descTable;
private TQueryGlobals queryGlobals = new TQueryGlobals();
private TQueryOptions queryOptions;
private TNetworkAddress coordAddress;
// Protects queryStatus and the side-result collections below.
private Lock lock = new ReentrantLock();
// True once the receiver has returned EOS to the caller.
private boolean returnedAllResults;
private RuntimeProfile queryProfile;
private List<RuntimeProfile> fragmentProfile;
// fragment id -> its execution parameters (hosts, instances, destinations)
private Map<PlanFragmentId, FragmentExecParams> fragmentExecParamsMap = Maps.newHashMap();
private List<PlanFragment> fragments;
// One entry per fragment instance dispatched to a backend.
private List<BackendExecState> backendExecStates = Lists.newArrayList();
private ResultReceiver receiver;
private ConcurrentMap<TUniqueId, BackendExecState> backendExecStateMap =
        Maps.newConcurrentMap();
private List<ScanNode> scanNodes;
private Set<TUniqueId> instanceIds = Sets.newHashSet();
// Counts down as instances report completion; zero == query done.
private MarkedCountDownLatch<TUniqueId, Long> profileDoneSignal;
private boolean isBlockQuery;
private int numReceivedRows = 0;
// Side results reported by load/export instances.
private List<String> deltaUrls;
private Map<String, String> loadCounters;
private String trackingUrl;
private List<String> exportFiles;
private List<TTabletCommitInfo> commitInfos = Lists.newArrayList();
private long jobId = -1; // -1 for plain queries; set for load jobs
private TUniqueId queryId;
private TResourceInfo tResourceInfo;
private boolean needReport;
private String clusterName;
private final TUniqueId nextInstanceId;
// Constructor for interactive queries: pulls query id, plan, session
// options and resource info from the connect context / planner.
public Coordinator(ConnectContext context, Analyzer analyzer, Planner planner) {
    this.isBlockQuery = planner.isBlockQuery();
    this.queryId = context.queryId();
    this.fragments = planner.getFragments();
    this.scanNodes = planner.getScanNodes();
    this.descTable = analyzer.getDescTbl().toThrift();
    this.returnedAllResults = false;
    this.queryOptions = context.getSessionVariable().toThrift();
    this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
    this.queryGlobals.setTimestamp_ms(new Date().getTime());
    // "CST" is ambiguous; map it to the system default time zone
    if (context.getSessionVariable().getTimeZone().equals("CST")) {
        this.queryGlobals.setTime_zone(TimeUtils.DEFAULT_TIME_ZONE);
    } else {
        this.queryGlobals.setTime_zone(context.getSessionVariable().getTimeZone());
    }
    this.tResourceInfo = new TResourceInfo(context.getQualifiedUser(),
            context.getSessionVariable().getResourceGroup());
    this.needReport = context.getSessionVariable().isReportSucc();
    this.clusterName = context.getClusterName();
    // first fragment instance id = queryId.lo + 1
    this.nextInstanceId = new TUniqueId();
    nextInstanceId.setHi(queryId.hi);
    nextInstanceId.setLo(queryId.lo + 1);
}
// Constructor for broker load / export jobs: no connect context, so options
// and resource info use defaults; progress reporting is always on.
public Coordinator(Long jobId, TUniqueId queryId, DescriptorTable descTable,
                   List<PlanFragment> fragments, List<ScanNode> scanNodes, String cluster, String timezone) {
    this.isBlockQuery = true;
    this.jobId = jobId;
    this.queryId = queryId;
    this.descTable = descTable.toThrift();
    this.fragments = fragments;
    this.scanNodes = scanNodes;
    this.queryOptions = new TQueryOptions();
    this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
    this.queryGlobals.setTimestamp_ms(new Date().getTime());
    this.queryGlobals.setTime_zone(timezone);
    this.tResourceInfo = new TResourceInfo("", "");
    this.needReport = true;
    this.clusterName = cluster;
    // first fragment instance id = queryId.lo + 1
    this.nextInstanceId = new TUniqueId();
    nextInstanceId.setHi(queryId.hi);
    nextInstanceId.setLo(queryId.lo + 1);
}
// --- simple accessors / mutators ---
public long getJobId() {
    return jobId;
}
public TUniqueId getQueryId() {
    return queryId;
}
public void setQueryId(TUniqueId queryId) {
    this.queryId = queryId;
}
public void setQueryType(TQueryType type) {
    this.queryOptions.setQuery_type(type);
}
public Status getExecStatus() {
    return queryStatus;
}
public RuntimeProfile getQueryProfile() {
    return queryProfile;
}
public List<String> getDeltaUrls() {
    return deltaUrls;
}
public Map<String, String> getLoadCounters() {
    return loadCounters;
}
public String getTrackingUrl() {
    return trackingUrl;
}
// Per-query memory limit forwarded to every backend instance.
public void setExecMemoryLimit(long execMemoryLimit) {
    this.queryOptions.setMem_limit(execMemoryLimit);
}
// Query timeout in seconds forwarded to every backend instance.
public void setTimeout(int timeout) {
    this.queryOptions.setQuery_timeout(timeout);
}
// Resets per-attempt export state so the coordinator can be reused for a
// retry: clears backend states, query status and collected export files.
public void clearExportStatus() {
    lock.lock();
    try {
        this.backendExecStates.clear();
        this.backendExecStateMap.clear();
        this.queryStatus.setStatus(new Status());
        if (this.exportFiles == null) {
            this.exportFiles = Lists.newArrayList();
        }
        this.exportFiles.clear();
    } finally {
        lock.unlock();
    }
}
// Tablet commit infos accumulated from finished load instances.
public List<TTabletCommitInfo> getCommitInfos() {
    return commitInfos;
}
// One-time setup before scheduling: builds fragmentExecParamsMap, links
// input fragments via their data-stream sinks, creates the profile tree
// and snapshots the cluster's backend map.
private void prepare() {
    for (PlanFragment fragment : fragments) {
        fragmentExecParamsMap.put(fragment.getFragmentId(), new FragmentExecParams(fragment));
    }
    // record the input fragments of each exchange-consuming fragment
    for (PlanFragment fragment : fragments) {
        if (!(fragment.getSink() instanceof DataStreamSink)) {
            continue;
        }
        FragmentExecParams params = fragmentExecParamsMap.get(fragment.getDestFragment().getFragmentId());
        params.inputFragments.add(fragment.getFragmentId());
    }
    coordAddress = new TNetworkAddress(localIP, Config.rpc_port);
    // one child profile per fragment
    int fragmentSize = fragments.size();
    queryProfile = new RuntimeProfile("Execution Profile " + DebugUtil.printId(queryId));
    fragmentProfile = new ArrayList<RuntimeProfile>();
    for (int i = 0; i < fragmentSize; i ++) {
        fragmentProfile.add(new RuntimeProfile("Fragment " + i));
        queryProfile.addChild(fragmentProfile.get(i));
    }
    // snapshot alive backends of this cluster for scheduling
    this.idToBackend = Catalog.getCurrentSystemInfo().getBackendsInCluster(clusterName);
    if (LOG.isDebugEnabled()) {
        LOG.debug("idToBackend size={}", idToBackend.size());
        for (Map.Entry<Long, Backend> entry : idToBackend.entrySet()) {
            Long backendID = entry.getKey();
            Backend backend = entry.getValue();
            LOG.debug("backend: {}-{}-{}", backendID, backend.getHost(), backend.getBePort());
        }
    }
}
// Convenience wrappers around the coordinator-wide lock.
private void lock() {
    lock.lock();
}
private void unlock() {
    lock.unlock();
}
// Debug-level dump of every fragment's instances, hosts and scan ranges.
private void traceInstance() {
    if (LOG.isDebugEnabled()) {
        // build the string only when debug logging is actually enabled
        StringBuilder sb = new StringBuilder();
        int idx = 0;
        sb.append("query id=").append(DebugUtil.printId(queryId)).append(",");
        sb.append("fragment=[");
        for (Map.Entry<PlanFragmentId, FragmentExecParams> entry : fragmentExecParamsMap.entrySet()) {
            if (idx++ != 0) {
                sb.append(",");
            }
            sb.append(entry.getKey());
            entry.getValue().appendTo(sb);
        }
        sb.append("]");
        LOG.debug(sb.toString());
    }
}
// Main entry point: plans scan-range assignment and instance parameters,
// then dispatches every fragment instance to its backend via async brpc and
// waits for each dispatch to be acknowledged. Any dispatch failure cancels
// the whole query and throws.
public void exec() throws Exception {
    if (!scanNodes.isEmpty()) {
        LOG.debug("debug: in Coordinator::exec. query id: {}, planNode: {}",
                DebugUtil.printId(queryId), scanNodes.get(0).treeToThrift());
    }
    if (!fragments.isEmpty()) {
        LOG.debug("debug: in Coordinator::exec. query id: {}, fragment: {}",
                DebugUtil.printId(queryId), fragments.get(0).toThrift());
    }
    prepare();
    computeScanRangeAssignment();
    computeFragmentExecParams();
    traceInstance();
    // the first fragment is the root; a ResultSink means an interactive
    // query (create a receiver), otherwise this is a load/export job
    PlanFragmentId topId = fragments.get(0).getFragmentId();
    FragmentExecParams topParams = fragmentExecParamsMap.get(topId);
    if (topParams.fragment.getSink() instanceof ResultSink) {
        receiver = new ResultReceiver(
                topParams.instanceExecParams.get(0).instanceId,
                addressToBackendID.get(topParams.instanceExecParams.get(0).host),
                toBrpcHost(topParams.instanceExecParams.get(0).host),
                queryOptions.query_timeout * 1000);
    } else {
        // load jobs always report progress and collect delta urls/counters
        this.queryOptions.setIs_report_success(true);
        deltaUrls = Lists.newArrayList();
        loadCounters = Maps.newHashMap();
    }
    // completion latch: one mark per fragment instance
    profileDoneSignal = new MarkedCountDownLatch<TUniqueId, Long>(instanceIds.size());
    for (TUniqueId instanceId : instanceIds) {
        profileDoneSignal.addMark(instanceId, -1L /* value is meaningless */);
    }
    lock();
    try {
        int backendId = 0;
        int profileFragmentId = 0;
        long memoryLimit = queryOptions.getMem_limit();
        for (PlanFragment fragment : fragments) {
            FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
            int instanceNum = params.instanceExecParams.size();
            Preconditions.checkState(instanceNum > 0);
            List<TExecPlanFragmentParams> tParams = params.toThrift(backendId);
            List<Pair<BackendExecState, Future<PExecPlanFragmentResult>>> futures = Lists.newArrayList();
            // colocate fragments run several instances per host, so shrink
            // the per-instance memory limit to compensate
            if (colocateFragmentIds.contains(fragment.getFragmentId().asInt())) {
                int rate = Math.min(Config.query_colocate_join_memory_limit_penalty_factor, instanceNum);
                long newmemory = memoryLimit / rate;
                for (TExecPlanFragmentParams tParam : tParams) {
                    tParam.query_options.setMem_limit(newmemory);
                }
            }
            // fire off all instances of this fragment asynchronously
            int instanceId = 0;
            for (TExecPlanFragmentParams tParam : tParams) {
                BackendExecState execState =
                        new BackendExecState(fragment.getFragmentId(), instanceId++,
                                profileFragmentId, tParam, this.addressToBackendID);
                backendExecStates.add(execState);
                backendExecStateMap.put(tParam.params.getFragment_instance_id(), execState);
                futures.add(Pair.create(execState, execState.execRemoteFragmentAsync()));
                backendId++;
            }
            // wait for every dispatch ack; classify failures by status code
            for (Pair<BackendExecState, Future<PExecPlanFragmentResult>> pair : futures) {
                TStatusCode code = TStatusCode.INTERNAL_ERROR;
                String errMsg = null;
                try {
                    PExecPlanFragmentResult result = pair.second.get(Config.remote_fragment_exec_timeout_ms,
                            TimeUnit.MILLISECONDS);
                    code = TStatusCode.findByValue(result.status.status_code);
                    if (result.status.error_msgs != null && !result.status.error_msgs.isEmpty()) {
                        errMsg = result.status.error_msgs.get(0);
                    }
                } catch (ExecutionException e) {
                    LOG.warn("catch a execute exception", e);
                    code = TStatusCode.THRIFT_RPC_ERROR;
                } catch (InterruptedException e) {
                    LOG.warn("catch a interrupt exception", e);
                    code = TStatusCode.INTERNAL_ERROR;
                } catch (TimeoutException e) {
                    LOG.warn("catch a timeout exception", e);
                    code = TStatusCode.TIMEOUT;
                }
                if (code != TStatusCode.OK) {
                    if (errMsg == null) {
                        errMsg = "exec rpc error. backend id: " + pair.first.backendId;
                    }
                    queryStatus.setStatus(errMsg);
                    LOG.warn("exec plan fragment failed, errmsg={}, fragmentId={}, backend={}:{}",
                            errMsg, fragment.getFragmentId(),
                            pair.first.address.hostname, pair.first.address.port);
                    // any dispatch failure cancels everything already started
                    cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
                    switch (code) {
                        case TIMEOUT:
                            throw new UserException("query timeout. backend id: " + pair.first.backendId);
                        case THRIFT_RPC_ERROR:
                            SimpleScheduler.updateBlacklistBackends(pair.first.backendId);
                            throw new RpcException("rpc failed. backend id: " + pair.first.backendId);
                        default:
                            throw new UserException(errMsg);
                    }
                }
            }
            profileFragmentId += 1;
        }
        attachInstanceProfileToFragmentProfile();
    } finally {
        unlock();
    }
}
public List<String> getExportFiles() {
    return exportFiles;
}
// Appends export file paths reported by a finished instance (thread-safe;
// lazily creates the list).
void updateExportFiles(List<String> files) {
    lock.lock();
    try {
        if (exportFiles == null) {
            exportFiles = Lists.newArrayList();
        }
        exportFiles.addAll(files);
    } finally {
        lock.unlock();
    }
}
// Appends delta urls reported by a finished load instance (thread-safe).
void updateDeltas(List<String> urls) {
    lock.lock();
    try {
        deltaUrls.addAll(urls);
    } finally {
        lock.unlock();
    }
}
// Merges a finished instance's load counters into the accumulated totals.
// Both normal and abnormal row counts are stored as decimal strings.
private void updateLoadCounters(Map<String, String> newLoadCounters) {
    lock.lock();
    try {
        // current totals (0 when not yet present)
        long numRowsNormal = 0L;
        String value = this.loadCounters.get(LoadEtlTask.DPP_NORMAL_ALL);
        if (value != null) {
            numRowsNormal = Long.valueOf(value);
        }
        long numRowsAbnormal = 0L;
        value = this.loadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL);
        if (value != null) {
            numRowsAbnormal = Long.valueOf(value);
        }
        // add the newly reported counts
        value = newLoadCounters.get(LoadEtlTask.DPP_NORMAL_ALL);
        if (value != null) {
            numRowsNormal += Long.valueOf(value);
        }
        value = newLoadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL);
        if (value != null) {
            numRowsAbnormal += Long.valueOf(value);
        }
        this.loadCounters.put(LoadEtlTask.DPP_NORMAL_ALL, "" + numRowsNormal);
        this.loadCounters.put(LoadEtlTask.DPP_ABNORMAL_ALL, "" + numRowsAbnormal);
    } finally {
        lock.unlock();
    }
}
// Appends tablet commit infos reported by a finished instance (thread-safe).
private void updateCommitInfos(List<TTabletCommitInfo> commitInfos) {
    lock.lock();
    try {
        this.commitInfos.addAll(commitInfos);
    } finally {
        lock.unlock();
    }
}
// Records the FIRST failure status of the query and cancels all remaining
// instances. Later failures (and expected post-EOS cancellations) are
// ignored so the original error is preserved.
private void updateStatus(Status status, TUniqueId instanceId) {
    lock.lock();
    try {
        // a CANCELLED after all results were already returned is expected
        if (returnedAllResults && status.isCancelled()) {
            return;
        }
        if (status.ok()) {
            return;
        }
        // keep only the first error
        if (!queryStatus.ok()) {
            return;
        }
        queryStatus.setStatus(status);
        LOG.warn("one instance report fail throw updateStatus(), need cancel. job id: {}, query id: {}, instance id: {}",
                jobId, DebugUtil.printId(queryId), instanceId != null ? DebugUtil.printId(instanceId) : "NaN");
        cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
    } finally {
        lock.unlock();
    }
}
// Pulls the next row batch from the root instance's result receiver.
// Propagates any accumulated query failure, and on EOS may proactively
// cancel remaining instances once a LIMIT has been satisfied.
public RowBatch getNext() throws Exception {
    if (receiver == null) {
        throw new UserException("There is no receiver.");
    }
    RowBatch resultBatch;
    Status status = new Status();
    resultBatch = receiver.getNext(status);
    if (!status.ok()) {
        LOG.warn("get next fail, need cancel. query id: {}", DebugUtil.printId(queryId));
    }
    updateStatus(status, null /* no instance id */);
    // snapshot the query status under the lock before inspecting it
    Status copyStatus = null;
    lock();
    try {
        copyStatus = new Status(queryStatus);
    } finally {
        unlock();
    }
    if (!copyStatus.ok()) {
        if (Strings.isNullOrEmpty(copyStatus.getErrorMsg())) {
            copyStatus.rewriteErrorMsg();
        }
        if (copyStatus.isRpcError()) {
            throw new RpcException(copyStatus.getErrorMsg());
        } else {
            String errMsg = copyStatus.getErrorMsg();
            LOG.warn("query failed: {}", errMsg);
            // strip the trailing "host ..." detail before surfacing to users
            int hostIndex = errMsg.indexOf("host");
            if (hostIndex != -1) {
                errMsg = errMsg.substring(0, hostIndex);
            }
            throw new UserException(errMsg);
        }
    }
    if (resultBatch.isEos()) {
        this.returnedAllResults = true;
        // if a LIMIT was reached, the other instances are still running
        // needlessly — cancel them
        Long numLimitRows = fragments.get(0).getPlanRoot().getLimit();
        boolean hasLimit = numLimitRows > 0;
        if (!isBlockQuery && instanceIds.size() > 1 && hasLimit && numReceivedRows >= numLimitRows) {
            LOG.debug("no block query, return num >= limit rows, need cancel");
            cancelInternal(PPlanFragmentCancelReason.LIMIT_REACH);
        }
    } else {
        numReceivedRows += resultBatch.getBatch().getRowsSize();
    }
    return resultBatch;
}
// External cancellation entry point (e.g. user killed the query). Only the
// first cancellation takes effect; an already-failed query is left as-is.
public void cancel() {
    lock();
    try {
        if (!queryStatus.ok()) {
            // already failed/cancelled — nothing to do
            return;
        } else {
            queryStatus.setStatus(Status.CANCELLED);
        }
        LOG.warn("cancel execution of query, this is outside invoke");
        cancelInternal(PPlanFragmentCancelReason.USER_CANCEL);
    } finally {
        unlock();
    }
}
// Cancels the receiver and every remote instance, then releases the
// completion latch so waiters (join/endProfile) return promptly.
private void cancelInternal(PPlanFragmentCancelReason cancelReason) {
    if (null != receiver) {
        receiver.cancel();
    }
    cancelRemoteFragmentsAsync(cancelReason);
    if (profileDoneSignal != null) {
        // count down to zero to notify all objects waiting for this
        profileDoneSignal.countDownToZero(new Status());
        LOG.info("unfinished instance: {}", profileDoneSignal.getLeftMarks().stream().map(e->DebugUtil.printId(e.getKey())).toArray());
    }
}
// Sends a best-effort async cancel rpc to every instance that was started
// but is neither finished nor already cancelled. Rpc failures only
// blacklist the backend; they never abort the loop.
private void cancelRemoteFragmentsAsync(PPlanFragmentCancelReason cancelReason) {
    for (BackendExecState backendExecState : backendExecStates) {
        TNetworkAddress address = backendExecState.getBackendAddress();
        LOG.debug("cancelRemoteFragments initiated={} done={} hasCanceled={} ip={} port={} fragment instance id={}, reason: {}",
                backendExecState.initiated, backendExecState.done, backendExecState.hasCanceled,
                address.hostname, address.port, DebugUtil.printId(backendExecState.getFragmentInstanceId()),
                cancelReason.name());
        backendExecState.lock();
        try {
            // skip instances that were never started, already finished,
            // or already cancelled
            if (!backendExecState.initiated) {
                continue;
            }
            if (backendExecState.done) {
                continue;
            }
            if (backendExecState.hasCanceled) {
                continue;
            }
            TNetworkAddress brpcAddress = toBrpcHost(address);
            try {
                BackendServiceProxy.getInstance().cancelPlanFragmentAsync(
                        brpcAddress, backendExecState.getFragmentInstanceId(), cancelReason);
            } catch (RpcException e) {
                LOG.warn("cancel plan fragment get a exception, address={}:{}",
                        brpcAddress.getHostname(), brpcAddress.getPort());
                SimpleScheduler.updateBlacklistBackends(addressToBackendID.get(brpcAddress));
            }
            backendExecState.hasCanceled = true;
        } catch (Exception e) {
            LOG.warn("catch a exception", e);
        } finally {
            backendExecState.unlock();
        }
    }
}
// Computes per-fragment execution parameters: assigns hosts to instances,
// gives each instance a unique id derived from the query id, and wires
// sender counts and data-stream destinations between fragments.
private void computeFragmentExecParams() throws Exception {
    // fill hosts field in fragmentExecParamsMap
    computeFragmentHosts();
    // assign instance ids: queryId.lo + 1, +2, ...
    instanceIds.clear();
    for (FragmentExecParams params : fragmentExecParamsMap.values()) {
        LOG.debug("fragment {} has instances {}", params.fragment.getFragmentId(), params.instanceExecParams.size());
        for (int j = 0; j < params.instanceExecParams.size(); ++j) {
            TUniqueId instanceId = new TUniqueId();
            instanceId.setHi(queryId.hi);
            instanceId.setLo(queryId.lo + instanceIds.size() + 1);
            params.instanceExecParams.get(j).instanceId = instanceId;
            instanceIds.add(instanceId);
        }
    }
    // compute destinations and per-exchange sender counts
    for (FragmentExecParams params : fragmentExecParamsMap.values()) {
        PlanFragment destFragment = params.fragment.getDestFragment();
        if (destFragment == null) {
            // root fragment: no downstream consumer
            continue;
        }
        FragmentExecParams destParams = fragmentExecParamsMap.get(destFragment.getFragmentId());
        DataSink sink = params.fragment.getSink();
        PlanNodeId exchId = sink.getExchNodeId();
        // accumulate sender count; several fragments may feed one exchange
        if (destParams.perExchNumSenders.get(exchId.asInt()) == null) {
            destParams.perExchNumSenders.put(exchId.asInt(), params.instanceExecParams.size());
        } else {
            destParams.perExchNumSenders.put(exchId.asInt(),
                    params.instanceExecParams.size() + destParams.perExchNumSenders.get(exchId.asInt()));
        }
        // one destination (rpc + brpc address) per destination instance
        for (int j = 0; j < destParams.instanceExecParams.size(); ++j) {
            TPlanFragmentDestination dest = new TPlanFragmentDestination();
            dest.fragment_instance_id = destParams.instanceExecParams.get(j).instanceId;
            dest.server = toRpcHost(destParams.instanceExecParams.get(j).host);
            dest.setBrpc_server(toBrpcHost(destParams.instanceExecParams.get(j).host));
            params.destinations.add(dest);
        }
    }
}
/**
 * Resolves a backend's heartbeat (be-port) address to its thrift RPC address.
 *
 * @throws UserException if no backend is registered at the given host/port
 */
private TNetworkAddress toRpcHost(TNetworkAddress host) throws Exception {
    Backend backend = Catalog.getCurrentSystemInfo()
            .getBackendWithBePort(host.getHostname(), host.getPort());
    if (backend == null) {
        throw new UserException("there is no scanNode Backend");
    }
    return new TNetworkAddress(backend.getHost(), backend.getBeRpcPort());
}
/**
 * Resolves a backend's heartbeat (be-port) address to its brpc address, or
 * returns null when the backend does not expose a valid brpc port.
 *
 * @throws UserException if no backend is registered at the given host/port
 */
private TNetworkAddress toBrpcHost(TNetworkAddress host) throws Exception {
    Backend backend = Catalog.getCurrentSystemInfo()
            .getBackendWithBePort(host.getHostname(), host.getPort());
    if (backend == null) {
        throw new UserException("there is no scanNode Backend");
    }
    return backend.getBrpcPort() < 0
            ? null
            : new TNetworkAddress(backend.getHost(), backend.getBrpcPort());
}
/**
 * Returns true if the plan tree rooted at {@code node} contains a UnionNode,
 * without descending across fragment boundaries (ExchangeNode children).
 *
 * Bug fix: the previous version returned the recursion result for the FIRST
 * non-exchange child, so a UnionNode located under any later sibling was missed.
 * All children are now examined.
 */
private boolean containsUnionNode(PlanNode node) {
    if (node instanceof UnionNode) {
        return true;
    }
    for (PlanNode child : node.getChildren()) {
        // Exchange marks a fragment boundary; do not descend into other fragments.
        if (child instanceof ExchangeNode) {
            continue;
        }
        if (containsUnionNode(child)) {
            return true;
        }
    }
    return false;
}
/**
 * Returns true when this fragment contains a colocate hash join (and colocate
 * joins are not disabled globally or per-session). Fragment ids already known
 * to be colocate are cached in {@code colocateFragmentIds}.
 *
 * Bug fix: the previous version returned the recursion result for the first
 * child only ({@code for (...) { return isColocateJoin(child); }}), so a
 * colocate join under any later child was never detected. All children are
 * now examined.
 */
private boolean isColocateJoin(PlanNode node) {
    if (Config.disable_colocate_join) {
        return false;
    }
    if (ConnectContext.get() != null
            && ConnectContext.get().getSessionVariable().isDisableColocateJoin()) {
        return false;
    }
    if (colocateFragmentIds.contains(node.getFragmentId().asInt())) {
        return true;
    }
    if (node instanceof HashJoinNode) {
        HashJoinNode joinNode = (HashJoinNode) node;
        if (joinNode.isColocate()) {
            // Cache the positive answer for subsequent nodes of the same fragment.
            colocateFragmentIds.add(joinNode.getFragmentId().asInt());
            return true;
        }
    }
    for (PlanNode childNode : node.getChildren()) {
        if (isColocateJoin(childNode)) {
            return true;
        }
    }
    return false;
}
/** Follows child(0) pointers until reaching a leaf or an ExchangeNode, and returns it. */
private PlanNode findLeftmostNode(PlanNode plan) {
    PlanNode current = plan;
    for (;;) {
        if (current.getChildren().size() == 0 || current instanceof ExchangeNode) {
            return current;
        }
        current = current.getChild(0);
    }
}
/** Returns {@code m[key]}, first inserting {@code defaultVal} when the key is absent. */
private <K, V> V findOrInsert(HashMap<K, V> m, final K key, final V defaultVal) {
    V existing = m.get(key);
    if (existing != null) {
        return existing;
    }
    m.put(key, defaultVal);
    return defaultVal;
}
/** Scan-range specialization: returns {@code m[key]}, inserting {@code defaultVal} when absent. */
private List<TScanRangeParams> findOrInsert(Map<Integer, List<TScanRangeParams>> m, Integer key,
        ArrayList<TScanRangeParams> defaultVal) {
    List<TScanRangeParams> existing = m.get(key);
    if (existing != null) {
        return existing;
    }
    m.put(key, defaultVal);
    return defaultVal;
}
// Placeholder cost model: every scan range is weighted 1, so the scheduler balances
// by range count rather than by actual data size. The scanRange argument is unused.
private long getScanRangeLength(final TScanRange scanRange) {
    return 1;
}
// Decides which backend executes each scan range of every scan node. Fragments that
// take part in a colocate join need stable bucket -> host placement, so they use the
// colocate-aware path; everything else is spread greedily by the scheduler path.
private void computeScanRangeAssignment() throws Exception {
    for (ScanNode scanNode : scanNodes) {
        // null means this node produced no scan ranges; nothing to assign.
        List<TScanRangeLocations> locations = scanNode.getScanRangeLocations(0);
        if (locations == null) {
            continue;
        }
        FragmentScanRangeAssignment assignment =
                fragmentExecParamsMap.get(scanNode.getFragmentId()).scanRangeAssignment;
        if (isColocateJoin(scanNode.getFragment().getPlanRoot())) {
            // NOTE(review): assumes a colocate fragment's scan node is always an
            // OlapScanNode -- confirm the planner guarantees this cast.
            computeScanRangeAssignmentByColocate((OlapScanNode) scanNode, assignment);
        } else {
            computeScanRangeAssignmentByScheduler(scanNode, locations, assignment);
        }
    }
}
// Groups scan ranges by tablet bucket sequence so ranges of the same bucket land on
// the same backend (required for colocate joins). The chosen host per bucket is cached
// in bucketSeqToAddress; ranges accumulate in bucketSeqToScanRange keyed by bucket
// sequence, then by scan-node id.
// NOTE(review): the 'assignment' parameter is not used here -- results go to
// bucketSeqToScanRange instead; confirm that is intended.
private void computeScanRangeAssignmentByColocate(
        final OlapScanNode scanNode,
        FragmentScanRangeAssignment assignment) throws Exception {
    for(Integer bucketSeq: scanNode.bucketSeq2locations.keySet()) {
        List<TScanRangeLocations> locations = scanNode.bucketSeq2locations.get(bucketSeq);
        if (!bucketSeqToAddress.containsKey(bucketSeq)) {
            // Choose (and remember) an execution host for this bucket exactly once.
            getExecHostPortForBucketSeq(locations.get(0), bucketSeq);
        }
        for(TScanRangeLocations location: locations) {
            Map<Integer, List<TScanRangeParams>> scanRanges =
                    findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<Integer, List<TScanRangeParams>>());
            List<TScanRangeParams> scanRangeParamsList =
                    findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>());
            TScanRangeParams scanRangeParams = new TScanRangeParams();
            scanRangeParams.scan_range = location.scan_range;
            scanRangeParamsList.add(scanRangeParams);
        }
    }
}
// Picks one replica location at random for the given bucket, records the chosen
// host in bucketSeqToAddress and its backend id in addressToBackendID.
// NOTE(review): a new Random is constructed per call; ThreadLocalRandom.current()
// would avoid the per-call allocation -- left unchanged here to avoid a new import.
private void getExecHostPortForBucketSeq(TScanRangeLocations seqLocation, Integer bucketSeq) throws Exception {
    int randomLocation = new Random().nextInt(seqLocation.locations.size());
    Reference<Long> backendIdRef = new Reference<Long>();
    // SimpleScheduler may substitute another replica; the backend actually chosen
    // is returned through backendIdRef.
    TNetworkAddress execHostPort = SimpleScheduler.getHost(seqLocation.locations.get(randomLocation).backend_id, seqLocation.locations, this.idToBackend, backendIdRef);
    if (execHostPort == null) {
        throw new UserException("there is no scanNode Backend");
    }
    this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
    this.bucketSeqToAddress.put(bucketSeq, execHostPort);
}
// Greedy balanced assignment: each scan range goes to the replica host that currently
// has the fewest assigned units (see getScanRangeLength -- currently 1 per range, so
// this balances by range count). SimpleScheduler may substitute another live backend;
// the final choice is recorded in addressToBackendID and in the per-host assignment map.
private void computeScanRangeAssignmentByScheduler(
        final ScanNode scanNode,
        final List<TScanRangeLocations> locations,
        FragmentScanRangeAssignment assignment) throws Exception {
    HashMap<TNetworkAddress, Long> assignedBytesPerHost = Maps.newHashMap();
    for (TScanRangeLocations scanRangeLocations : locations) {
        // Find the least-loaded replica for this range.
        Long minAssignedBytes = Long.MAX_VALUE;
        TScanRangeLocation minLocation = null;
        for (final TScanRangeLocation location : scanRangeLocations.getLocations()) {
            Long assignedBytes = findOrInsert(assignedBytesPerHost, location.server, 0L);
            if (assignedBytes < minAssignedBytes) {
                minAssignedBytes = assignedBytes;
                minLocation = location;
            }
        }
        // NOTE(review): minLocation is null if a range has no locations -- presumably the
        // planner guarantees at least one replica per range; confirm.
        Long scanRangeLength = getScanRangeLength(scanRangeLocations.scan_range);
        assignedBytesPerHost.put(minLocation.server,
                assignedBytesPerHost.get(minLocation.server) + scanRangeLength);
        Reference<Long> backendIdRef = new Reference<Long>();
        TNetworkAddress execHostPort = SimpleScheduler.getHost(minLocation.backend_id,
                scanRangeLocations.getLocations(), this.idToBackend, backendIdRef);
        if (execHostPort == null) {
            throw new UserException("there is no scanNode Backend");
        }
        this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
        // Append this range under host -> scan node id -> ranges.
        Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(assignment, execHostPort,
                new HashMap<Integer, List<TScanRangeParams>>());
        List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(),
                new ArrayList<TScanRangeParams>());
        TScanRangeParams scanRangeParams = new TScanRangeParams();
        scanRangeParams.scan_range = scanRangeLocations.scan_range;
        scanRangeParams.setVolume_id(minLocation.volume_id);
        scanRangeParamsList.add(scanRangeParams);
    }
}
// Callback for a backend's execution-status report of one fragment instance.
// Merges the reported profile under the instance lock, propagates non-ok statuses,
// and on the final ("done") report collects load/export side results and counts the
// instance as finished so join()/endProfile() can proceed.
public void updateFragmentExecStatus(TReportExecStatusParams params) {
    if (params.backend_num >= backendExecStates.size()) {
        LOG.warn("unknown backend number: {}, expected less than: {}",
                params.backend_num, backendExecStates.size());
        return;
    }
    boolean done = false;
    BackendExecState execState = backendExecStates.get(params.backend_num);
    execState.lock();
    try {
        // Ignore reports arriving after the instance already reported done.
        if (execState.done) {
            return;
        }
        if (params.isSetProfile()) {
            execState.profile.update(params.profile);
        }
        done = params.done;
        execState.done = params.done;
    } finally {
        execState.unlock();
    }
    if (LOG.isDebugEnabled()) {
        StringBuilder builder = new StringBuilder();
        execState.profile().prettyPrint(builder, "");
        LOG.debug("profile for query_id={} instance_id={}\n{}",
                DebugUtil.printId(queryId),
                DebugUtil.printId(params.getFragment_instance_id()),
                builder.toString());
    }
    Status status = new Status(params.status);
    // A cancellation that arrives after all results were already returned is expected;
    // any other non-ok status marks the query as failed.
    if (!(returnedAllResults && status.isCancelled()) && !status.ok()) {
        LOG.warn("one instance report fail, query_id={} instance_id={}",
                DebugUtil.printId(queryId), DebugUtil.printId(params.getFragment_instance_id()));
        updateStatus(status, params.getFragment_instance_id());
    }
    if (done) {
        // Side results that only accompany the final report of an instance.
        if (params.isSetDelta_urls()) {
            updateDeltas(params.getDelta_urls());
        }
        if (params.isSetLoad_counters()) {
            updateLoadCounters(params.getLoad_counters());
        }
        if (params.isSetTracking_url()) {
            trackingUrl = params.tracking_url;
        }
        if (params.isSetExport_files()) {
            updateExportFiles(params.export_files);
        }
        if (params.isSetCommitInfos()) {
            updateCommitInfos(params.getCommitInfos());
        }
        // Count this instance as finished.
        profileDoneSignal.markedCountDown(params.getFragment_instance_id(), -1L);
    }
    if (params.isSetLoaded_rows()) {
        Catalog.getCurrentCatalog().getLoadManager().updateJobLoadedRows(jobId, params.query_id, params.loaded_rows);
    }
    return;
}
// Finalizes the runtime profiles: waits briefly for outstanding instance reports,
// then normalizes the child ordering of each fragment profile.
public void endProfile() {
    if (backendExecStates.isEmpty()) {
        return;
    }
    if (needReport) {
        try {
            // Best effort: proceed after 2 seconds even if some instances never reported.
            profileDoneSignal.await(2, TimeUnit.SECONDS);
        } catch (InterruptedException e1) {
            LOG.warn("signal await error", e1);
        }
    }
    // Starts at index 1 -- presumably index 0 is the root/result fragment whose child
    // order must be preserved; TODO confirm.
    for (int i = 1; i < fragmentProfile.size(); ++i) {
        fragmentProfile.get(i).sortChildren();
    }
}
/**
 * Waits up to {@code seconds} for every fragment instance to report completion.
 *
 * @param seconds maximum time to wait
 * @return true when all instances finished within the timeout, false on timeout
 *         or interruption
 */
public boolean join(int seconds) {
    try {
        return profileDoneSignal.await(seconds, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // Bug fix: the interrupt was previously swallowed silently. Restore the
        // thread's interrupt flag so callers can still observe the interruption.
        Thread.currentThread().interrupt();
    }
    return false;
}
// True once every fragment instance has counted down, i.e. all reported completion.
public boolean isDone() {
    return profileDoneSignal.getCount() == 0;
}
// Per-fragment placement: execution host -> (scan node id -> scan ranges to read there).
class FragmentScanRangeAssignment
        extends HashMap<TNetworkAddress, Map<Integer, List<TScanRangeParams>>> {
}
// Colocate placement: tablet bucket sequence -> (scan node id -> scan ranges).
class BucketSeqToScanRange extends HashMap<Integer, Map<Integer, List<TScanRangeParams>>> {
}
private BucketSeqToScanRange bucketSeqToScanRange = new BucketSeqToScanRange();
// Chosen execution host per tablet bucket (filled by getExecHostPortForBucketSeq).
private Map<Integer, TNetworkAddress> bucketSeqToAddress = Maps.newHashMap();
// Fragment ids already known to contain a colocate join (cache used by isColocateJoin).
private Set<Integer> colocateFragmentIds = new HashSet<>();
// Tracks the execution of one fragment instance on one backend: the rpc parameters
// sent, whether the rpc was issued, completion state, and the runtime profile
// reported back. The lock guards 'done' and 'profile', which are updated from the
// report callback while being read by the coordinator.
public class BackendExecState {
    TExecPlanFragmentParams rpcParams;
    private PlanFragmentId fragmentId;
    private int instanceId; // index into the fragment's instanceExecParams
    private boolean initiated; // set once execRemoteFragmentAsync has been attempted
    private boolean done; // set when the instance reports completion
    private boolean hasCanceled;
    private Lock lock = new ReentrantLock();
    private int profileFragmentId; // index of this fragment's profile in fragmentProfile
    RuntimeProfile profile;
    TNetworkAddress address; // backend host this instance runs on
    Long backendId;
    public int profileFragmentId() {
        return profileFragmentId;
    }
    public boolean initiated() {
        return initiated;
    }
    public RuntimeProfile profile() {
        return profile;
    }
    // Acquire/release the per-instance lock guarding 'done' and 'profile'.
    public void lock() {
        lock.lock();
    }
    public void unlock() {
        lock.unlock();
    }
    public int getInstanceId() {
        return instanceId;
    }
    public PlanFragmentId getFragmentId() {
        return fragmentId;
    }
    // Resolves the execution address and backend id from the fragment's exec params
    // and names the profile after the instance id and host.
    public BackendExecState(PlanFragmentId fragmentId, int instanceId, int profileFragmentId,
            TExecPlanFragmentParams rpcParams, Map<TNetworkAddress, Long> addressToBackendID) {
        this.profileFragmentId = profileFragmentId;
        this.fragmentId = fragmentId;
        this.instanceId = instanceId;
        this.rpcParams = rpcParams;
        this.initiated = false;
        this.done = false;
        this.address = fragmentExecParamsMap.get(fragmentId).instanceExecParams.get(instanceId).host;
        this.backendId = addressToBackendID.get(address);
        String name = "Instance " + DebugUtil.printId(fragmentExecParamsMap.get(fragmentId)
                .instanceExecParams.get(instanceId).instanceId) + " (host=" + address + ")";
        this.profile = new RuntimeProfile(name);
        this.hasCanceled = false;
    }
    public TNetworkAddress getBackendAddress() {
        return address;
    }
    public TUniqueId getFragmentInstanceId() {
        return this.rpcParams.params.getFragment_instance_id();
    }
    // Sends the plan fragment to the backend over brpc. 'initiated' is set before the
    // call, so a failed send still counts as an attempt. On RpcException the backend
    // is blacklisted before the exception is rethrown.
    public Future<PExecPlanFragmentResult> execRemoteFragmentAsync() throws TException, RpcException {
        TNetworkAddress brpcAddress = null;
        try {
            brpcAddress = toBrpcHost(address);
        } catch (Exception e) {
            throw new TException(e.getMessage());
        }
        initiated = true;
        try {
            return BackendServiceProxy.getInstance().execPlanFragmentAsync(brpcAddress, rpcParams);
        } catch (RpcException e) {
            SimpleScheduler.updateBlacklistBackends(backendId);
            throw e;
        }
    }
}
// Per-fragment execution parameters: the fragment itself, where its output is sent,
// how many senders feed each downstream exchange, its instances and their scan-range
// placement. toThrift() materializes one TExecPlanFragmentParams per instance.
protected class FragmentExecParams {
    public PlanFragment fragment;
    public List<TPlanFragmentDestination> destinations = Lists.newArrayList();
    // exchange node id -> number of sender instances feeding it
    public Map<Integer, Integer> perExchNumSenders = Maps.newHashMap();
    public List<PlanFragmentId> inputFragments = Lists.newArrayList();
    public List<FInstanceExecParam> instanceExecParams = Lists.newArrayList();
    public FragmentScanRangeAssignment scanRangeAssignment = new FragmentScanRangeAssignment();
    public FragmentExecParams(PlanFragment fragment) {
        this.fragment = fragment;
    }
    // Builds one rpc parameter struct per instance; backendNum is the starting
    // backend number and is incremented for each instance.
    List<TExecPlanFragmentParams> toThrift(int backendNum) {
        List<TExecPlanFragmentParams> paramsList = Lists.newArrayList();
        for (int i = 0; i < instanceExecParams.size(); ++i) {
            final FInstanceExecParam instanceExecParam = instanceExecParams.get(i);
            TExecPlanFragmentParams params = new TExecPlanFragmentParams();
            params.setProtocol_version(PaloInternalServiceVersion.V1);
            params.setFragment(fragment.toThrift());
            params.setDesc_tbl(descTable);
            params.setParams(new TPlanFragmentExecParams());
            params.setResource_info(tResourceInfo);
            params.params.setQuery_id(queryId);
            params.params.setFragment_instance_id(instanceExecParam.instanceId);
            Map<Integer, List<TScanRangeParams>> scanRanges = instanceExecParam.perNodeScanRanges;
            if (scanRanges == null) {
                // Instance has nothing to scan; send an empty map rather than null.
                scanRanges = Maps.newHashMap();
            }
            params.params.setPer_node_scan_ranges(scanRanges);
            params.params.setPer_exch_num_senders(perExchNumSenders);
            params.params.setDestinations(destinations);
            params.params.setSender_id(i);
            params.params.setNum_senders(instanceExecParams.size());
            params.setCoord(coordAddress);
            params.setBackend_num(backendNum++);
            params.setQuery_globals(queryGlobals);
            params.setQuery_options(queryOptions);
            params.params.setSend_query_statistics_with_every_batch(
                    fragment.isTransferQueryStatisticsWithEveryBatch());
            if (queryOptions.getQuery_type() == TQueryType.LOAD) {
                // Loads additionally carry the error-hub configuration when present.
                LoadErrorHub.Param param = Catalog.getCurrentCatalog().getLoadInstance().getLoadErrorHubInfo();
                if (param != null) {
                    TLoadErrorHubInfo info = param.toThrift();
                    if (info != null) {
                        params.setLoad_error_hub_info(info);
                    }
                }
            }
            paramsList.add(params);
        }
        return paramsList;
    }
    // Appends a compact human-readable rendering of the given scan ranges (olap
    // tablet id/version, or ES index/shard) for tracing.
    public void appendScanRange(StringBuilder sb, List<TScanRangeParams> params) {
        sb.append("range=[");
        int idx = 0;
        for (TScanRangeParams range : params) {
            TPaloScanRange paloScanRange = range.getScan_range().getPalo_scan_range();
            if (paloScanRange != null) {
                if (idx++ != 0) {
                    sb.append(",");
                }
                sb.append("{tid=").append(paloScanRange.getTablet_id())
                        .append(",ver=").append(paloScanRange.getVersion()).append("}");
            }
            TEsScanRange esScanRange = range.getScan_range().getEs_scan_range();
            if (esScanRange != null) {
                sb.append("{ index=").append(esScanRange.getIndex())
                        .append(", shardid=").append(esScanRange.getShard_id())
                        .append("}");
            }
        }
        sb.append("]");
    }
    // Appends a trace of this fragment's plan and the placement of each instance.
    public void appendTo(StringBuilder sb) {
        sb.append("{plan=");
        fragment.getPlanRoot().appendTrace(sb);
        sb.append(",instance=[");
        for (int i = 0; i < instanceExecParams.size(); ++i) {
            if (i != 0) {
                sb.append(",");
            }
            TNetworkAddress address = instanceExecParams.get(i).host;
            Map<Integer, List<TScanRangeParams>> scanRanges =
                    scanRangeAssignment.get(address);
            sb.append("{");
            sb.append("id=").append(DebugUtil.printId(instanceExecParams.get(i).instanceId));
            sb.append(",host=").append(instanceExecParams.get(i).host);
            if (scanRanges == null) {
                sb.append("}");
                continue;
            }
            sb.append(",range=[");
            int eIdx = 0;
            for (Map.Entry<Integer, List<TScanRangeParams>> entry : scanRanges.entrySet()) {
                if (eIdx++ != 0) {
                    sb.append(",");
                }
                sb.append("id").append(entry.getKey()).append(",");
                appendScanRange(sb, entry.getValue());
            }
            sb.append("]");
            sb.append("}");
        }
        sb.append("]");
        sb.append("}");
    }
} |
So, did you figure out what happens if expectedState is reserved here? Add comment? | public List<Node> addNodesInState(List<Node> nodes, Node.State expectedState) {
    // Adds the given nodes directly in expectedState in a single transaction.
    // The history records a transition from null (no previous state) into
    // expectedState -- so for e.g. the reserved state this logs an initial
    // reservation event, not a transition from provisioned; TODO confirm intended.
    NestedTransaction transaction = new NestedTransaction();
    CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
    for (Node node : nodes) {
        if (node.state() != expectedState)
            // Bug fix: report the state we expected; the old message printed the
            // node's own state, producing "X is not in the <its actual state> state".
            throw new IllegalArgumentException(node + " is not in the " + expectedState + " state");
        node = node.with(node.history().recordStateTransition(null, expectedState, Agent.system, clock.instant()));
        curatorTransaction.add(CuratorOperations.create(toPath(node).getAbsolute(), nodeSerializer.toJson(node)));
    }
    transaction.commit();
    for (Node node : nodes)
        log.log(LogLevel.INFO, "Added " + node);
    return nodes;
} | node = node.with(node.history().recordStateTransition(null, expectedState, Agent.system, clock.instant())); | public List<Node> addNodesInState(List<Node> nodes, Node.State expectedState) {
    // Adds the given nodes directly in expectedState in a single transaction.
    // The history records a transition from null (no previous state) into
    // expectedState -- so for e.g. the reserved state this logs an initial
    // reservation event, not a transition from provisioned; TODO confirm intended.
    NestedTransaction transaction = new NestedTransaction();
    CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
    for (Node node : nodes) {
        if (node.state() != expectedState)
            // Bug fix: report the state we expected; the old message printed the
            // node's own state, producing "X is not in the <its actual state> state".
            throw new IllegalArgumentException(node + " is not in the " + expectedState + " state");
        node = node.with(node.history().recordStateTransition(null, expectedState, Agent.system, clock.instant()));
        curatorTransaction.add(CuratorOperations.create(toPath(node).getAbsolute(), nodeSerializer.toJson(node)));
    }
    transaction.commit();
    for (Node node : nodes)
        log.log(LogLevel.INFO, "Added " + node);
    return nodes;
} | class CuratorDatabaseClient {
private static final Logger log = Logger.getLogger(CuratorDatabaseClient.class.getName());
// All node-repository state lives under this ZooKeeper path.
private static final Path root = Path.fromString("/provision/v1");
private static final Duration defaultLockTimeout = Duration.ofMinutes(1);
private final NodeSerializer nodeSerializer;
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final CuratorDatabase curatorDatabase;
private final Clock clock;
private final Zone zone;
// Wires up serialization and the underlying curator database, then ensures the
// required ZooKeeper directory structure exists.
public CuratorDatabaseClient(NodeFlavors flavors, Curator curator, Clock clock, Zone zone) {
    this.nodeSerializer = new NodeSerializer(flavors);
    this.zone = zone;
    // Caching is currently only enabled in the cd system.
    boolean useCache = zone.system().equals(SystemName.cd);
    this.curatorDatabase = new CuratorDatabase(curator, root, useCache);
    this.clock = clock;
    initZK();
}
// Creates the root, one directory per node state, and the inactive-jobs node.
private void initZK() {
    curatorDatabase.create(root);
    for (Node.State state : Node.State.values())
        curatorDatabase.create(toPath(state));
    curatorDatabase.create(inactiveJobsPath());
}
/**
 * Adds a set of nodes in the initial, provisioned state. The write is transactional:
 * it rolls back / fails if any node is not in the expected (provisioned) state.
 *
 * @return the given nodes for convenience.
 */
public List<Node> addNodes(List<Node> nodes) {
    return addNodesInState(nodes, Node.State.provisioned);
}
/**
 * Removes a node from the database.
 *
 * @param state the state the node is currently stored under
 * @param hostName the host name of the node to remove
 */
public void removeNode(Node.State state, String hostName) {
    NestedTransaction removal = new NestedTransaction();
    CuratorTransaction curator = curatorDatabase.newCuratorTransactionIn(removal);
    curator.add(CuratorOperations.delete(toPath(state, hostName).getAbsolute()));
    removal.commit();
    log.log(LogLevel.INFO, "Removed: " + state + " node " + hostName);
}
/**
 * Writes the given nodes and returns a copy of the incoming nodes in their persisted state.
 *
 * @param nodes the list of nodes to write
 * @param agent the agent causing this change
 * @param reason an optional human-readable reason, logged with the change
 * @return the nodes in their persisted state
 */
public List<Node> writeTo(List<Node> nodes, Agent agent, Optional<String> reason) {
    if (nodes.isEmpty()) return Collections.emptyList();
    List<Node> writtenNodes = new ArrayList<>(nodes.size());
    try (NestedTransaction nestedTransaction = new NestedTransaction()) {
        // Nodes are grouped by state because each state is a separate ZK directory.
        Map<Node.State, List<Node>> nodesByState = nodes.stream().collect(Collectors.groupingBy(Node::state));
        for (Map.Entry<Node.State, List<Node>> entry : nodesByState.entrySet()) {
            writtenNodes.addAll(writeTo(entry.getKey(), entry.getValue(), agent, reason, nestedTransaction));
            // NOTE(review): commit() runs once per state group, inside the loop --
            // confirm this is intended rather than a single commit after the loop.
            nestedTransaction.commit();
        }
    }
    return writtenNodes;
}
/**
 * Writes the given nodes to the given state (whether or not they are already in this state
 * or another), and returns a copy of the incoming nodes in their persisted state.
 *
 * @param toState the state to write the nodes to
 * @param nodes the list of nodes to write
 * @param agent the agent causing this change
 * @return the nodes in their persisted state
 */
public List<Node> writeTo(Node.State toState, List<Node> nodes,
        Agent agent, Optional<String> reason) {
    // Convenience wrapper creating and committing a transaction around the 5-arg variant.
    try (NestedTransaction nestedTransaction = new NestedTransaction()) {
        List<Node> writtenNodes = writeTo(toState, nodes, agent, reason, nestedTransaction);
        nestedTransaction.commit();
        return writtenNodes;
    }
}
/** Single-node convenience overload of {@link #writeTo(Node.State, List, Agent, Optional)}. */
public Node writeTo(Node.State toState, Node node, Agent agent, Optional<String> reason) {
    return writeTo(toState, Collections.singletonList(node), agent, reason).get(0);
}
/**
 * Adds to the given transaction operations to write the given nodes to the given state,
 * and returns a copy of the nodes in the state they will have if the transaction is committed.
 *
 * @param toState the state to write the nodes to
 * @param nodes the list of nodes to write
 * @param agent the agent causing this change
 * @param reason an optional reason to be logged, for humans
 * @param transaction the transaction to which write operations are added by this
 * @return the nodes in their state as it will be written if committed
 */
public List<Node> writeTo(Node.State toState, List<Node> nodes,
        Agent agent, Optional<String> reason,
        NestedTransaction transaction) {
    if (nodes.isEmpty()) return nodes;
    List<Node> writtenNodes = new ArrayList<>(nodes.size());
    CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
    for (Node node : nodes) {
        // Rebuild the node with adjusted status (fail counts, reboot generation),
        // the target state, its allocation dropped when the target state is not an
        // allocated state, and the state transition recorded in its history.
        Node newNode = new Node(node.openStackId(), node.ipAddresses(), node.additionalIpAddresses(), node.hostname(),
                node.parentHostname(), node.flavor(),
                newNodeStatus(node, toState),
                toState,
                toState.isAllocated() ? node.allocation() : Optional.empty(),
                node.history().recordStateTransition(node.state(), toState, agent, clock.instant()),
                node.type());
        // A state change is a delete from the old state directory plus a create in the new one.
        curatorTransaction.add(CuratorOperations.delete(toPath(node).getAbsolute()))
                .add(CuratorOperations.create(toPath(toState, newNode.hostname()).getAbsolute(), nodeSerializer.toJson(newNode)));
        writtenNodes.add(newNode);
    }
    // Log only after the transaction actually commits, and only for real state changes.
    transaction.onCommitted(() -> {
        for (Node node : nodes) {
            if (toState != node.state())
                log.log(LogLevel.INFO, agent + " moved " + node + " to " + toState + reason.map(s -> ": " + s).orElse(""));
        }
    });
    return writtenNodes;
}
/** Returns the status the node should carry after moving to {@code toState}. */
private Status newNodeStatus(Node node, Node.State toState) {
    Node.State fromState = node.state();
    if (toState == Node.State.failed && fromState != Node.State.failed)
        return node.status().withIncreasedFailCount();
    if (toState == Node.State.active && fromState == Node.State.failed)
        return node.status().withDecreasedFailCount();
    if (toState == Node.State.dirty && fromState != Node.State.dirty && ! needsFastNodeReuse(zone))
        return node.status().withReboot(node.status().reboot().withIncreasedWanted());
    return node.status();
}
/** In automated test environments, nodes must be recycled quickly for fast test turnaround. */
private boolean needsFastNodeReuse(Zone zone) {
    Environment environment = zone.environment();
    return environment == Environment.test || environment == Environment.staging;
}
/**
 * Returns all nodes which are in one of the given states.
 * When called with no states, every node is returned.
 */
public List<Node> getNodes(Node.State ... states) {
    Node.State[] queryStates = (states.length == 0) ? Node.State.values() : states;
    List<Node> result = new ArrayList<>();
    for (Node.State state : queryStates)
        for (String hostname : curatorDatabase.getChildren(toPath(state)))
            getNode(hostname, state).ifPresent(result::add);
    return result;
}
/**
 * Returns all nodes allocated to the given application which are in one of the given states.
 * When called with no states, all of the application's nodes are returned.
 */
public List<Node> getNodes(ApplicationId applicationId, Node.State ... states) {
    List<Node> owned = getNodes(states);
    owned.removeIf(node -> node.allocation()
                               .map(allocation -> ! allocation.owner().equals(applicationId))
                               .orElse(true));
    return owned;
}
/**
 * Returns a particular node, or empty if this node is not in any of the given states.
 * When called with no states, the node is returned if present in any state.
 */
public Optional<Node> getNode(String hostname, Node.State ... states) {
    Node.State[] queryStates = (states.length == 0) ? Node.State.values() : states;
    for (Node.State state : queryStates) {
        Optional<byte[]> data = curatorDatabase.getData(toPath(state, hostname));
        if (data.isPresent())
            return data.map(bytes -> nodeSerializer.fromJson(state, bytes));
    }
    return Optional.empty();
}
/** ZK directory for a state. */
private Path toPath(Node.State nodeState) { return root.append(toDir(nodeState)); }
/** ZK entry for a node, under its current state's directory. */
private Path toPath(Node node) {
    return root.append(toDir(node.state())).append(node.hostname());
}
/** ZK entry for a named node under the given state's directory. */
private Path toPath(Node.State nodeState, String nodeName) {
    return root.append(toDir(nodeState)).append(nodeName);
}
/** Creates and returns the path to the lock for this application. */
private Path lockPath(ApplicationId application) {
    Path lockPath =
            root
                    .append("locks")
                    .append(application.tenant().value())
                    .append(application.application().value())
                    .append(application.instance().value());
    // Ensure the lock node exists before it is used for locking.
    curatorDatabase.create(lockPath);
    return lockPath;
}
// Maps a node state to its ZooKeeper directory name. Note that the mapping is not
// 1:1 with the enum names: active is stored under "allocated" and inactive under
// "deallocated".
private String toDir(Node.State state) {
    switch (state) {
        case active: return "allocated";
        case dirty: return "dirty";
        case failed: return "failed";
        case inactive: return "deallocated";
        case parked : return "parked";
        case provisioned: return "provisioned";
        case ready: return "ready";
        case reserved: return "reserved";
        default: throw new RuntimeException("Node state " + state + " does not map to a directory name");
    }
}
/** Acquires the single cluster-global, reentrant lock for all non-active nodes. */
public Lock lockInactive() {
    return lock(root.append("locks").append("unallocatedLock"), defaultLockTimeout);
}
/** Acquires the single cluster-global, reentrant lock for active nodes of this application. */
public Lock lock(ApplicationId application) {
    return lock(application, defaultLockTimeout);
}
/** Acquires the single cluster-global, reentrant lock with the specified timeout for active nodes of this application. */
public Lock lock(ApplicationId application, Duration timeout) {
    try {
        return lock(lockPath(application), timeout);
    }
    catch (UncheckedTimeoutException e) {
        // Translate lock-acquisition timeouts into the domain-specific exception.
        throw new ApplicationLockException(e);
    }
}
/** Acquires the lock at the given path, waiting at most the given timeout. */
private Lock lock(Path path, Duration timeout) {
    return curatorDatabase.lock(path, timeout);
}
/**
 * Returns a default flavor specific for an application, or empty if not available.
 */
public Optional<String> getDefaultFlavorForApplication(ApplicationId applicationId) {
    // Stored as raw UTF-8 bytes; decode into the flavor name.
    Optional<byte[]> utf8DefaultFlavor = curatorDatabase.getData(defaultFlavorPath(applicationId));
    return utf8DefaultFlavor.map((flavor) -> new String(flavor, StandardCharsets.UTF_8));
}
/** ZK entry holding the default flavor for the given application. */
private Path defaultFlavorPath(ApplicationId applicationId) {
    return root.append("defaultFlavor").append(applicationId.serializedForm());
}
/**
 * Reads the stored set of inactive job names. On any read/parse failure the stored
 * state is reset to empty and an empty set is returned (best effort), with the
 * failure logged so it can be diagnosed.
 */
public Set<String> readInactiveJobs() {
    try {
        byte[] data = curatorDatabase.getData(inactiveJobsPath()).get();
        if (data.length == 0) return new HashSet<>();
        return stringSetSerializer.fromJson(data);
    }
    catch (RuntimeException e) {
        // Bug fix: include the exception; previously its cause was silently dropped,
        // hiding why the inactive-jobs state was reset.
        log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state", e);
        writeInactiveJobs(Collections.emptySet());
        return new HashSet<>();
    }
}
/** Replaces the stored inactive-jobs set in a single-operation transaction. */
public void writeInactiveJobs(Set<String> inactiveJobs) {
    NestedTransaction writeTransaction = new NestedTransaction();
    CuratorTransaction curator = curatorDatabase.newCuratorTransactionIn(writeTransaction);
    byte[] json = stringSetSerializer.toJson(inactiveJobs);
    curator.add(CuratorOperations.setData(inactiveJobsPath().getAbsolute(), json));
    writeTransaction.commit();
}
/** Acquires the cluster-global lock guarding the inactive-jobs set. */
public Lock lockInactiveJobs() {
    return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout);
}
/** ZK entry storing the serialized inactive-jobs set. */
private Path inactiveJobsPath() {
    return root.append("inactiveJobs");
}
} | class CuratorDatabaseClient {
private static final Logger log = Logger.getLogger(CuratorDatabaseClient.class.getName());
private static final Path root = Path.fromString("/provision/v1");
private static final Duration defaultLockTimeout = Duration.ofMinutes(1);
private final NodeSerializer nodeSerializer;
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final CuratorDatabase curatorDatabase;
private final Clock clock;
private final Zone zone;
public CuratorDatabaseClient(NodeFlavors flavors, Curator curator, Clock clock, Zone zone) {
this.nodeSerializer = new NodeSerializer(flavors);
this.zone = zone;
boolean useCache = zone.system().equals(SystemName.cd);
this.curatorDatabase = new CuratorDatabase(curator, root, useCache);
this.clock = clock;
initZK();
}
private void initZK() {
curatorDatabase.create(root);
for (Node.State state : Node.State.values())
curatorDatabase.create(toPath(state));
curatorDatabase.create(inactiveJobsPath());
}
/**
* Adds a set of nodes. Rollbacks/fails transaction if any node is not in the expected state.
*/
/**
* Adds a set of nodes in the initial, provisioned state.
*
* @return the given nodes for convenience.
*/
public List<Node> addNodes(List<Node> nodes) {
return addNodesInState(nodes, Node.State.provisioned);
}
/**
* Removes a node.
*
* @param state the current state of the node
* @param hostName the host name of the node to remove
*/
public void removeNode(Node.State state, String hostName) {
Path path = toPath(state, hostName);
NestedTransaction transaction = new NestedTransaction();
CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
curatorTransaction.add(CuratorOperations.delete(path.getAbsolute()));
transaction.commit();
log.log(LogLevel.INFO, "Removed: " + state + " node " + hostName);
}
/**
* Writes the given nodes and returns a copy of the incoming nodes in their persisted state.
*
* @param nodes the list of nodes to write
* @param agent the agent causing this change
* @return the nodes in their persisted state
*/
public List<Node> writeTo(List<Node> nodes, Agent agent, Optional<String> reason) {
if (nodes.isEmpty()) return Collections.emptyList();
List<Node> writtenNodes = new ArrayList<>(nodes.size());
try (NestedTransaction nestedTransaction = new NestedTransaction()) {
Map<Node.State, List<Node>> nodesByState = nodes.stream().collect(Collectors.groupingBy(Node::state));
for (Map.Entry<Node.State, List<Node>> entry : nodesByState.entrySet()) {
writtenNodes.addAll(writeTo(entry.getKey(), entry.getValue(), agent, reason, nestedTransaction));
nestedTransaction.commit();
}
}
return writtenNodes;
}
/**
* Writes the given nodes to the given state (whether or not they are already in this state or another),
* and returns a copy of the incoming nodes in their persisted state.
*
* @param toState the state to write the nodes to
* @param nodes the list of nodes to write
* @param agent the agent causing this change
* @return the nodes in their persisted state
*/
public List<Node> writeTo(Node.State toState, List<Node> nodes,
Agent agent, Optional<String> reason) {
try (NestedTransaction nestedTransaction = new NestedTransaction()) {
List<Node> writtenNodes = writeTo(toState, nodes, agent, reason, nestedTransaction);
nestedTransaction.commit();
return writtenNodes;
}
}
public Node writeTo(Node.State toState, Node node, Agent agent, Optional<String> reason) {
return writeTo(toState, Collections.singletonList(node), agent, reason).get(0);
}
/**
* Adds to the given transaction operations to write the given nodes to the given state,
* and returns a copy of the nodes in the state they will have if the transaction is committed.
*
* @param toState the state to write the nodes to
* @param nodes the list of nodes to write
* @param agent the agent causing this change
* @param reason an optional reason to be logged, for humans
* @param transaction the transaction to which write operations are added by this
* @return the nodes in their state as it will be written if committed
*/
public List<Node> writeTo(Node.State toState, List<Node> nodes,
Agent agent, Optional<String> reason,
NestedTransaction transaction) {
if (nodes.isEmpty()) return nodes;
List<Node> writtenNodes = new ArrayList<>(nodes.size());
CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
for (Node node : nodes) {
Node newNode = new Node(node.openStackId(), node.ipAddresses(), node.additionalIpAddresses(), node.hostname(),
node.parentHostname(), node.flavor(),
newNodeStatus(node, toState),
toState,
toState.isAllocated() ? node.allocation() : Optional.empty(),
node.history().recordStateTransition(node.state(), toState, agent, clock.instant()),
node.type());
curatorTransaction.add(CuratorOperations.delete(toPath(node).getAbsolute()))
.add(CuratorOperations.create(toPath(toState, newNode.hostname()).getAbsolute(), nodeSerializer.toJson(newNode)));
writtenNodes.add(newNode);
}
transaction.onCommitted(() -> {
for (Node node : nodes) {
if (toState != node.state())
log.log(LogLevel.INFO, agent + " moved " + node + " to " + toState + reason.map(s -> ": " + s).orElse(""));
}
});
return writtenNodes;
}
private Status newNodeStatus(Node node, Node.State toState) {
if (node.state() != Node.State.failed && toState == Node.State.failed) return node.status().withIncreasedFailCount();
if (node.state() == Node.State.failed && toState == Node.State.active) return node.status().withDecreasedFailCount();
if (node.state() != Node.State.dirty && toState == Node.State.dirty && !needsFastNodeReuse(zone))
return node.status().withReboot(node.status().reboot().withIncreasedWanted());
return node.status();
}
/** In automated test environments, nodes need to be reused quickly to achieve fast test turnaronud time */
private boolean needsFastNodeReuse(Zone zone) {
return zone.environment() == Environment.staging || zone.environment() == Environment.test;
}
/**
 * Returns all nodes which are in one of the given states.
 * If no states are given this returns all nodes.
 */
public List<Node> getNodes(Node.State ... states) {
    Node.State[] statesToRead = (states.length == 0) ? Node.State.values() : states;
    List<Node> nodes = new ArrayList<>();
    // Each state has its own directory; list the children of each and load them.
    for (Node.State state : statesToRead)
        for (String hostname : curatorDatabase.getChildren(toPath(state)))
            getNode(hostname, state).ifPresent(nodes::add);
    return nodes;
}
/**
 * Returns all nodes allocated to the given application which are in one of the given states.
 * If no states are given this searches all states.
 */
public List<Node> getNodes(ApplicationId applicationId, Node.State ... states) {
    return getNodes(states).stream()
            .filter(node -> node.allocation().isPresent())
            .filter(node -> node.allocation().get().owner().equals(applicationId))
            .collect(Collectors.toList());
}
/**
 * Returns a particular node, or empty if this node is not in any of the given states.
 * If no states are given this returns the node if it is present in any state.
 *
 * @param hostname the full host name of the node to look up
 * @param states the states to search; all states when none are given
 */
public Optional<Node> getNode(String hostname, Node.State ... states) {
if (states.length == 0)
states = Node.State.values();
// Nodes are stored under one directory per state, so probe each state's path in turn
// and return the first hit.
for (Node.State state : states) {
Optional<byte[]> nodeData = curatorDatabase.getData(toPath(state, hostname));
if (nodeData.isPresent())
return nodeData.map((data) -> nodeSerializer.fromJson(state, data));
}
return Optional.empty();
}
/** Returns the ZooKeeper directory holding all nodes in the given state. */
private Path toPath(Node.State nodeState) { return root.append(toDir(nodeState)); }
/** Returns the ZooKeeper path of the given node, derived from its current state and hostname. */
private Path toPath(Node node) {
return root.append(toDir(node.state())).append(node.hostname());
}
/** Returns the ZooKeeper path a node with the given hostname has (or would have) in the given state. */
private Path toPath(Node.State nodeState, String nodeName) {
return root.append(toDir(nodeState)).append(nodeName);
}
/** Creates and returns the path to the lock for this application */
private Path lockPath(ApplicationId application) {
Path lockPath =
root
.append("locks")
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
// Ensure the lock node exists before anyone attempts to acquire it.
curatorDatabase.create(lockPath);
return lockPath;
}
/**
 * Maps a node state to the name of the ZooKeeper directory storing nodes in that
 * state. Note that two directory names differ from the state name ("allocated"
 * and "deallocated"); they are kept for compatibility with existing data.
 */
private String toDir(Node.State state) {
    switch (state) {
        case provisioned: return "provisioned";
        case reserved:    return "reserved";
        case active:      return "allocated";
        case inactive:    return "deallocated";
        case ready:       return "ready";
        case dirty:       return "dirty";
        case failed:      return "failed";
        case parked:      return "parked";
        default: throw new RuntimeException("Node state " + state + " does not map to a directory name");
    }
}
/** Acquires the single cluster-global, reentrant lock for all non-active nodes */
public Lock lockInactive() {
return lock(root.append("locks").append("unallocatedLock"), defaultLockTimeout);
}
/** Acquires the single cluster-global, reentrant lock for active nodes of this application */
public Lock lock(ApplicationId application) {
return lock(application, defaultLockTimeout);
}
/**
 * Acquires the single cluster-global, reentrant lock with the specified timeout for active nodes
 * of this application.
 *
 * @throws ApplicationLockException if the lock could not be acquired within the timeout
 */
public Lock lock(ApplicationId application, Duration timeout) {
try {
return lock(lockPath(application), timeout);
}
catch (UncheckedTimeoutException e) {
// Wrap the timeout in a domain exception so callers need not depend on the curator types.
throw new ApplicationLockException(e);
}
}
/** Acquires the lock at the given path, waiting at most the given duration. */
private Lock lock(Path path, Duration timeout) {
return curatorDatabase.lock(path, timeout);
}
/**
 * Returns a default flavor specific for an application, or empty if not available.
 */
public Optional<String> getDefaultFlavorForApplication(ApplicationId applicationId) {
    return curatorDatabase.getData(defaultFlavorPath(applicationId))
                          .map(utf8Bytes -> new String(utf8Bytes, StandardCharsets.UTF_8));
}

/** The ZooKeeper path under which an application's default flavor override is stored. */
private Path defaultFlavorPath(ApplicationId applicationId) {
    return root.append("defaultFlavor").append(applicationId.serializedForm());
}
/**
 * Reads the set of inactive job names. If the stored data cannot be read or parsed,
 * the stored state is reset to empty and an empty set is returned.
 */
public Set<String> readInactiveJobs() {
try {
// Optional.get() here throws NoSuchElementException if the path holds no data;
// that is a RuntimeException and lands in the catch below, which resets the state.
// initZK() creates the path up front, so normally zero-length data means "empty set".
byte[] data = curatorDatabase.getData(inactiveJobsPath()).get();
if (data.length == 0) return new HashSet<>();
return stringSetSerializer.fromJson(data);
}
catch (RuntimeException e) {
// Deliberate best-effort recovery: corrupt state is discarded rather than propagated.
log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
writeInactiveJobs(Collections.emptySet());
return new HashSet<>();
}
}
/** Overwrites the stored set of inactive job names in a single transaction. */
public void writeInactiveJobs(Set<String> inactiveJobs) {
NestedTransaction transaction = new NestedTransaction();
CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
curatorTransaction.add(CuratorOperations.setData(inactiveJobsPath().getAbsolute(),
stringSetSerializer.toJson(inactiveJobs)));
transaction.commit();
}
/** Acquires the single cluster-global, reentrant lock guarding the inactive jobs state */
public Lock lockInactiveJobs() {
return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout);
}
/** The ZooKeeper path storing the serialized set of inactive job names. */
private Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
} |
Didn't really look, I don't think it should matter either way if the node immediately goes from `reserved` straight to `active`? | public List<Node> addNodesInState(List<Node> nodes, Node.State expectedState) {
NestedTransaction transaction = new NestedTransaction();
CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
for (Node node : nodes) {
if (node.state() != expectedState)
throw new IllegalArgumentException(node + " is not in the " + node.state() + " state");
node = node.with(node.history().recordStateTransition(null, expectedState, Agent.system, clock.instant()));
curatorTransaction.add(CuratorOperations.create(toPath(node).getAbsolute(), nodeSerializer.toJson(node)));
}
transaction.commit();
for (Node node : nodes)
log.log(LogLevel.INFO, "Added " + node);
return nodes;
} | node = node.with(node.history().recordStateTransition(null, expectedState, Agent.system, clock.instant())); | public List<Node> addNodesInState(List<Node> nodes, Node.State expectedState) {
NestedTransaction transaction = new NestedTransaction();
CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
for (Node node : nodes) {
if (node.state() != expectedState)
throw new IllegalArgumentException(node + " is not in the " + node.state() + " state");
node = node.with(node.history().recordStateTransition(null, expectedState, Agent.system, clock.instant()));
curatorTransaction.add(CuratorOperations.create(toPath(node).getAbsolute(), nodeSerializer.toJson(node)));
}
transaction.commit();
for (Node node : nodes)
log.log(LogLevel.INFO, "Added " + node);
return nodes;
} | class CuratorDatabaseClient {
private static final Logger log = Logger.getLogger(CuratorDatabaseClient.class.getName());
private static final Path root = Path.fromString("/provision/v1");
private static final Duration defaultLockTimeout = Duration.ofMinutes(1);
private final NodeSerializer nodeSerializer;
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final CuratorDatabase curatorDatabase;
private final Clock clock;
private final Zone zone;
/**
 * Creates the client and ensures the required ZooKeeper paths exist.
 *
 * @param flavors the node flavors known to this system, used for (de)serialization
 * @param curator the curator instance backing the database
 * @param clock the clock used to timestamp state transitions
 * @param zone the zone this runs in, used for environment-specific behavior
 */
public CuratorDatabaseClient(NodeFlavors flavors, Curator curator, Clock clock, Zone zone) {
this.nodeSerializer = new NodeSerializer(flavors);
this.zone = zone;
// NOTE(review): caching is only enabled in the cd system — confirm this is intentional.
boolean useCache = zone.system().equals(SystemName.cd);
this.curatorDatabase = new CuratorDatabase(curator, root, useCache);
this.clock = clock;
initZK();
}
/** Creates the root, one directory per node state, and the inactive-jobs path, if missing. */
private void initZK() {
curatorDatabase.create(root);
for (Node.State state : Node.State.values())
curatorDatabase.create(toPath(state));
curatorDatabase.create(inactiveJobsPath());
}
/**
 * Adds a set of nodes in the initial, provisioned state.
 * The underlying addNodesInState rejects (by throwing) any node which is not
 * already in the provisioned state, failing the whole transaction.
 *
 * @return the given nodes for convenience.
 */
public List<Node> addNodes(List<Node> nodes) {
return addNodesInState(nodes, Node.State.provisioned);
}
/**
 * Removes a node from the database.
 *
 * @param state the current state of the node
 * @param hostName the host name of the node to remove
 */
public void removeNode(Node.State state, String hostName) {
    NestedTransaction transaction = new NestedTransaction();
    CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
    Path nodePath = toPath(state, hostName);
    curatorTransaction.add(CuratorOperations.delete(nodePath.getAbsolute()));
    transaction.commit();
    log.log(LogLevel.INFO, "Removed: " + state + " node " + hostName);
}
/**
 * Writes the given nodes and returns a copy of the incoming nodes in their persisted state.
 * All writes happen in a single transaction, so either all or none of the nodes are written.
 *
 * @param nodes the list of nodes to write
 * @param agent the agent causing this change
 * @param reason an optional reason to be logged, for humans
 * @return the nodes in their persisted state
 */
public List<Node> writeTo(List<Node> nodes, Agent agent, Optional<String> reason) {
    if (nodes.isEmpty()) return Collections.emptyList();
    List<Node> writtenNodes = new ArrayList<>(nodes.size());
    try (NestedTransaction nestedTransaction = new NestedTransaction()) {
        // Nodes in different states live under different ZooKeeper paths, so group by
        // state and add one set of operations per state to the same transaction.
        Map<Node.State, List<Node>> nodesByState = nodes.stream().collect(Collectors.groupingBy(Node::state));
        for (Map.Entry<Node.State, List<Node>> entry : nodesByState.entrySet()) {
            writtenNodes.addAll(writeTo(entry.getKey(), entry.getValue(), agent, reason, nestedTransaction));
        }
        // Commit exactly once, after all state groups have been added. Committing inside
        // the loop (as before) re-committed the same transaction on every iteration and
        // made a multi-state write non-atomic.
        nestedTransaction.commit();
    }
    return writtenNodes;
}
/**
 * Writes the given nodes to the given state (whether or not they are already in this state or another),
 * and returns a copy of the incoming nodes in their persisted state.
 * The write happens in its own transaction, committed before this returns.
 *
 * @param toState the state to write the nodes to
 * @param nodes the list of nodes to write
 * @param agent the agent causing this change
 * @param reason an optional reason to be logged, for humans
 * @return the nodes in their persisted state
 */
public List<Node> writeTo(Node.State toState, List<Node> nodes,
Agent agent, Optional<String> reason) {
try (NestedTransaction nestedTransaction = new NestedTransaction()) {
List<Node> writtenNodes = writeTo(toState, nodes, agent, reason, nestedTransaction);
nestedTransaction.commit();
return writtenNodes;
}
}
/** Convenience overload: writes a single node to the given state and returns it in its persisted state. */
public Node writeTo(Node.State toState, Node node, Agent agent, Optional<String> reason) {
    List<Node> written = writeTo(toState, Collections.singletonList(node), agent, reason);
    return written.get(0);
}
/**
* Adds to the given transaction operations to write the given nodes to the given state,
* and returns a copy of the nodes in the state they will have if the transaction is committed.
*
* @param toState the state to write the nodes to
* @param nodes the list of nodes to write
* @param agent the agent causing this change
* @param reason an optional reason to be logged, for humans
* @param transaction the transaction to which write operations are added by this
* @return the nodes in their state as it will be written if committed
*/
public List<Node> writeTo(Node.State toState, List<Node> nodes,
Agent agent, Optional<String> reason,
NestedTransaction transaction) {
if (nodes.isEmpty()) return nodes;
List<Node> writtenNodes = new ArrayList<>(nodes.size());
CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
for (Node node : nodes) {
Node newNode = new Node(node.openStackId(), node.ipAddresses(), node.additionalIpAddresses(), node.hostname(),
node.parentHostname(), node.flavor(),
newNodeStatus(node, toState),
toState,
toState.isAllocated() ? node.allocation() : Optional.empty(),
node.history().recordStateTransition(node.state(), toState, agent, clock.instant()),
node.type());
curatorTransaction.add(CuratorOperations.delete(toPath(node).getAbsolute()))
.add(CuratorOperations.create(toPath(toState, newNode.hostname()).getAbsolute(), nodeSerializer.toJson(newNode)));
writtenNodes.add(newNode);
}
transaction.onCommitted(() -> {
for (Node node : nodes) {
if (toState != node.state())
log.log(LogLevel.INFO, agent + " moved " + node + " to " + toState + reason.map(s -> ": " + s).orElse(""));
}
});
return writtenNodes;
}
private Status newNodeStatus(Node node, Node.State toState) {
if (node.state() != Node.State.failed && toState == Node.State.failed) return node.status().withIncreasedFailCount();
if (node.state() == Node.State.failed && toState == Node.State.active) return node.status().withDecreasedFailCount();
if (node.state() != Node.State.dirty && toState == Node.State.dirty && !needsFastNodeReuse(zone))
return node.status().withReboot(node.status().reboot().withIncreasedWanted());
return node.status();
}
/** In automated test environments, nodes need to be reused quickly to achieve fast test turnaronud time */
private boolean needsFastNodeReuse(Zone zone) {
return zone.environment() == Environment.staging || zone.environment() == Environment.test;
}
/**
* Returns all nodes which are in one of the given states.
* If no states are given this returns all nodes.
*/
public List<Node> getNodes(Node.State ... states) {
List<Node> nodes = new ArrayList<>();
if (states.length == 0)
states = Node.State.values();
for (Node.State state : states) {
for (String hostname : curatorDatabase.getChildren(toPath(state))) {
Optional<Node> node = getNode(hostname, state);
if (node.isPresent()) nodes.add(node.get());
}
}
return nodes;
}
/**
* Returns all nodes allocated to the given application which are in one of the given states
* If no states are given this returns all nodes.
*/
public List<Node> getNodes(ApplicationId applicationId, Node.State ... states) {
List<Node> nodes = getNodes(states);
nodes.removeIf(node -> ! node.allocation().isPresent() || ! node.allocation().get().owner().equals(applicationId));
return nodes;
}
/**
* Returns a particular node, or empty if this noe is not in any of the given states.
* If no states are given this returns the node if it is present in any state.
*/
public Optional<Node> getNode(String hostname, Node.State ... states) {
if (states.length == 0)
states = Node.State.values();
for (Node.State state : states) {
Optional<byte[]> nodeData = curatorDatabase.getData(toPath(state, hostname));
if (nodeData.isPresent())
return nodeData.map((data) -> nodeSerializer.fromJson(state, data));
}
return Optional.empty();
}
private Path toPath(Node.State nodeState) { return root.append(toDir(nodeState)); }
private Path toPath(Node node) {
return root.append(toDir(node.state())).append(node.hostname());
}
private Path toPath(Node.State nodeState, String nodeName) {
return root.append(toDir(nodeState)).append(nodeName);
}
/** Creates an returns the path to the lock for this application */
private Path lockPath(ApplicationId application) {
Path lockPath =
root
.append("locks")
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curatorDatabase.create(lockPath);
return lockPath;
}
private String toDir(Node.State state) {
switch (state) {
case active: return "allocated";
case dirty: return "dirty";
case failed: return "failed";
case inactive: return "deallocated";
case parked : return "parked";
case provisioned: return "provisioned";
case ready: return "ready";
case reserved: return "reserved";
default: throw new RuntimeException("Node state " + state + " does not map to a directory name");
}
}
/** Acquires the single cluster-global, reentrant lock for all non-active nodes */
public Lock lockInactive() {
return lock(root.append("locks").append("unallocatedLock"), defaultLockTimeout);
}
/** Acquires the single cluster-global, reentrant lock for active nodes of this application */
public Lock lock(ApplicationId application) {
return lock(application, defaultLockTimeout);
}
/** Acquires the single cluster-global, reentrant lock with the specified timeout for active nodes of this application */
public Lock lock(ApplicationId application, Duration timeout) {
try {
return lock(lockPath(application), timeout);
}
catch (UncheckedTimeoutException e) {
throw new ApplicationLockException(e);
}
}
private Lock lock(Path path, Duration timeout) {
return curatorDatabase.lock(path, timeout);
}
/**
* Returns a default flavor specific for an application, or empty if not available.
*/
public Optional<String> getDefaultFlavorForApplication(ApplicationId applicationId) {
Optional<byte[]> utf8DefaultFlavor = curatorDatabase.getData(defaultFlavorPath(applicationId));
return utf8DefaultFlavor.map((flavor) -> new String(flavor, StandardCharsets.UTF_8));
}
private Path defaultFlavorPath(ApplicationId applicationId) {
return root.append("defaultFlavor").append(applicationId.serializedForm());
}
public Set<String> readInactiveJobs() {
try {
byte[] data = curatorDatabase.getData(inactiveJobsPath()).get();
if (data.length == 0) return new HashSet<>();
return stringSetSerializer.fromJson(data);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
writeInactiveJobs(Collections.emptySet());
return new HashSet<>();
}
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
NestedTransaction transaction = new NestedTransaction();
CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
curatorTransaction.add(CuratorOperations.setData(inactiveJobsPath().getAbsolute(),
stringSetSerializer.toJson(inactiveJobs)));
transaction.commit();
}
public Lock lockInactiveJobs() {
return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout);
}
private Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
} | class CuratorDatabaseClient {
private static final Logger log = Logger.getLogger(CuratorDatabaseClient.class.getName());
private static final Path root = Path.fromString("/provision/v1");
private static final Duration defaultLockTimeout = Duration.ofMinutes(1);
private final NodeSerializer nodeSerializer;
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final CuratorDatabase curatorDatabase;
private final Clock clock;
private final Zone zone;
public CuratorDatabaseClient(NodeFlavors flavors, Curator curator, Clock clock, Zone zone) {
this.nodeSerializer = new NodeSerializer(flavors);
this.zone = zone;
boolean useCache = zone.system().equals(SystemName.cd);
this.curatorDatabase = new CuratorDatabase(curator, root, useCache);
this.clock = clock;
initZK();
}
private void initZK() {
curatorDatabase.create(root);
for (Node.State state : Node.State.values())
curatorDatabase.create(toPath(state));
curatorDatabase.create(inactiveJobsPath());
}
/**
* Adds a set of nodes. Rollbacks/fails transaction if any node is not in the expected state.
*/
/**
* Adds a set of nodes in the initial, provisioned state.
*
* @return the given nodes for convenience.
*/
public List<Node> addNodes(List<Node> nodes) {
return addNodesInState(nodes, Node.State.provisioned);
}
/**
* Removes a node.
*
* @param state the current state of the node
* @param hostName the host name of the node to remove
*/
public void removeNode(Node.State state, String hostName) {
Path path = toPath(state, hostName);
NestedTransaction transaction = new NestedTransaction();
CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
curatorTransaction.add(CuratorOperations.delete(path.getAbsolute()));
transaction.commit();
log.log(LogLevel.INFO, "Removed: " + state + " node " + hostName);
}
/**
 * Writes the given nodes and returns a copy of the incoming nodes in their persisted state.
 * All writes happen in a single transaction, so either all or none of the nodes are written.
 *
 * @param nodes the list of nodes to write
 * @param agent the agent causing this change
 * @param reason an optional reason to be logged, for humans
 * @return the nodes in their persisted state
 */
public List<Node> writeTo(List<Node> nodes, Agent agent, Optional<String> reason) {
    if (nodes.isEmpty()) return Collections.emptyList();
    List<Node> writtenNodes = new ArrayList<>(nodes.size());
    try (NestedTransaction nestedTransaction = new NestedTransaction()) {
        // Nodes in different states live under different ZooKeeper paths, so group by
        // state and add one set of operations per state to the same transaction.
        Map<Node.State, List<Node>> nodesByState = nodes.stream().collect(Collectors.groupingBy(Node::state));
        for (Map.Entry<Node.State, List<Node>> entry : nodesByState.entrySet()) {
            writtenNodes.addAll(writeTo(entry.getKey(), entry.getValue(), agent, reason, nestedTransaction));
        }
        // Commit exactly once, after all state groups have been added. Committing inside
        // the loop (as before) re-committed the same transaction on every iteration and
        // made a multi-state write non-atomic.
        nestedTransaction.commit();
    }
    return writtenNodes;
}
/**
* Writes the given nodes to the given state (whether or not they are already in this state or another),
* and returns a copy of the incoming nodes in their persisted state.
*
* @param toState the state to write the nodes to
* @param nodes the list of nodes to write
* @param agent the agent causing this change
* @return the nodes in their persisted state
*/
public List<Node> writeTo(Node.State toState, List<Node> nodes,
Agent agent, Optional<String> reason) {
try (NestedTransaction nestedTransaction = new NestedTransaction()) {
List<Node> writtenNodes = writeTo(toState, nodes, agent, reason, nestedTransaction);
nestedTransaction.commit();
return writtenNodes;
}
}
public Node writeTo(Node.State toState, Node node, Agent agent, Optional<String> reason) {
return writeTo(toState, Collections.singletonList(node), agent, reason).get(0);
}
/**
* Adds to the given transaction operations to write the given nodes to the given state,
* and returns a copy of the nodes in the state they will have if the transaction is committed.
*
* @param toState the state to write the nodes to
* @param nodes the list of nodes to write
* @param agent the agent causing this change
* @param reason an optional reason to be logged, for humans
* @param transaction the transaction to which write operations are added by this
* @return the nodes in their state as it will be written if committed
*/
public List<Node> writeTo(Node.State toState, List<Node> nodes,
Agent agent, Optional<String> reason,
NestedTransaction transaction) {
if (nodes.isEmpty()) return nodes;
List<Node> writtenNodes = new ArrayList<>(nodes.size());
CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
for (Node node : nodes) {
Node newNode = new Node(node.openStackId(), node.ipAddresses(), node.additionalIpAddresses(), node.hostname(),
node.parentHostname(), node.flavor(),
newNodeStatus(node, toState),
toState,
toState.isAllocated() ? node.allocation() : Optional.empty(),
node.history().recordStateTransition(node.state(), toState, agent, clock.instant()),
node.type());
curatorTransaction.add(CuratorOperations.delete(toPath(node).getAbsolute()))
.add(CuratorOperations.create(toPath(toState, newNode.hostname()).getAbsolute(), nodeSerializer.toJson(newNode)));
writtenNodes.add(newNode);
}
transaction.onCommitted(() -> {
for (Node node : nodes) {
if (toState != node.state())
log.log(LogLevel.INFO, agent + " moved " + node + " to " + toState + reason.map(s -> ": " + s).orElse(""));
}
});
return writtenNodes;
}
private Status newNodeStatus(Node node, Node.State toState) {
if (node.state() != Node.State.failed && toState == Node.State.failed) return node.status().withIncreasedFailCount();
if (node.state() == Node.State.failed && toState == Node.State.active) return node.status().withDecreasedFailCount();
if (node.state() != Node.State.dirty && toState == Node.State.dirty && !needsFastNodeReuse(zone))
return node.status().withReboot(node.status().reboot().withIncreasedWanted());
return node.status();
}
/** In automated test environments, nodes need to be reused quickly to achieve fast test turnaronud time */
private boolean needsFastNodeReuse(Zone zone) {
return zone.environment() == Environment.staging || zone.environment() == Environment.test;
}
/**
* Returns all nodes which are in one of the given states.
* If no states are given this returns all nodes.
*/
public List<Node> getNodes(Node.State ... states) {
List<Node> nodes = new ArrayList<>();
if (states.length == 0)
states = Node.State.values();
for (Node.State state : states) {
for (String hostname : curatorDatabase.getChildren(toPath(state))) {
Optional<Node> node = getNode(hostname, state);
if (node.isPresent()) nodes.add(node.get());
}
}
return nodes;
}
/**
* Returns all nodes allocated to the given application which are in one of the given states
* If no states are given this returns all nodes.
*/
public List<Node> getNodes(ApplicationId applicationId, Node.State ... states) {
List<Node> nodes = getNodes(states);
nodes.removeIf(node -> ! node.allocation().isPresent() || ! node.allocation().get().owner().equals(applicationId));
return nodes;
}
/**
* Returns a particular node, or empty if this noe is not in any of the given states.
* If no states are given this returns the node if it is present in any state.
*/
public Optional<Node> getNode(String hostname, Node.State ... states) {
if (states.length == 0)
states = Node.State.values();
for (Node.State state : states) {
Optional<byte[]> nodeData = curatorDatabase.getData(toPath(state, hostname));
if (nodeData.isPresent())
return nodeData.map((data) -> nodeSerializer.fromJson(state, data));
}
return Optional.empty();
}
private Path toPath(Node.State nodeState) { return root.append(toDir(nodeState)); }
private Path toPath(Node node) {
return root.append(toDir(node.state())).append(node.hostname());
}
private Path toPath(Node.State nodeState, String nodeName) {
return root.append(toDir(nodeState)).append(nodeName);
}
/** Creates an returns the path to the lock for this application */
private Path lockPath(ApplicationId application) {
Path lockPath =
root
.append("locks")
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curatorDatabase.create(lockPath);
return lockPath;
}
private String toDir(Node.State state) {
switch (state) {
case active: return "allocated";
case dirty: return "dirty";
case failed: return "failed";
case inactive: return "deallocated";
case parked : return "parked";
case provisioned: return "provisioned";
case ready: return "ready";
case reserved: return "reserved";
default: throw new RuntimeException("Node state " + state + " does not map to a directory name");
}
}
/** Acquires the single cluster-global, reentrant lock for all non-active nodes */
public Lock lockInactive() {
return lock(root.append("locks").append("unallocatedLock"), defaultLockTimeout);
}
/** Acquires the single cluster-global, reentrant lock for active nodes of this application */
public Lock lock(ApplicationId application) {
return lock(application, defaultLockTimeout);
}
/** Acquires the single cluster-global, reentrant lock with the specified timeout for active nodes of this application */
public Lock lock(ApplicationId application, Duration timeout) {
try {
return lock(lockPath(application), timeout);
}
catch (UncheckedTimeoutException e) {
throw new ApplicationLockException(e);
}
}
private Lock lock(Path path, Duration timeout) {
return curatorDatabase.lock(path, timeout);
}
/**
* Returns a default flavor specific for an application, or empty if not available.
*/
public Optional<String> getDefaultFlavorForApplication(ApplicationId applicationId) {
Optional<byte[]> utf8DefaultFlavor = curatorDatabase.getData(defaultFlavorPath(applicationId));
return utf8DefaultFlavor.map((flavor) -> new String(flavor, StandardCharsets.UTF_8));
}
private Path defaultFlavorPath(ApplicationId applicationId) {
return root.append("defaultFlavor").append(applicationId.serializedForm());
}
public Set<String> readInactiveJobs() {
try {
byte[] data = curatorDatabase.getData(inactiveJobsPath()).get();
if (data.length == 0) return new HashSet<>();
return stringSetSerializer.fromJson(data);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
writeInactiveJobs(Collections.emptySet());
return new HashSet<>();
}
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
NestedTransaction transaction = new NestedTransaction();
CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
curatorTransaction.add(CuratorOperations.setData(inactiveJobsPath().getAbsolute(),
stringSetSerializer.toJson(inactiveJobs)));
transaction.commit();
}
public Lock lockInactiveJobs() {
return lock(root.append("locks").append("inactiveJobsLock"), defaultLockTimeout);
}
private Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
} |
I think we should avoid getting all nodes from zk if 'node' is not a Docker host. | public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
try (Mutex lock = lockUnallocated()) {
List<Node> removed = getChildNodes(hostname).stream()
.filter(this::allowedToRemove)
.collect(Collectors.toList());
if (allowedToRemove(node)) removed.add(node);
db.removeNodes(removed);
return removed;
} catch (RuntimeException e) {
throw new IllegalArgumentException("Failed to delete " + node.hostname(), e);
}
} | List<Node> removed = getChildNodes(hostname).stream() | public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
return removeRecursively(node, false);
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Curator curator;
private final Clock clock;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final DockerImage dockerImage;
/**
 * Creates a node repository from a zookeeper provider.
 * This will use the system time to make time-sensitive decisions.
 */
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), new DockerImage(config.dockerImage()));
}
/**
 * Creates a node repository from a zookeeper provider and a clock instance
 * which will be used for time-sensitive decisions.
 */
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone);
this.curator = curator;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.dockerImage = dockerImage;
// Rewrite every node in place at startup. NOTE(review): presumably this migrates
// stored nodes to the current serialization format — confirm, and note it makes
// construction cost proportional to the number of stored nodes.
for (Node.State state : Node.State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this node repository */
public DockerImage dockerImage() { return dockerImage; }
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found.
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return db.getNode(hostname, inState);
}
/**
 * Returns all nodes in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, all nodes are returned
 * @return the matching nodes
 */
public List<Node> getNodes(Node.State ... inState) {
    // The database builds a fresh list on every call, so the former
    // .stream().collect(Collectors.toList()) was a redundant extra copy.
    return db.getNodes(inState);
}
/**
 * Finds and returns the nodes of the given type in any of the given states.
 *
 * @param type the node type to return
 * @param inState the states to return nodes from; all states when none are given
 * @return the matching nodes
 */
public List<Node> getNodes(NodeType type, Node.State ... inState) {
    List<Node> nodes = db.getNodes(inState);
    nodes.removeIf(node -> ! node.type().equals(type));
    return nodes;
}
/**
 * Finds and returns all nodes that are children of the given parent node.
 *
 * @param hostname Parent hostname
 * @return List of child nodes
 */
public List<Node> getChildNodes(String hostname) {
    return db.getNodes().stream()
             .filter(node -> node.parentHostname().isPresent())
             .filter(node -> node.parentHostname().get().equals(hostname))
             .collect(Collectors.toList());
}
/** Returns the nodes allocated to the given application which are in one of the given states */
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
/** Returns all nodes in the inactive state */
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
/** Returns all nodes in the failed state */
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
 * Returns a set of nodes that should be trusted by the given node.
 * All nodes trust the config servers and, if allocated, the other nodes of their owner;
 * what else is trusted depends on the node type, see the switch below.
 */
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
// TreeSet ordered by hostname gives a stable, de-duplicated ACL.
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<String> trustedNetworks = new HashSet<>();
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedNodes.addAll(getConfigNodes());
switch (node.type()) {
case tenant:
// Trust own parents (Docker hosts) and all proxy nodes; ready nodes
// additionally trust all tenant nodes.
trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
// Config servers trust everything.
trustedNodes.addAll(candidates.asList());
break;
case proxy:
// Proxies trust only the config servers added above.
break;
case host:
// NOTE(review): 172.17.0.0/16 looks like Docker's default bridge network — confirm.
trustedNetworks.add("172.17.0.0/16");
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return new NodeAcl(node, trustedNodes, trustedNetworks);
}
/**
 * Creates a list of node ACLs which identify which nodes the given node should trust.
 *
 * @param node the node for which to generate ACLs
 * @param children whether to return ACLs for the children of the given node
 *                 (e.g. containers on a Docker host) instead of the node itself
 * @return an unmodifiable list of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = new NodeList(getNodes());
    if ( ! children)
        return Collections.singletonList(getNodeAcl(node, candidates));
    List<NodeAcl> acls = new ArrayList<>();
    for (Node child : candidates.childNodes(node).asList())
        acls.add(getNodeAcl(child, candidates));
    return Collections.unmodifiableList(acls);
}
/** Get config node by hostname */
public Optional<Node> getConfigNode(String hostname) {
    // Config nodes are synthesized from the curator connection spec, not stored in the node repo
    return getConfigNodes().stream()
            .filter(n -> hostname.equals(n.hostname()))
            .findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
    return db.getDefaultFlavorForApplication(applicationId);
}
/** Returns the node flavors known to this node repository. */
public NodeFlavors getAvailableFlavors() {
    return flavors;
}
/**
 * Creates a new node object, without adding it to the node repo.
 * If no IP address is given, it will be resolved from the hostname.
 *
 * @throws RuntimeException if name resolution is needed and fails (see NameResolver.getAllByNameOrThrow)
 */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> additionalIpAddresses, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    // Use a local instead of reassigning the parameter
    Set<String> resolvedIpAddresses = ipAddresses.isEmpty()
            ? nameResolver.getAllByNameOrThrow(hostname)
            : ipAddresses;
    return Node.create(openStackId, ImmutableSet.copyOf(resolvedIpAddresses), additionalIpAddresses, hostname, parentHostname, flavor, type);
}
/** Creates a new node object with no additional IP addresses. */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, flavor, type);
}
/** Creates a new node object whose IP addresses are resolved from the hostname. */
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/**
 * Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes.
 *
 * @throws IllegalArgumentException if any node is not a docker container, is unallocated,
 *                                  or clashes with an existing hostname
 */
public List<Node> addDockerNodes(List<Node> nodes) {
    for (Node node : nodes) {
        if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
        if ( ! node.allocation().isPresent())
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
        if (getNode(node.hostname()).isPresent())
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
    }
    try (Mutex lock = lockUnallocated()) {
        return db.addNodesInState(nodes, Node.State.reserved);
    }
}
/**
 * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
 *
 * @throws IllegalArgumentException if a hostname already exists in the repository,
 *                                  or appears more than once in the given list
 */
public List<Node> addNodes(List<Node> nodes) {
    // Track hostnames seen in this batch so duplicates within the list itself are also rejected
    Set<String> seenHostnames = new HashSet<>();
    for (Node node : nodes) {
        if ( ! seenHostnames.add(node.hostname()) || getNode(node.hostname()).isPresent())
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
    }
    try (Mutex lock = lockUnallocated()) {
        return db.addNodes(nodes);
    }
}
/**
 * Sets a list of nodes ready and returns the nodes in the ready state.
 *
 * @throws IllegalArgumentException if any node is not in the dirty state
 */
public List<Node> setReady(List<Node> nodes) {
    nodes.forEach(node -> {
        if (node.state() != Node.State.dirty)
            throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
    });
    try (Mutex lock = lockUnallocated()) {
        return db.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
    }
}
/** Sets the node with the given hostname ready, returning it unchanged if it already is ready. */
public Node setReady(String hostname) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
    return node.state() == Node.State.ready ? node : setReady(Collections.singletonList(node)).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    // The state change takes effect only if the given transaction commits
    return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removable = new ArrayList<>();
        for (Node node : nodes)
            removable.add(node.with(node.allocation().get().removable()));
        write(removable);
    }
}
/** Deactivates all reserved and active nodes of the given application, under the application lock. */
public void deactivate(ApplicationId application, NestedTransaction transaction) {
    try (Mutex lock = lock(application)) {
        db.writeTo(Node.State.inactive,
                   db.getNodes(application, Node.State.reserved, Node.State.active),
                   Agent.application, Optional.empty(), transaction
        );
    }
}
/**
 * Deactivates these nodes in a transaction and returns
 * the nodes in the new state which will hold if the transaction commits.
 * This method does <b>not</b> lock
 */
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes) {
    // performOn acquires the appropriate lock for each node before applying the state change
    return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state. This method does not lock the node repository. */
public Node setDirty(Node node) {
    return db.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
 * Set a node dirty, which is in the provisioned, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 *
 * @throws IllegalArgumentException if no such node exists in an allowed state, or the node has hardware failure
 */
public Node setDirty(String hostname) {
    Node node = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked)
            .orElseThrow(() -> new IllegalArgumentException(
                    "Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state"));
    if (node.status().hardwareFailureDescription().isPresent())
        throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
    return setDirty(node);
}
/**
 * Fails this node and returns it in its new state.
 *
 * @param hostname the hostname of the node to fail
 * @param agent the agent performing this operation
 * @param reason the reason to record for the state change
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 *
 * @return List of all the failed nodes in their new state
 */
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent) {
    return move(hostname, Node.State.active, agent, Optional.empty());
}
/** Moves all children of the given hostname to the given state, then the node itself, returning all moved nodes. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = new ArrayList<>();
    for (Node child : getChildNodes(hostname))
        moved.add(move(child, toState, agent, reason));
    moved.add(move(hostname, toState, agent, reason));
    return moved;
}
/** Moves the node with the given hostname to the given state, throwing NoSuchNodeException if it is not found. */
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
    return move(node, toState, agent, reason);
}
/** Moves the given node to the given state under the appropriate lock, validating activation constraints. */
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
    // Only allocated nodes may become active
    if (toState == Node.State.active && ! node.allocation().isPresent())
        throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
    try (Mutex lock = lock(node)) {
        if (toState == Node.State.active) {
            // Reject activation if another active node of the same application occupies
            // the same cluster and index
            for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    throw new IllegalArgumentException("Could not move " + node + " to active:" +
                            "It has the same cluster and index as an existing node");
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}
/**
 * Returns true if the given node is in a state where it may be removed, and otherwise
 * throws IllegalArgumentException.
 *
 * Docker container nodes may be removed from the provisioned and ready states;
 * all other nodes may be removed from the provisioned, failed and parked states.
 *
 * NOTE(review): the javadoc previously here described recursive removal and appears to have
 * been copy-pasted from removeRecursively.
 */
private boolean allowedToRemove(Node nodeToRemove) {
    List<Node.State> legalStates = nodeToRemove.flavor().getType() == Flavor.Type.DOCKER_CONTAINER ?
            Arrays.asList(Node.State.provisioned, Node.State.ready) :
            Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked);
    if (! legalStates.contains(nodeToRemove.state())) {
        throw new IllegalArgumentException(String.format("%s can only be removed from following states: %s",
                nodeToRemove.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
    }
    return true;
}
/**
 * Increases the restart generation of the active nodes matching the filter.
 * Returns the nodes in their new state.
 */
public List<Node> restart(NodeFilter filter) {
    // Restart applies only to active nodes; locking is handled by performOn
    return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
 * Increases the reboot generation of the nodes matching the filter.
 * Returns the nodes in their new state.
 */
public List<Node> reboot(NodeFilter filter) {
    // Unlike restart, reboot applies to matching nodes in any state
    return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository.
 * NOTE(review): callers appear to be responsible for holding the relevant lock — confirm.
 *
 * @return the written node for convenience
 */
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository.
 *
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
 * Performs an operation requiring locking on all nodes matching some filter.
 *
 * @param filter the filter determining the set of nodes where the operation will be performed
 * @param action the action to perform
 * @return the set of nodes on which the action was performed, as they became as a result of the operation
 */
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
    // Partition matching nodes by the lock that protects them: allocated nodes are covered
    // by their owning application's lock, unallocated nodes by the unallocated-nodes lock
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : db.getNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }
    // Apply the action to each partition while holding its lock
    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes)
            resultingNodes.add(action.apply(node));
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue())
                resultingNodes.add(action.apply(node));
        }
    }
    return resultingNodes;
}
/** Returns node objects synthesized from the hosts in the curator connection spec. */
public List<Node> getConfigNodes() {
    List<Node> configNodes = new ArrayList<>();
    for (String hostPort : curator.connectionSpec().split(",")) {
        String host = hostPort.split(":")[0];
        configNodes.add(createNode(host, host, Optional.empty(),
                                   flavors.getFlavorOrThrow("v-4-8-100"),
                                   NodeType.config));
    }
    return configNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the owning application's lock if the node is allocated, and the unallocated lock otherwise */
private Mutex lock(Node node) {
    return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/*
 * Temporary feature toggle to enable/disable dynamic docker allocation
 * TODO: Remove when enabled in all zones
 */
public boolean dynamicAllocationEnabled() {
    // The toggle is "on" when this flag node exists in ZooKeeper
    return curator.exists(Path.fromString("/provision/v1/dynamicDockerAllocation"));
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;   // node state, persisted through curator
private final Curator curator;
private final Clock clock;                // injected so time-sensitive decisions are testable
private final NodeFlavors flavors;        // node flavors known to this repository
private final NameResolver nameResolver;  // resolves IP addresses for newly created nodes
private final DockerImage dockerImage;    // the Docker image to use for nodes in this repository
/**
 * Creates a node repository from a zookeeper provider.
 * This will use the system time to make time-sensitive decisions
 */
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
    this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), new DockerImage(config.dockerImage()));
}
/**
 * Creates a node repository from a zookeeper provider and a clock instance
 * which will be used for time-sensitive decisions.
 */
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
                      DockerImage dockerImage) {
    this.db = new CuratorDatabaseClient(flavors, curator, clock, zone);
    this.curator = curator;
    this.clock = clock;
    this.flavors = flavors;
    this.nameResolver = nameResolver;
    this.dockerImage = dockerImage;
    // Rewrites every stored node in every state at startup; presumably to migrate stored
    // node data to the current serialization format — TODO confirm
    for (Node.State state : Node.State.values())
        db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this */
public DockerImage dockerImage() { return dockerImage; }
/**
 * Finds and returns the node with the hostname in any of the given states, or empty if not found
 *
 * @param hostname the full host name of the node
 * @param inState the states the node may be in. If no states are given, it will be returned from any state
 * @return the node, or empty if it was not found in any of the given states
 */
public Optional<Node> getNode(String hostname, Node.State ... inState) {
    return db.getNode(hostname, inState);
}
/**
 * Returns all nodes in any of the given states.
 *
 * @param inState the states to return nodes from. If no states are given, nodes in all states are returned
 * @return a mutable copy of the list of all matching nodes
 */
public List<Node> getNodes(Node.State ... inState) {
    // The stream().collect(toList()) round-trip added nothing; copy the list directly
    return new ArrayList<>(db.getNodes(inState));
}
/**
 * Finds and returns the nodes of the given type in any of the given states.
 *
 * @param type the node type to return
 * @param inState the states to return nodes from. If no states are given, nodes in all states are returned
 * @return the matching nodes, or an empty list if none match
 */
public List<Node> getNodes(NodeType type, Node.State ... inState) {
    List<Node> matching = new ArrayList<>();
    for (Node node : db.getNodes(inState)) {
        if (node.type().equals(type))
            matching.add(node);
    }
    return matching;
}
/**
 * Finds and returns all nodes that are children of the given parent node.
 *
 * @param hostname the hostname of the parent node
 * @return the child nodes of that parent, or an empty list if it has none
 */
public List<Node> getChildNodes(String hostname) {
    List<Node> children = new ArrayList<>();
    for (Node node : db.getNodes()) {
        Optional<String> parent = node.parentHostname();
        if (parent.isPresent() && parent.get().equals(hostname))
            children.add(node);
    }
    return children;
}
/** Returns the nodes allocated to the given application, restricted to the given states if any are specified. */
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
/** Returns all nodes in the inactive state. */
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
/** Returns all nodes in the failed state. */
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
 * Returns a set of nodes that should be trusted by the given node.
 */
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
    // Sorted by hostname so ACL output is deterministic; the comparator also dedupes nodes by hostname
    Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
    Set<String> trustedNetworks = new HashSet<>();
    // If allocated, trust the other nodes owned by the same application
    node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
    // Every node type trusts the config servers
    trustedNodes.addAll(getConfigNodes());
    switch (node.type()) {
        case tenant:
            // Tenant nodes also trust the parent hosts of the already-trusted nodes, and all proxies
            trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
            trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
            if (node.state() == Node.State.ready) {
                // Ready (unallocated) tenant nodes additionally trust all tenant nodes
                trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
            }
            break;
        case config:
            // Config servers trust all known nodes
            trustedNodes.addAll(candidates.asList());
            break;
        case proxy:
            // Proxies trust only the config servers added above
            break;
        case host:
            // NOTE(review): presumably the Docker bridge network on the host — confirm
            trustedNetworks.add("172.17.0.0/16");
            break;
        default:
            throw new IllegalArgumentException(
                    String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
                            node.hostname(), node.type()));
    }
    return new NodeAcl(node, trustedNodes, trustedNetworks);
}
/**
 * Creates a list of node ACLs which identify which nodes the given node should trust.
 *
 * @param node the node for which to generate ACLs
 * @param children whether to return ACLs for the children of the given node
 *                 (e.g. containers on a Docker host) instead of the node itself
 * @return an unmodifiable list of node ACLs
 */
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
    NodeList candidates = new NodeList(getNodes());
    if ( ! children)
        return Collections.singletonList(getNodeAcl(node, candidates));
    List<NodeAcl> acls = new ArrayList<>();
    for (Node child : candidates.childNodes(node).asList())
        acls.add(getNodeAcl(child, candidates));
    return Collections.unmodifiableList(acls);
}
/** Get config node by hostname */
public Optional<Node> getConfigNode(String hostname) {
    // Config nodes are synthesized from the curator connection spec, not stored in the node repo
    return getConfigNodes().stream()
            .filter(n -> hostname.equals(n.hostname()))
            .findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
    return db.getDefaultFlavorForApplication(applicationId);
}
/** Returns the node flavors known to this node repository. */
public NodeFlavors getAvailableFlavors() {
    return flavors;
}
/**
 * Creates a new node object, without adding it to the node repo.
 * If no IP address is given, it will be resolved from the hostname.
 *
 * @throws RuntimeException if name resolution is needed and fails (see NameResolver.getAllByNameOrThrow)
 */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> additionalIpAddresses, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    // Use a local instead of reassigning the parameter
    Set<String> resolvedIpAddresses = ipAddresses.isEmpty()
            ? nameResolver.getAllByNameOrThrow(hostname)
            : ipAddresses;
    return Node.create(openStackId, ImmutableSet.copyOf(resolvedIpAddresses), additionalIpAddresses, hostname, parentHostname, flavor, type);
}
/** Creates a new node object with no additional IP addresses. */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, flavor, type);
}
/** Creates a new node object whose IP addresses are resolved from the hostname. */
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
                       Flavor flavor, NodeType type) {
    return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/**
 * Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes.
 *
 * @throws IllegalArgumentException if any node is not a docker container, is unallocated,
 *                                  or clashes with an existing hostname
 */
public List<Node> addDockerNodes(List<Node> nodes) {
    for (Node node : nodes) {
        if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
        if ( ! node.allocation().isPresent())
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
        if (getNode(node.hostname()).isPresent())
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
    }
    try (Mutex lock = lockUnallocated()) {
        return db.addNodesInState(nodes, Node.State.reserved);
    }
}
/**
 * Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes.
 *
 * @throws IllegalArgumentException if a hostname already exists in the repository,
 *                                  or appears more than once in the given list
 */
public List<Node> addNodes(List<Node> nodes) {
    // Track hostnames seen in this batch so duplicates within the list itself are also rejected
    Set<String> seenHostnames = new HashSet<>();
    for (Node node : nodes) {
        if ( ! seenHostnames.add(node.hostname()) || getNode(node.hostname()).isPresent())
            throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
    }
    try (Mutex lock = lockUnallocated()) {
        return db.addNodes(nodes);
    }
}
/**
 * Sets a list of nodes ready and returns the nodes in the ready state.
 *
 * @throws IllegalArgumentException if any node is not in the dirty state
 */
public List<Node> setReady(List<Node> nodes) {
    nodes.forEach(node -> {
        if (node.state() != Node.State.dirty)
            throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
    });
    try (Mutex lock = lockUnallocated()) {
        return db.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
    }
}
/** Sets the node with the given hostname ready, returning it unchanged if it already is ready. */
public Node setReady(String hostname) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
    return node.state() == Node.State.ready ? node : setReady(Collections.singletonList(node)).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
    return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
    // The state change takes effect only if the given transaction commits
    return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
 * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
 *
 * @param application the application the nodes belong to
 * @param nodes the nodes to make removable. These nodes MUST be in the active state.
 */
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        List<Node> removable = new ArrayList<>();
        for (Node node : nodes)
            removable.add(node.with(node.allocation().get().removable()));
        write(removable);
    }
}
/** Deactivates all reserved and active nodes of the given application, under the application lock. */
public void deactivate(ApplicationId application, NestedTransaction transaction) {
    try (Mutex lock = lock(application)) {
        db.writeTo(Node.State.inactive,
                   db.getNodes(application, Node.State.reserved, Node.State.active),
                   Agent.application, Optional.empty(), transaction
        );
    }
}
/**
 * Deactivates these nodes in a transaction and returns
 * the nodes in the new state which will hold if the transaction commits.
 * This method does <b>not</b> lock
 */
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
    return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes) {
    // performOn acquires the appropriate lock for each node before applying the state change
    return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state. This method does not lock the node repository. */
public Node setDirty(Node node) {
    return db.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
 * Set a node dirty, which is in the provisioned, failed or parked state.
 * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
 *
 * @throws IllegalArgumentException if no such node exists in an allowed state, or the node has hardware failure
 */
public Node setDirty(String hostname) {
    Node node = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked)
            .orElseThrow(() -> new IllegalArgumentException(
                    "Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state"));
    if (node.status().hardwareFailureDescription().isPresent())
        throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
    return setDirty(node);
}
/**
 * Fails this node and returns it in its new state.
 *
 * @param hostname the hostname of the node to fail
 * @param agent the agent performing this operation
 * @param reason the reason to record for the state change
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node fail(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
 * Fails all the nodes that are children of hostname before finally failing the hostname itself.
 *
 * @return List of all the failed nodes in their new state
 */
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
 * Parks this node and returns it in its new state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node park(String hostname, Agent agent, String reason) {
    return move(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Parks all the nodes that are children of hostname before finally parking the hostname itself.
 *
 * @return List of all the parked nodes in their new state
 */
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
    return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
 * Moves a previously failed or parked node back to the active state.
 *
 * @return the node in its new state
 * @throws NoSuchNodeException if the node is not found
 */
public Node reactivate(String hostname, Agent agent) {
    return move(hostname, Node.State.active, agent, Optional.empty());
}
/** Moves all children of the given hostname to the given state, then the node itself, returning all moved nodes. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = new ArrayList<>();
    for (Node child : getChildNodes(hostname))
        moved.add(move(child, toState, agent, reason));
    moved.add(move(hostname, toState, agent, reason));
    return moved;
}
/** Moves the node with the given hostname to the given state, throwing NoSuchNodeException if it is not found. */
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    Node node = getNode(hostname).orElseThrow(() ->
            new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
    return move(node, toState, agent, reason);
}
/** Moves the given node to the given state under the appropriate lock, validating activation constraints. */
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
    // Only allocated nodes may become active
    if (toState == Node.State.active && ! node.allocation().isPresent())
        throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
    try (Mutex lock = lock(node)) {
        if (toState == Node.State.active) {
            // Reject activation if another active node of the same application occupies
            // the same cluster and index
            for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    throw new IllegalArgumentException("Could not move " + node + " to active:" +
                            "It has the same cluster and index as an existing node");
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}
/*
 * Makes a dirty docker container node available for new allocations again.
 * Used to enable a smooth rollout of dynamic docker flavor allocations; once everything is
 * switched over, this can be simplified to only deleting the node.
 *
 * Should only be called by node-admin for docker containers
 */
public List<Node> markNodeAvailableForNewAllocation(String hostname) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
    if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
        throw new IllegalArgumentException(
                "Cannot make " + hostname + " available for new allocation, must be a docker container node");
    if (node.state() != Node.State.dirty)
        throw new IllegalArgumentException(
                "Cannot make " + hostname + " available for new allocation, must be in state dirty, but was in " + node.state());
    return dynamicAllocationEnabled()
            ? removeRecursively(node, true)
            : setReady(Collections.singletonList(node));
}
/**
 * Removes all the nodes that are children of hostname before finally removing the hostname itself.
 *
 * @param node the node to remove, together with its children if it is a host
 * @param force if true, removal is performed regardless of node state
 * @return List of all the nodes that have been removed
 * @throws IllegalArgumentException if removal fails for any reason
 */
private List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockUnallocated()) {
        // Only host nodes can have children; for other types only the node itself is removed
        List<Node> removed = node.type() != NodeType.host ?
                new ArrayList<>() :
                getChildNodes(node.hostname()).stream()
                        .filter(child -> force || verifyRemovalIsAllowed(child, true))
                        .collect(Collectors.toList());
        if (force || verifyRemovalIsAllowed(node, false)) removed.add(node);
        db.removeNodes(removed);
        return removed;
    } catch (RuntimeException e) {
        // Surface any failure (including state-validation errors) with the hostname and the cause attached
        throw new IllegalArgumentException("Failed to delete " + node.hostname(), e);
    }
}
/**
 * Returns true if the given node is in a state where it may be removed, and otherwise
 * throws IllegalArgumentException.
 *
 * Docker container nodes may be removed from the provisioned, failed, parked or ready states;
 * all other nodes may be removed from the provisioned, failed or parked states.
 *
 * @param nodeToRemove the node whose state to verify
 * @param deletingAsChild whether the node is being removed as part of removing its parent;
 *                        currently unused, but kept for a stricter ready-only check on
 *                        directly removed containers that is presently disabled
 */
private boolean verifyRemovalIsAllowed(Node nodeToRemove, boolean deletingAsChild) {
    // A previously commented-out branch restricting directly removed docker containers to the
    // ready state has been removed; restore the stricter check here if it is re-enabled.
    List<Node.State> legalStates = nodeToRemove.flavor().getType() == Flavor.Type.DOCKER_CONTAINER ?
            Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.ready) :
            Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked);
    String nodeDescription = nodeToRemove.flavor().getType() == Flavor.Type.DOCKER_CONTAINER ? "Child node" : "Node";
    if ( ! legalStates.contains(nodeToRemove.state())) {
        throw new IllegalArgumentException(String.format("%s %s can only be removed from following states: %s",
                nodeDescription, nodeToRemove.hostname(),
                legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
    }
    return true;
}
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
// Restricted to active nodes: the restart generation lives on the allocation, and the
// active-state filter guarantees allocation() is present for the unchecked get() below.
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
// Unlike restart(), this applies to nodes in any state: the reboot generation is kept
// on the node's Status, so no allocation is required.
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) {
    // Persist the node in its current state; the state field itself is deliberately not changed,
    // and no node repository lock is taken here (see the javadoc above).
    return db.writeTo(node.state(), node, Agent.system, Optional.empty());
}
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) {
    // Batch variant of write(Node): persists internal changes without touching state fields
    // and without locking the node repository.
    return db.writeTo(nodes, Agent.system, Optional.empty());
}
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
// Partition the matching nodes by ownership so each group can be mutated under the
// appropriate lock: the unallocated lock for free nodes, the owning application's
// lock for allocated ones.
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
// Apply the action to unallocated nodes first, holding only the unallocated lock.
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
// Then one application at a time, so at most one application lock is held at once
// (never together with the unallocated lock — avoids lock-ordering deadlocks).
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
public List<Node> getConfigNodes() {
    // Derives one config node per host in the curator connection spec ("host:port,host:port,...").
    // NOTE(review): the flavor "v-4-8-100" is hard-coded here — presumably the standard config
    // server flavor; confirm before changing.
    List<Node> configNodes = new ArrayList<>();
    for (String hostPort : curator.connectionSpec().split(",")) {
        String host = hostPort.split(":")[0];
        configNodes.add(createNode(host, host, Optional.empty(),
                                   flavors.getFlavorOrThrow("v-4-8-100"),
                                   NodeType.config));
    }
    return configNodes;
}
/** Returns the time keeper of this system, used for all time-sensitive decisions */
public Clock clock() { return clock; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/**
 * Acquires the appropriate lock for this node: the owning application's lock when the node
 * is allocated, otherwise the unallocated-nodes lock.
 */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/*
 * Temporary feature toggle to enable/disable dynamic docker allocation:
 * the feature is considered enabled iff the toggle path exists in the curator database.
 * TODO: Remove when enabled in all zones
 */
public boolean dynamicAllocationEnabled() {
return curator.exists(Path.fromString("/provision/v1/dynamicDockerAllocation"));
}
} |
Fixed. | public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
try (Mutex lock = lockUnallocated()) {
List<Node> removed = getChildNodes(hostname).stream()
.filter(this::allowedToRemove)
.collect(Collectors.toList());
if (allowedToRemove(node)) removed.add(node);
db.removeNodes(removed);
return removed;
} catch (RuntimeException e) {
throw new IllegalArgumentException("Failed to delete " + node.hostname(), e);
}
} | List<Node> removed = getChildNodes(hostname).stream() | public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
return removeRecursively(node, false);
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Curator curator;
private final Clock clock;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final DockerImage dockerImage;
/**
* Creates a node repository form a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), new DockerImage(config.dockerImage()));
}
/**
* Creates a node repository form a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone);
this.curator = curator;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.dockerImage = dockerImage;
for (Node.State state : Node.State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this */
public DockerImage dockerImage() { return dockerImage; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(Node.State ... inState) {
return db.getNodes(inState).stream().collect(Collectors.toList());
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/**
* Finds and returns all nodes that are children of the given parent node
*
* @param hostname Parent hostname
* @return List of child nodes
*/
public List<Node> getChildNodes(String hostname) {
return db.getNodes().stream()
.filter(node -> node.parentHostname()
.map(parentHostname -> parentHostname.equals(hostname))
.orElse(false))
.collect(Collectors.toList());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
* Returns a set of nodes that should be trusted by the given node.
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<String> trustedNetworks = new HashSet<>();
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedNodes.addAll(getConfigNodes());
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
break;
case proxy:
break;
case host:
trustedNetworks.add("172.17.0.0/16");
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return new NodeAcl(node, trustedNodes, trustedNetworks);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = new NodeList(getNodes());
if (children) {
return candidates.childNodes(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
} else {
return Collections.singletonList(getNodeAcl(node, candidates));
}
}
/** Get config node by hostname */
public Optional<Node> getConfigNode(String hostname) {
return getConfigNodes().stream()
.filter(n -> hostname.equals(n.hostname()))
.findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
return db.getDefaultFlavorForApplication(applicationId);
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> additionalIpAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), additionalIpAddresses, hostname, parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(List<Node> nodes) {
for (Node node : nodes) {
if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
}
if (!node.allocation().isPresent()) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodesInState(nodes, Node.State.reserved);
}
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
for (Node node : nodes) {
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodes(nodes);
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes) {
for (Node node : nodes)
if (node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
try (Mutex lock = lockUnallocated()) {
return db.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
}
}
public Node setReady(String hostname) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady)).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
db.writeTo(Node.State.inactive,
db.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes) {
return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state */
public Node setDirty(Node node) {
return db.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
* Set a node dirty, which is in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(String hostname) {
Node nodeToDirty = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state"));
if (nodeToDirty.status().hardwareFailureDescription().isPresent())
throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
return setDirty(nodeToDirty);
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent) {
return move(hostname, Node.State.active, agent, Optional.empty());
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = getChildNodes(hostname).stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, toState, agent, reason));
return moved;
}
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
return move(node, toState, agent, reason);
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
throw new IllegalArgumentException("Could not move " + node + " to active:" +
"It has the same cluster and index as an existing node");
}
}
return db.writeTo(toState, node, agent, reason);
}
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return List of all the nodes that have been removed
*/
/**
 * Returns true if the given node may be removed, otherwise throws.
 * Docker container nodes may be removed from provisioned or ready; all other
 * nodes only from provisioned, failed or parked.
 *
 * @throws IllegalArgumentException if the node is in a state it cannot be removed from
 */
private boolean allowedToRemove(Node nodeToRemove) {
List<Node.State> legalStates = nodeToRemove.flavor().getType() == Flavor.Type.DOCKER_CONTAINER ?
Arrays.asList(Node.State.provisioned, Node.State.ready) :
Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked);
if (! legalStates.contains(nodeToRemove.state())) {
throw new IllegalArgumentException(String.format("%s can only be removed from following states: %s",
nodeToRemove.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
}
return true;
}
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockUnallocated()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
public List<Node> getConfigNodes() {
return Arrays.stream(curator.connectionSpec().split(","))
.map(hostPort -> hostPort.split(":")[0])
.map(host -> createNode(host, host, Optional.empty(),
flavors.getFlavorOrThrow("v-4-8-100"),
NodeType.config))
.collect(Collectors.toList());
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/*
* Temporary feature toggle to enable/disable dynamic docker allocation
* TODO: Remove when enabled in all zones
*/
public boolean dynamicAllocationEnabled() {
return curator.exists(Path.fromString("/provision/v1/dynamicDockerAllocation"));
}
} | class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Curator curator;
private final Clock clock;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final DockerImage dockerImage;
/**
* Creates a node repository form a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), new DockerImage(config.dockerImage()));
}
/**
* Creates a node repository form a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone);
this.curator = curator;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.dockerImage = dockerImage;
for (Node.State state : Node.State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this */
public DockerImage dockerImage() { return dockerImage; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(Node.State ... inState) {
return db.getNodes(inState).stream().collect(Collectors.toList());
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the node, or empty if it was not found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/**
* Finds and returns all nodes that are children of the given parent node
*
* @param hostname Parent hostname
* @return List of child nodes
*/
public List<Node> getChildNodes(String hostname) {
return db.getNodes().stream()
.filter(node -> node.parentHostname()
.map(parentHostname -> parentHostname.equals(hostname))
.orElse(false))
.collect(Collectors.toList());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
* Returns a set of nodes that should be trusted by the given node.
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<String> trustedNetworks = new HashSet<>();
node.allocation().ifPresent(allocation -> trustedNodes.addAll(candidates.owner(allocation.owner()).asList()));
trustedNodes.addAll(getConfigNodes());
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.parentNodes(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
break;
case proxy:
break;
case host:
trustedNetworks.add("172.17.0.0/16");
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return new NodeAcl(node, trustedNodes, trustedNetworks);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = new NodeList(getNodes());
if (children) {
return candidates.childNodes(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
} else {
return Collections.singletonList(getNodeAcl(node, candidates));
}
}
/** Get config node by hostname */
public Optional<Node> getConfigNode(String hostname) {
return getConfigNodes().stream()
.filter(n -> hostname.equals(n.hostname()))
.findFirst();
}
/** Get default flavor override for an application, if present. */
public Optional<String> getDefaultFlavorOverride(ApplicationId applicationId) {
return db.getDefaultFlavorForApplication(applicationId);
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> additionalIpAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), additionalIpAddresses, hostname, parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(List<Node> nodes) {
for (Node node : nodes) {
if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
}
if (!node.allocation().isPresent()) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodesInState(nodes, Node.State.reserved);
}
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
public List<Node> addNodes(List<Node> nodes) {
for (Node node : nodes) {
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists");
}
try (Mutex lock = lockUnallocated()) {
return db.addNodes(nodes);
}
}
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes) {
for (Node node : nodes)
if (node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not dirty.");
try (Mutex lock = lockUnallocated()) {
return db.writeTo(Node.State.ready, nodes, Agent.system, Optional.empty());
}
}
public Node setReady(String hostname) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady)).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
    try (Mutex lock = lock(application)) {
        // Mark each allocation removable and persist the updated nodes in one write
        List<Node> updated = new ArrayList<>();
        for (Node node : nodes)
            updated.add(node.with(node.allocation().get().removable()));
        write(updated);
    }
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
db.writeTo(Node.State.inactive,
db.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes) {
return performOn(NodeListFilter.from(nodes), this::setDirty);
}
/** Move a single node to the dirty state */
public Node setDirty(Node node) {
return db.writeTo(Node.State.dirty, node, Agent.system, Optional.empty());
}
/**
* Set a node dirty, which is in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(String hostname) {
Node nodeToDirty = getNode(hostname, Node.State.provisioned, Node.State.failed, Node.State.parked).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": No such node in the provisioned, failed or parked state"));
if (nodeToDirty.status().hardwareFailureDescription().isPresent())
throw new IllegalArgumentException("Could not deallocate " + hostname + ": It has a hardware failure");
return setDirty(nodeToDirty);
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, Agent agent, String reason) {
return move(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent) {
return move(hostname, Node.State.active, agent, Optional.empty());
}
/** Moves every child of the given host to toState, then the host itself; returns all moved nodes. */
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
    List<Node> moved = new ArrayList<>();
    for (Node child : getChildNodes(hostname))
        moved.add(move(child, toState, agent, reason));
    moved.add(move(hostname, toState, agent, reason)); // parent last, after all of its children
    return moved;
}
private Node move(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
return move(node, toState, agent, reason);
}
/**
 * Moves the given node to toState under the appropriate lock and returns it in its new state.
 * When activating, verifies the node has an allocation and that no other active node of the
 * same application already occupies the same cluster+index slot.
 *
 * @throws IllegalArgumentException if the move is not legal
 */
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
    if (toState == Node.State.active && ! node.allocation().isPresent())
        throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
    try (Mutex lock = lock(node)) {
        if (toState == Node.State.active) {
            // Guard: two active nodes must never share an application's cluster+index slot
            for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
                if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                    && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                    // Fixed message: previously the two concatenated literals produced "active:It" with no space
                    throw new IllegalArgumentException("Could not move " + node + " to active: " +
                                                      "It has the same cluster and index as an existing node");
            }
        }
        return db.writeTo(toState, node, agent, reason);
    }
}
/*
* This method is used to enable a smooth rollout of dynamic docker flavor allocations. Once we have switch
* everything this can be simplified to only deleting the node.
*
* Should only be called by node-admin for docker containers
*/
public List<Node> markNodeAvailableForNewAllocation(String hostname) {
    Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
    // Guard clauses: only dirty docker-container nodes may be recycled this way
    if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
        throw new IllegalArgumentException(
                "Cannot make " + hostname + " available for new allocation, must be a docker container node");
    if (node.state() != Node.State.dirty)
        throw new IllegalArgumentException(
                "Cannot make " + hostname + " available for new allocation, must be in state dirty, but was in " + node.state());
    // With dynamic allocation the container node is deleted outright; otherwise it is recycled to ready
    return dynamicAllocationEnabled()
            ? removeRecursively(node, true)
            : setReady(Collections.singletonList(node));
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return List of all the nodes that have been removed
*/
private List<Node> removeRecursively(Node node, boolean force) {
    try (Mutex lock = lockUnallocated()) {
        // Only host nodes have children to collect; force skips the per-child legality check
        List<Node> removed = node.type() != NodeType.host ?
                new ArrayList<>() :
                getChildNodes(node.hostname()).stream()
                        .filter(child -> force || verifyRemovalIsAllowed(child, true))
                        .collect(Collectors.toList());
        if (force || verifyRemovalIsAllowed(node, false)) removed.add(node);
        db.removeNodes(removed);
        return removed;
    } catch (RuntimeException e) {
        // Any failure (including the IllegalArgumentException thrown by verifyRemovalIsAllowed)
        // is re-wrapped so the caller sees which hostname the delete was for; cause is preserved
        throw new IllegalArgumentException("Failed to delete " + node.hostname(), e);
    }
}
/**
* Allowed to a node delete if:
* Non-docker-container node: iff in state provisioned|failed|parked
* Docker-container-node:
* If only removing the container node: node in state ready
* If also removing the parent node: child is in state provisioned|failed|parked|ready
*/
/**
 * Verifies that removing the given node is legal, based on its flavor type and state.
 *
 * Docker-container nodes may be removed from provisioned|failed|parked|ready;
 * all other nodes only from provisioned|failed|parked.
 *
 * @param nodeToRemove    the node whose removal is being checked
 * @param deletingAsChild currently unused: it was only read by a since-disabled rule that
 *                        restricted standalone docker-container removal to the ready state
 *                        (dead commented-out branch removed in this cleanup)
 * @return true if removal is allowed
 * @throws IllegalArgumentException if the node's state does not permit removal
 */
private boolean verifyRemovalIsAllowed(Node nodeToRemove, boolean deletingAsChild) {
    if (nodeToRemove.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
        List<Node.State> legalStates = Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.ready);
        if ( ! legalStates.contains(nodeToRemove.state())) {
            throw new IllegalArgumentException(String.format("Child node %s can only be removed from following states: %s",
                    nodeToRemove.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
        }
    } else {
        List<Node.State> legalStates = Arrays.asList(Node.State.provisioned, Node.State.failed, Node.State.parked);
        if ( ! legalStates.contains(nodeToRemove.state())) {
            throw new IllegalArgumentException(String.format("Node %s can only be removed from following states: %s",
                    nodeToRemove.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
        }
    }
    return true;
}
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
/**
 * Applies the given action to every node matching the filter, taking the lock that
 * protects each node: the unallocated lock for free nodes, the owning application's
 * lock for allocated ones.
 *
 * @param filter selects the nodes to operate on
 * @param action the operation to apply to each selected node
 * @return the nodes as they became after the action was applied
 */
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
    // Partition the matching nodes by which lock protects them
    List<Node> freeNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> nodesByOwner = new ListMap<>();
    for (Node node : db.getNodes()) {
        if ( ! filter.matches(node)) continue;
        if (node.allocation().isPresent())
            nodesByOwner.put(node.allocation().get().owner(), node);
        else
            freeNodes.add(node);
    }
    List<Node> results = new ArrayList<>();
    // Free nodes: one pass under the unallocated lock
    try (Mutex lock = lockUnallocated()) {
        for (Node freeNode : freeNodes)
            results.add(action.apply(freeNode));
    }
    // Allocated nodes: one pass per owning application, under that application's lock
    for (Map.Entry<ApplicationId, List<Node>> ownerAndNodes : nodesByOwner.entrySet()) {
        try (Mutex lock = lock(ownerAndNodes.getKey())) {
            for (Node ownedNode : ownerAndNodes.getValue())
                results.add(action.apply(ownedNode));
        }
    }
    return results;
}
// Builds one config-type Node per host in the curator connection spec ("host1:port,host2:port,...").
// NOTE(review): the flavor "v-4-8-100" is hard-coded here — presumably a fixed config-server
// flavor; confirm it exists in every deployment's flavor config.
public List<Node> getConfigNodes() {
    return Arrays.stream(curator.connectionSpec().split(","))
            .map(hostPort -> hostPort.split(":")[0]) // keep only the host part of host:port
            .map(host -> createNode(host, host, Optional.empty(),
                    flavors.getFlavorOrThrow("v-4-8-100"),
                    NodeType.config))
            .collect(Collectors.toList());
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to changing the set of ready nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
private Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}
/*
* Temporary feature toggle to enable/disable dynamic docker allocation
* TODO: Remove when enabled in all zones
*/
public boolean dynamicAllocationEnabled() {
return curator.exists(Path.fromString("/provision/v1/dynamicDockerAllocation"));
}
} |
cache? please rename. | private void computeFragmentHosts() throws Exception {
for (int i = fragments.size() - 1; i >= 0; --i) {
PlanFragment fragment = fragments.get(i);
FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
if (fragment.getDataPartition() == DataPartition.UNPARTITIONED) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
LOG.warn("DataPartition UNPARTITIONED, no scanNode Backend");
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
continue;
}
PlanNode leftMostNode = findLeftmostNode(fragment.getPlanRoot());
boolean hasUnionNode = containsUnionNode(fragment.getPlanRoot());
if (!(leftMostNode instanceof ScanNode) && !hasUnionNode) {
PlanFragmentId inputFragmentIdx =
fragments.get(i).getChild(0).getFragmentId();
int doris_exchange_instances = -1;
if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable() != null) {
doris_exchange_instances = ConnectContext.get().getSessionVariable().getDorisExchangeInstances();
}
if (doris_exchange_instances > 0 && fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams.size() > doris_exchange_instances) {
Set<TNetworkAddress> cache = new HashSet<TNetworkAddress>();
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
if (!cache.contains(execParams.host)) {
cache.add(execParams.host);
}
}
List<TNetworkAddress> hosts = new ArrayList<TNetworkAddress>();
hosts.addAll(cache);
Collections.shuffle(hosts, instanceRandom);
for (int index = 0; index < doris_exchange_instances; index++) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, hosts.get(index % hosts.size()), 0, params);
params.instanceExecParams.add(instanceParam);
}
} else {
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execParams.host,0, params);
params.instanceExecParams.add(instanceParam);
}
}
Collections.shuffle(params.instanceExecParams, instanceRandom);
continue;
}
if (bucketSeqToAddress.size() > 0 && isColocateJoin(fragment.getPlanRoot())) {
for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> scanRanges : bucketSeqToScanRange.entrySet()) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, bucketSeqToAddress.get(scanRanges.getKey()), 0, params);
Map<Integer, List<TScanRangeParams>> nodeScanRanges = scanRanges.getValue();
for (Map.Entry<Integer, List<TScanRangeParams>> nodeScanRange : nodeScanRanges.entrySet()) {
instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), nodeScanRange.getValue());
}
params.instanceExecParams.add(instanceParam);
}
} else {
Iterator iter = fragmentExecParamsMap.get(fragment.getFragmentId()).scanRangeAssignment.entrySet().iterator();
int parallelExecInstanceNum = fragment.getParallel_exec_num();
while (iter.hasNext()) {
Map.Entry entry = (Map.Entry) iter.next();
TNetworkAddress key = (TNetworkAddress) entry.getKey();
Map<Integer, List<TScanRangeParams>> value = (Map<Integer, List<TScanRangeParams>>) entry.getValue();
for (Integer planNodeId : value.keySet()) {
List<TScanRangeParams> perNodeScanRanges = value.get(planNodeId);
int expectedInstanceNum = 1;
if (parallelExecInstanceNum > 1) {
expectedInstanceNum = Math.min(perNodeScanRanges.size(), parallelExecInstanceNum);
}
List<List<TScanRangeParams>> perInstanceScanRanges = ListUtil.splitBySize(perNodeScanRanges,
expectedInstanceNum);
for (List<TScanRangeParams> scanRangeParams : perInstanceScanRanges) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, key, 0, params);
instanceParam.perNodeScanRanges.put(planNodeId, scanRangeParams);
params.instanceExecParams.add(instanceParam);
}
}
}
}
if (params.instanceExecParams.isEmpty()) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
}
}
} | Set<TNetworkAddress> cache = new HashSet<TNetworkAddress>(); | private void computeFragmentHosts() throws Exception {
for (int i = fragments.size() - 1; i >= 0; --i) {
PlanFragment fragment = fragments.get(i);
FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
if (fragment.getDataPartition() == DataPartition.UNPARTITIONED) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
LOG.warn("DataPartition UNPARTITIONED, no scanNode Backend");
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
continue;
}
PlanNode leftMostNode = findLeftmostNode(fragment.getPlanRoot());
boolean hasUnionNode = containsUnionNode(fragment.getPlanRoot());
if (!(leftMostNode instanceof ScanNode) && !hasUnionNode) {
PlanFragmentId inputFragmentIdx =
fragments.get(i).getChild(0).getFragmentId();
int exchangeInstances = -1;
if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable() != null) {
exchangeInstances = ConnectContext.get().getSessionVariable().getExchangeInstanceParallel();
}
if (exchangeInstances > 0 && fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams.size() > exchangeInstances) {
Set<TNetworkAddress> hostSet = Sets.newHashSet();
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
hostSet.add(execParams.host);
}
List<TNetworkAddress> hosts = Lists.newArrayList(hostSet);
Collections.shuffle(hosts, instanceRandom);
for (int index = 0; index < exchangeInstances; index++) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, hosts.get(index % hosts.size()), 0, params);
params.instanceExecParams.add(instanceParam);
}
} else {
for (FInstanceExecParam execParams: fragmentExecParamsMap.get(inputFragmentIdx).instanceExecParams) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execParams.host, 0, params);
params.instanceExecParams.add(instanceParam);
}
}
Collections.shuffle(params.instanceExecParams, instanceRandom);
continue;
}
if (bucketSeqToAddress.size() > 0 && isColocateJoin(fragment.getPlanRoot())) {
for (Map.Entry<Integer, Map<Integer, List<TScanRangeParams>>> scanRanges : bucketSeqToScanRange.entrySet()) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, bucketSeqToAddress.get(scanRanges.getKey()), 0, params);
Map<Integer, List<TScanRangeParams>> nodeScanRanges = scanRanges.getValue();
for (Map.Entry<Integer, List<TScanRangeParams>> nodeScanRange : nodeScanRanges.entrySet()) {
instanceParam.perNodeScanRanges.put(nodeScanRange.getKey(), nodeScanRange.getValue());
}
params.instanceExecParams.add(instanceParam);
}
} else {
Iterator iter = fragmentExecParamsMap.get(fragment.getFragmentId()).scanRangeAssignment.entrySet().iterator();
int parallelExecInstanceNum = fragment.getParallel_exec_num();
while (iter.hasNext()) {
Map.Entry entry = (Map.Entry) iter.next();
TNetworkAddress key = (TNetworkAddress) entry.getKey();
Map<Integer, List<TScanRangeParams>> value = (Map<Integer, List<TScanRangeParams>>) entry.getValue();
for (Integer planNodeId : value.keySet()) {
List<TScanRangeParams> perNodeScanRanges = value.get(planNodeId);
int expectedInstanceNum = 1;
if (parallelExecInstanceNum > 1) {
expectedInstanceNum = Math.min(perNodeScanRanges.size(), parallelExecInstanceNum);
}
List<List<TScanRangeParams>> perInstanceScanRanges = ListUtil.splitBySize(perNodeScanRanges,
expectedInstanceNum);
for (List<TScanRangeParams> scanRangeParams : perInstanceScanRanges) {
FInstanceExecParam instanceParam = new FInstanceExecParam(null, key, 0, params);
instanceParam.perNodeScanRanges.put(planNodeId, scanRangeParams);
params.instanceExecParams.add(instanceParam);
}
}
}
}
if (params.instanceExecParams.isEmpty()) {
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostport = SimpleScheduler.getHost(this.idToBackend, backendIdRef);
if (execHostport == null) {
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostport, backendIdRef.getRef());
FInstanceExecParam instanceParam = new FInstanceExecParam(null, execHostport,
0, params);
params.instanceExecParams.add(instanceParam);
}
}
} | class Coordinator {
private static final Logger LOG = LogManager.getLogger(Coordinator.class);
private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private static String localIP = FrontendOptions.getLocalHostAddress();
private static Random instanceRandom = new Random();
Status queryStatus = new Status();
Map<TNetworkAddress, Long> addressToBackendID = Maps.newHashMap();
private ImmutableMap<Long, Backend> idToBackend = ImmutableMap.of();
private TDescriptorTable descTable;
private TQueryGlobals queryGlobals = new TQueryGlobals();
private TQueryOptions queryOptions;
private TNetworkAddress coordAddress;
private Lock lock = new ReentrantLock();
private boolean returnedAllResults;
private RuntimeProfile queryProfile;
private List<RuntimeProfile> fragmentProfile;
private Map<PlanFragmentId, FragmentExecParams> fragmentExecParamsMap = Maps.newHashMap();
private List<PlanFragment> fragments;
private List<BackendExecState> backendExecStates = Lists.newArrayList();
private ResultReceiver receiver;
private ConcurrentMap<TUniqueId, BackendExecState> backendExecStateMap =
Maps.newConcurrentMap();
private List<ScanNode> scanNodes;
private Set<TUniqueId> instanceIds = Sets.newHashSet();
private MarkedCountDownLatch<TUniqueId, Long> profileDoneSignal;
private boolean isBlockQuery;
private int numReceivedRows = 0;
private List<String> deltaUrls;
private Map<String, String> loadCounters;
private String trackingUrl;
private List<String> exportFiles;
private List<TTabletCommitInfo> commitInfos = Lists.newArrayList();
private long jobId = -1;
private TUniqueId queryId;
private TResourceInfo tResourceInfo;
private boolean needReport;
private String clusterName;
private final TUniqueId nextInstanceId;
/**
 * Creates a coordinator for an interactive query: all execution inputs (fragments, scan
 * nodes, descriptor table, session options) are taken from the planner/analyzer/context.
 */
public Coordinator(ConnectContext context, Analyzer analyzer, Planner planner) {
    this.isBlockQuery = planner.isBlockQuery();
    this.queryId = context.queryId();
    this.fragments = planner.getFragments();
    this.scanNodes = planner.getScanNodes();
    this.descTable = analyzer.getDescTbl().toThrift();
    this.returnedAllResults = false;
    this.queryOptions = context.getSessionVariable().toThrift();
    this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
    this.queryGlobals.setTimestamp_ms(new Date().getTime());
    // "CST" is ambiguous, so it is mapped to the system default time zone instead of
    // being passed through verbatim
    if (context.getSessionVariable().getTimeZone().equals("CST")) {
        this.queryGlobals.setTime_zone(TimeUtils.DEFAULT_TIME_ZONE);
    } else {
        this.queryGlobals.setTime_zone(context.getSessionVariable().getTimeZone());
    }
    this.tResourceInfo = new TResourceInfo(context.getQualifiedUser(),
            context.getSessionVariable().getResourceGroup());
    this.needReport = context.getSessionVariable().isReportSucc();
    this.clusterName = context.getClusterName();
    // Instance ids are derived from the query id: same hi part, lo part offset by 1
    this.nextInstanceId = new TUniqueId();
    nextInstanceId.setHi(queryId.hi);
    nextInstanceId.setLo(queryId.lo + 1);
}
public Coordinator(Long jobId, TUniqueId queryId, DescriptorTable descTable,
List<PlanFragment> fragments, List<ScanNode> scanNodes, String cluster, String timezone) {
this.isBlockQuery = true;
this.jobId = jobId;
this.queryId = queryId;
this.descTable = descTable.toThrift();
this.fragments = fragments;
this.scanNodes = scanNodes;
this.queryOptions = new TQueryOptions();
this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
this.queryGlobals.setTimestamp_ms(new Date().getTime());
this.queryGlobals.setTime_zone(timezone);
this.tResourceInfo = new TResourceInfo("", "");
this.needReport = true;
this.clusterName = cluster;
this.nextInstanceId = new TUniqueId();
nextInstanceId.setHi(queryId.hi);
nextInstanceId.setLo(queryId.lo + 1);
}
public long getJobId() {
return jobId;
}
public TUniqueId getQueryId() {
return queryId;
}
public void setQueryId(TUniqueId queryId) {
this.queryId = queryId;
}
public void setQueryType(TQueryType type) {
this.queryOptions.setQuery_type(type);
}
public Status getExecStatus() {
return queryStatus;
}
public RuntimeProfile getQueryProfile() {
return queryProfile;
}
public List<String> getDeltaUrls() {
return deltaUrls;
}
public Map<String, String> getLoadCounters() {
return loadCounters;
}
public String getTrackingUrl() {
return trackingUrl;
}
public void setExecMemoryLimit(long execMemoryLimit) {
this.queryOptions.setMem_limit(execMemoryLimit);
}
public void setTimeout(int timeout) {
this.queryOptions.setQuery_timeout(timeout);
}
/** Resets per-attempt execution state (backend states, status, export files) so an export can be retried. */
public void clearExportStatus() {
    lock.lock();
    try {
        backendExecStates.clear();
        backendExecStateMap.clear();
        queryStatus.setStatus(new Status()); // back to OK
        if (exportFiles == null)
            exportFiles = Lists.newArrayList(); // lazily created on first use
        exportFiles.clear();
    } finally {
        lock.unlock();
    }
}
public List<TTabletCommitInfo> getCommitInfos() {
return commitInfos;
}
// Builds per-fragment execution state, wires fragment input edges, creates the profile
// tree and snapshots the cluster's backends. Must run before scan-range assignment.
private void prepare() {
    // One FragmentExecParams per plan fragment
    for (PlanFragment fragment : fragments) {
        fragmentExecParamsMap.put(fragment.getFragmentId(), new FragmentExecParams(fragment));
    }
    // A fragment with a DataStreamSink feeds its destination fragment: record the edge
    for (PlanFragment fragment : fragments) {
        if (!(fragment.getSink() instanceof DataStreamSink)) {
            continue;
        }
        FragmentExecParams params = fragmentExecParamsMap.get(fragment.getDestFragment().getFragmentId());
        params.inputFragments.add(fragment.getFragmentId());
    }
    coordAddress = new TNetworkAddress(localIP, Config.rpc_port);
    // One child profile per fragment, attached under the query-level profile
    int fragmentSize = fragments.size();
    queryProfile = new RuntimeProfile("Execution Profile " + DebugUtil.printId(queryId));
    fragmentProfile = new ArrayList<RuntimeProfile>();
    for (int i = 0; i < fragmentSize; i ++) {
        fragmentProfile.add(new RuntimeProfile("Fragment " + i));
        queryProfile.addChild(fragmentProfile.get(i));
    }
    // Snapshot the backends of this cluster; scheduling decisions use this immutable view
    this.idToBackend = Catalog.getCurrentSystemInfo().getBackendsInCluster(clusterName);
    if (LOG.isDebugEnabled()) {
        LOG.debug("idToBackend size={}", idToBackend.size());
        for (Map.Entry<Long, Backend> entry : idToBackend.entrySet()) {
            Long backendID = entry.getKey();
            Backend backend = entry.getValue();
            LOG.debug("backend: {}-{}-{}", backendID, backend.getHost(), backend.getBePort());
        }
    }
}
private void lock() {
lock.lock();
}
private void unlock() {
lock.unlock();
}
/** Logs, at debug level only, one line describing every fragment's planned instances. */
private void traceInstance() {
    if ( ! LOG.isDebugEnabled())
        return; // avoid building the string when debug logging is off
    StringBuilder sb = new StringBuilder();
    sb.append("query id=").append(DebugUtil.printId(queryId)).append(",");
    sb.append("fragment=[");
    boolean first = true;
    for (Map.Entry<PlanFragmentId, FragmentExecParams> entry : fragmentExecParamsMap.entrySet()) {
        if ( ! first) {
            sb.append(",");
        }
        first = false;
        sb.append(entry.getKey());
        entry.getValue().appendTo(sb);
    }
    sb.append("]");
    LOG.debug(sb.toString());
}
// Entry point for query execution: plans instance placement, ships every fragment to its
// backends over async RPC, and fails fast (cancelling all instances) on the first RPC error.
// NOTE(review): fragments.get(0) is assumed to be the top/output fragment — confirm the
// planner guarantees this ordering.
public void exec() throws Exception {
    if (!scanNodes.isEmpty()) {
        LOG.debug("debug: in Coordinator::exec. query id: {}, planNode: {}",
                DebugUtil.printId(queryId), scanNodes.get(0).treeToThrift());
    }
    if (!fragments.isEmpty()) {
        LOG.debug("debug: in Coordinator::exec. query id: {}, fragment: {}",
                DebugUtil.printId(queryId), fragments.get(0).toThrift());
    }
    // Placement pipeline: exec state -> scan ranges -> per-instance params
    prepare();
    computeScanRangeAssignment();
    computeFragmentExecParams();
    traceInstance();
    PlanFragmentId topId = fragments.get(0).getFragmentId();
    FragmentExecParams topParams = fragmentExecParamsMap.get(topId);
    if (topParams.fragment.getSink() instanceof ResultSink) {
        // Query result flows back to this coordinator through a ResultReceiver
        receiver = new ResultReceiver(
                topParams.instanceExecParams.get(0).instanceId,
                addressToBackendID.get(topParams.instanceExecParams.get(0).host),
                toBrpcHost(topParams.instanceExecParams.get(0).host),
                queryOptions.query_timeout * 1000);
    } else {
        // Load/export job: no result rows; backends report progress/counters instead
        this.queryOptions.setIs_report_success(true);
        deltaUrls = Lists.newArrayList();
        loadCounters = Maps.newHashMap();
    }
    // Each instance must check in once before the profile is considered complete
    profileDoneSignal = new MarkedCountDownLatch<TUniqueId, Long>(instanceIds.size());
    for (TUniqueId instanceId : instanceIds) {
        profileDoneSignal.addMark(instanceId, -1L /* value is meaningless */);
    }
    lock();
    try {
        int backendId = 0;
        int profileFragmentId = 0;
        long memoryLimit = queryOptions.getMem_limit();
        for (PlanFragment fragment : fragments) {
            FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
            int instanceNum = params.instanceExecParams.size();
            Preconditions.checkState(instanceNum > 0);
            List<TExecPlanFragmentParams> tParams = params.toThrift(backendId);
            List<Pair<BackendExecState, Future<PExecPlanFragmentResult>>> futures = Lists.newArrayList();
            // Colocate-join fragments share a host, so each instance gets a reduced memory limit
            if (colocateFragmentIds.contains(fragment.getFragmentId().asInt())) {
                int rate = Math.min(Config.query_colocate_join_memory_limit_penalty_factor, instanceNum);
                long newmemory = memoryLimit / rate;
                for (TExecPlanFragmentParams tParam : tParams) {
                    tParam.query_options.setMem_limit(newmemory);
                }
            }
            // Fire off all instance RPCs for this fragment asynchronously...
            int instanceId = 0;
            for (TExecPlanFragmentParams tParam : tParams) {
                BackendExecState execState =
                        new BackendExecState(fragment.getFragmentId(), instanceId++,
                                profileFragmentId, tParam, this.addressToBackendID);
                backendExecStates.add(execState);
                backendExecStateMap.put(tParam.params.getFragment_instance_id(), execState);
                futures.add(Pair.create(execState, execState.execRemoteFragmentAsync()));
                backendId++;
            }
            // ...then wait for every RPC of this fragment before moving to the next fragment
            for (Pair<BackendExecState, Future<PExecPlanFragmentResult>> pair : futures) {
                TStatusCode code = TStatusCode.INTERNAL_ERROR;
                String errMsg = null;
                try {
                    PExecPlanFragmentResult result = pair.second.get(Config.remote_fragment_exec_timeout_ms,
                            TimeUnit.MILLISECONDS);
                    code = TStatusCode.findByValue(result.status.status_code);
                    if (result.status.error_msgs != null && !result.status.error_msgs.isEmpty()) {
                        errMsg = result.status.error_msgs.get(0);
                    }
                } catch (ExecutionException e) {
                    LOG.warn("catch a execute exception", e);
                    code = TStatusCode.THRIFT_RPC_ERROR;
                } catch (InterruptedException e) {
                    LOG.warn("catch a interrupt exception", e);
                    code = TStatusCode.INTERNAL_ERROR;
                } catch (TimeoutException e) {
                    LOG.warn("catch a timeout exception", e);
                    code = TStatusCode.TIMEOUT;
                }
                if (code != TStatusCode.OK) {
                    // First failure wins: record it, cancel everything already started, and rethrow
                    if (errMsg == null) {
                        errMsg = "exec rpc error. backend id: " + pair.first.backendId;
                    }
                    queryStatus.setStatus(errMsg);
                    LOG.warn("exec plan fragment failed, errmsg={}, fragmentId={}, backend={}:{}",
                            errMsg, fragment.getFragmentId(),
                            pair.first.address.hostname, pair.first.address.port);
                    cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
                    switch (code) {
                        case TIMEOUT:
                            throw new UserException("query timeout. backend id: " + pair.first.backendId);
                        case THRIFT_RPC_ERROR:
                            // RPC-level failure: blacklist the backend so later queries avoid it
                            SimpleScheduler.updateBlacklistBackends(pair.first.backendId);
                            throw new RpcException("rpc failed. backend id: " + pair.first.backendId);
                        default:
                            throw new UserException(errMsg);
                    }
                }
            }
            profileFragmentId += 1;
        }
        attachInstanceProfileToFragmentProfile();
    } finally {
        unlock();
    }
}
public List<String> getExportFiles() {
return exportFiles;
}
// Appends export result files reported by an instance; the list is created lazily
// because only export jobs ever populate it. Guarded by the coordinator lock since
// instances report concurrently.
void updateExportFiles(List<String> files) {
    lock.lock();
    try {
        if (exportFiles == null) {
            exportFiles = Lists.newArrayList();
        }
        exportFiles.addAll(files);
    } finally {
        lock.unlock();
    }
}
void updateDeltas(List<String> urls) {
lock.lock();
try {
deltaUrls.addAll(urls);
} finally {
lock.unlock();
}
}
/**
 * Merges the load counters reported by one instance into the coordinator-wide totals.
 * Counters are stored as decimal strings; missing entries count as 0. Guarded by the
 * coordinator lock because instances report concurrently.
 */
private void updateLoadCounters(Map<String, String> newLoadCounters) {
    lock.lock();
    try {
        // Same null-check/parse/sum logic was previously duplicated per counter; factored out below
        long numRowsNormal = sumCounters(this.loadCounters.get(LoadEtlTask.DPP_NORMAL_ALL),
                                         newLoadCounters.get(LoadEtlTask.DPP_NORMAL_ALL));
        long numRowsAbnormal = sumCounters(this.loadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL),
                                           newLoadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL));
        this.loadCounters.put(LoadEtlTask.DPP_NORMAL_ALL, "" + numRowsNormal);
        this.loadCounters.put(LoadEtlTask.DPP_ABNORMAL_ALL, "" + numRowsAbnormal);
    } finally {
        lock.unlock();
    }
}

/** Sums two nullable numeric counter strings; a null counts as 0. */
private static long sumCounters(String current, String delta) {
    long sum = 0L;
    if (current != null) {
        sum += Long.parseLong(current); // parseLong avoids the boxing of Long.valueOf
    }
    if (delta != null) {
        sum += Long.parseLong(delta);
    }
    return sum;
}
// Thread-safely appends tablet commit infos reported by finished backends;
// they are handed to the txn manager when the load commits.
private void updateCommitInfos(List<TTabletCommitInfo> commitInfos) {
    lock.lock();
    try {
        this.commitInfos.addAll(commitInfos);
    } finally {
        lock.unlock();
    }
}
// Records a failure status reported by a fragment instance and cancels the
// whole query. First failure wins; later reports and expected post-EOS
// cancellations are ignored.
private void updateStatus(Status status, TUniqueId instanceId) {
lock.lock();
try {
// A CANCELLED report after all results were returned is expected: we
// cancelled the remaining instances ourselves, so it is not an error.
if (returnedAllResults && status.isCancelled()) {
return;
}
// Successful reports carry no new information here.
if (status.ok()) {
return;
}
// Keep the first recorded error; do not overwrite it with later ones.
if (!queryStatus.ok()) {
return;
}
queryStatus.setStatus(status);
LOG.warn("one instance report fail throw updateStatus(), need cancel. job id: {}, query id: {}, instance id: {}",
jobId, DebugUtil.printId(queryId), instanceId != null ? DebugUtil.printId(instanceId) : "NaN");
cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
} finally {
lock.unlock();
}
}
// Fetches the next row batch from the result receiver, propagating any
// failure recorded for the query, and cancels the remaining instances once a
// limit query has returned enough rows.
public RowBatch getNext() throws Exception {
if (receiver == null) {
throw new UserException("There is no receiver.");
}
RowBatch resultBatch;
Status status = new Status();
resultBatch = receiver.getNext(status);
if (!status.ok()) {
LOG.warn("get next fail, need cancel. query id: {}", DebugUtil.printId(queryId));
}
// Merge the receiver status into the query status (cancels on failure).
updateStatus(status, null /* no instance id */);
// Snapshot the status under the lock, then inspect the copy lock-free.
Status copyStatus = null;
lock();
try {
copyStatus = new Status(queryStatus);
} finally {
unlock();
}
if (!copyStatus.ok()) {
if (Strings.isNullOrEmpty(copyStatus.getErrorMsg())) {
copyStatus.rewriteErrorMsg();
}
if (copyStatus.isRpcError()) {
throw new RpcException(copyStatus.getErrorMsg());
} else {
String errMsg = copyStatus.getErrorMsg();
LOG.warn("query failed: {}", errMsg);
// Strip backend host details from the user-facing message.
int hostIndex = errMsg.indexOf("host");
if (hostIndex != -1) {
errMsg = errMsg.substring(0, hostIndex);
}
throw new UserException(errMsg);
}
}
if (resultBatch.isEos()) {
this.returnedAllResults = true;
// For multi-instance non-block queries with a limit, proactively cancel
// the other instances once enough rows have been received.
Long numLimitRows = fragments.get(0).getPlanRoot().getLimit();
boolean hasLimit = numLimitRows > 0;
if (!isBlockQuery && instanceIds.size() > 1 && hasLimit && numReceivedRows >= numLimitRows) {
LOG.debug("no block query, return num >= limit rows, need cancel");
cancelInternal(PPlanFragmentCancelReason.LIMIT_REACH);
}
} else {
numReceivedRows += resultBatch.getBatch().getRowsSize();
}
return resultBatch;
}
// Cancels the whole query from the outside (e.g. user kill). The first
// status writer wins: when an instance already failed, that error status is
// preserved instead of being replaced by CANCELLED.
public void cancel() {
    lock();
    try {
        if (queryStatus.ok()) {
            queryStatus.setStatus(Status.CANCELLED);
        } else {
            // Query already terminated with an error; nothing more to do.
            return;
        }
        LOG.warn("cancel execution of query, this is outside invoke");
        cancelInternal(PPlanFragmentCancelReason.USER_CANCEL);
    } finally {
        unlock();
    }
}
// Cancels the result receiver and all remote fragment instances, then forces
// the profile latch to zero so threads blocked in join()/endProfile() wake up.
private void cancelInternal(PPlanFragmentCancelReason cancelReason) {
if (null != receiver) {
receiver.cancel();
}
cancelRemoteFragmentsAsync(cancelReason);
if (profileDoneSignal != null) {
// Release every waiter immediately; remaining marks are logged below.
profileDoneSignal.countDownToZero(new Status());
LOG.info("unfinished instance: {}", profileDoneSignal.getLeftMarks().stream().map(e->DebugUtil.printId(e.getKey())).toArray());
}
}
// Sends an asynchronous cancel RPC to every fragment instance that has been
// started, is not yet done, and has not already been cancelled. Each state is
// mutated only while its own lock is held.
private void cancelRemoteFragmentsAsync(PPlanFragmentCancelReason cancelReason) {
for (BackendExecState backendExecState : backendExecStates) {
TNetworkAddress address = backendExecState.getBackendAddress();
LOG.debug("cancelRemoteFragments initiated={} done={} hasCanceled={} ip={} port={} fragment instance id={}, reason: {}",
backendExecState.initiated, backendExecState.done, backendExecState.hasCanceled,
address.hostname, address.port, DebugUtil.printId(backendExecState.getFragmentInstanceId()),
cancelReason.name());
backendExecState.lock();
try {
// Skip instances we never started, already finished, or already cancelled.
if (!backendExecState.initiated) {
continue;
}
if (backendExecState.done) {
continue;
}
if (backendExecState.hasCanceled) {
continue;
}
TNetworkAddress brpcAddress = toBrpcHost(address);
try {
BackendServiceProxy.getInstance().cancelPlanFragmentAsync(
brpcAddress, backendExecState.getFragmentInstanceId(), cancelReason);
} catch (RpcException e) {
LOG.warn("cancel plan fragment get a exception, address={}:{}",
brpcAddress.getHostname(), brpcAddress.getPort());
// A backend we cannot even cancel on is blacklisted for scheduling.
SimpleScheduler.updateBlacklistBackends(addressToBackendID.get(brpcAddress));
}
backendExecState.hasCanceled = true;
} catch (Exception e) {
// Best-effort: a failure cancelling one instance must not stop the loop.
LOG.warn("catch a exception", e);
} finally {
backendExecState.unlock();
}
}
}
/**
 * Assigns a unique instance id to every fragment instance and wires up the
 * data-stream destinations between producer fragments and their consumers.
 *
 * @throws Exception if a destination host cannot be resolved to a live backend
 */
private void computeFragmentExecParams() throws Exception {
    // Decide which hosts run which instances first.
    computeFragmentHosts();

    // Instance ids share the query id's hi part; the lo part is
    // queryId.lo + 1, + 2, ... in assignment order.
    instanceIds.clear();
    for (FragmentExecParams params : fragmentExecParamsMap.values()) {
        LOG.debug("fragment {} has instances {}", params.fragment.getFragmentId(), params.instanceExecParams.size());
        for (int j = 0; j < params.instanceExecParams.size(); ++j) {
            TUniqueId instanceId = new TUniqueId();
            instanceId.setHi(queryId.hi);
            instanceId.setLo(queryId.lo + instanceIds.size() + 1);
            params.instanceExecParams.get(j).instanceId = instanceId;
            instanceIds.add(instanceId);
        }
    }

    // Compute destinations and per-exchange sender counts.
    for (FragmentExecParams params : fragmentExecParamsMap.values()) {
        PlanFragment destFragment = params.fragment.getDestFragment();
        if (destFragment == null) {
            // The root (result/sink) fragment sends nowhere.
            continue;
        }
        FragmentExecParams destParams = fragmentExecParamsMap.get(destFragment.getFragmentId());

        // Accumulate the sender count for the exchange node this sink feeds;
        // several producer fragments may target the same exchange.
        // (merge == put when absent, sum when present: same as the old
        // null-check-then-put, but race-free and shorter.)
        DataSink sink = params.fragment.getSink();
        PlanNodeId exchId = sink.getExchNodeId();
        destParams.perExchNumSenders.merge(exchId.asInt(), params.instanceExecParams.size(), Integer::sum);

        // Every producer instance sends to every consumer instance.
        for (int j = 0; j < destParams.instanceExecParams.size(); ++j) {
            TPlanFragmentDestination dest = new TPlanFragmentDestination();
            dest.fragment_instance_id = destParams.instanceExecParams.get(j).instanceId;
            dest.server = toRpcHost(destParams.instanceExecParams.get(j).host);
            dest.setBrpc_server(toBrpcHost(destParams.instanceExecParams.get(j).host));
            params.destinations.add(dest);
        }
    }
}
// Resolves a backend's heartbeat (be-port) address to its thrift RPC address.
// Throws when no live backend is registered at that address.
private TNetworkAddress toRpcHost(TNetworkAddress host) throws Exception {
    Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort(
            host.getHostname(), host.getPort());
    if (backend == null) {
        throw new UserException("there is no scanNode Backend");
    }
    return new TNetworkAddress(backend.getHost(), backend.getBeRpcPort());
}
// Resolves a backend's heartbeat address to its brpc endpoint. Returns null
// when the backend does not expose a brpc port; throws when no live backend
// is registered at that address.
private TNetworkAddress toBrpcHost(TNetworkAddress host) throws Exception {
    Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort(
            host.getHostname(), host.getPort());
    if (backend == null) {
        throw new UserException("there is no scanNode Backend");
    }
    int brpcPort = backend.getBrpcPort();
    return brpcPort < 0 ? null : new TNetworkAddress(backend.getHost(), brpcPort);
}
/**
 * Returns true when the plan subtree rooted at {@code node} contains a
 * UnionNode, without crossing fragment boundaries (ExchangeNode children are
 * skipped).
 *
 * Fix: the original did {@code return containsUnionNode(child)} for the first
 * ordinary child, so later siblings were never inspected and a union under
 * e.g. the second child of a join was missed. Now every child is checked.
 */
private boolean containsUnionNode(PlanNode node) {
    if (node instanceof UnionNode) {
        return true;
    }
    for (PlanNode child : node.getChildren()) {
        if (child instanceof ExchangeNode) {
            // Exchange marks a fragment boundary; do not descend into it.
            continue;
        }
        if (child instanceof UnionNode) {
            return true;
        }
        if (containsUnionNode(child)) {
            return true;
        }
    }
    return false;
}
/**
 * Returns true when the fragment owning {@code node} executes a colocate
 * join. Fragments already identified as colocate are cached in
 * {@code colocateFragmentIds} to avoid re-walking the tree.
 *
 * Fix: the original did {@code return isColocateJoin(child)} from inside the
 * child loop, so only the FIRST child was ever examined; a colocate hash join
 * under a later child was missed. Now all children are checked.
 */
private boolean isColocateJoin(PlanNode node) {
    if (Config.disable_colocate_join) {
        return false;
    }
    if (ConnectContext.get() != null
            && ConnectContext.get().getSessionVariable().isDisableColocateJoin()) {
        return false;
    }
    if (colocateFragmentIds.contains(node.getFragmentId().asInt())) {
        return true;
    }
    if (node instanceof HashJoinNode) {
        HashJoinNode joinNode = (HashJoinNode) node;
        if (joinNode.isColocate()) {
            // Cache the fragment so sibling nodes resolve via the set above.
            colocateFragmentIds.add(joinNode.getFragmentId().asInt());
            return true;
        }
    }
    for (PlanNode childNode : node.getChildren()) {
        if (isColocateJoin(childNode)) {
            return true;
        }
    }
    return false;
}
// Walks down the leftmost-child chain and returns the first node that is
// either a leaf or an ExchangeNode (a fragment boundary).
private PlanNode findLeftmostNode(PlanNode plan) {
    PlanNode current = plan;
    while (!(current instanceof ExchangeNode) && current.getChildren().size() != 0) {
        current = current.getChild(0);
    }
    return current;
}
/**
 * Returns the value mapped to {@code key}, first inserting {@code defaultVal}
 * when the key is absent (or mapped to null). Equivalent to the original
 * null-check-then-put, expressed with {@link java.util.Map#computeIfAbsent};
 * the default is already constructed by the caller, so no laziness is lost.
 */
private <K, V> V findOrInsert(HashMap<K, V> m, final K key, final V defaultVal) {
    return m.computeIfAbsent(key, k -> defaultVal);
}
/**
 * Scan-range specialization of findOrInsert: returns the list mapped to
 * {@code key}, inserting {@code defaultVal} first when the key is absent
 * (or mapped to null). Same contract as the generic overload.
 */
private List<TScanRangeParams> findOrInsert(Map<Integer, List<TScanRangeParams>> m, Integer key,
        ArrayList<TScanRangeParams> defaultVal) {
    return m.computeIfAbsent(key, k -> defaultVal);
}
// Weight used when balancing scan ranges across hosts. Every range currently
// counts as 1 (actual byte sizes are not available here), so the assignment
// below degenerates to balancing by range count.
private long getScanRangeLength(final TScanRange scanRange) {
return 1;
}
// Assigns every scan node's scan ranges to concrete backend hosts: the
// colocate-aware strategy for colocate-join fragments, the balanced
// scheduler strategy otherwise.
private void computeScanRangeAssignment() throws Exception {
for (ScanNode scanNode : scanNodes) {
// A node without scan ranges (e.g. everything pruned) needs no assignment.
List<TScanRangeLocations> locations = scanNode.getScanRangeLocations(0);
if (locations == null) {
continue;
}
FragmentScanRangeAssignment assignment =
fragmentExecParamsMap.get(scanNode.getFragmentId()).scanRangeAssignment;
if (isColocateJoin(scanNode.getFragment().getPlanRoot())) {
// Colocate path only applies to OLAP scans.
computeScanRangeAssignmentByColocate((OlapScanNode) scanNode, assignment);
} else {
computeScanRangeAssignmentByScheduler(scanNode, locations, assignment);
}
}
}
// Colocate assignment: all scan ranges of one bucket go to a single host so
// bucket-matched joins run locally. The host for a bucket is chosen once (on
// the first scan node that mentions it) and reused by every later scan node.
private void computeScanRangeAssignmentByColocate(
final OlapScanNode scanNode,
FragmentScanRangeAssignment assignment) throws Exception {
for(Integer bucketSeq: scanNode.bucketSeq2locations.keySet()) {
List<TScanRangeLocations> locations = scanNode.bucketSeq2locations.get(bucketSeq);
if (!bucketSeqToAddress.containsKey(bucketSeq)) {
// First time this bucket is seen: pick and remember an exec host.
getExecHostPortForBucketSeq(locations.get(0), bucketSeq);
}
for(TScanRangeLocations location: locations) {
// Group ranges as bucketSeq -> (scan-node id -> ranges).
Map<Integer, List<TScanRangeParams>> scanRanges =
findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<Integer, List<TScanRangeParams>>());
List<TScanRangeParams> scanRangeParamsList =
findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>());
TScanRangeParams scanRangeParams = new TScanRangeParams();
scanRangeParams.scan_range = location.scan_range;
scanRangeParamsList.add(scanRangeParams);
}
}
}
// Picks a live backend for a bucket from one of its replica locations
// (starting from a random replica) and records both the address->backendId
// and bucketSeq->address mappings.
// NOTE(review): a new Random is allocated on every call; a shared Random or
// ThreadLocalRandom would avoid the per-call allocation - confirm and hoist.
private void getExecHostPortForBucketSeq(TScanRangeLocations seqLocation, Integer bucketSeq) throws Exception {
int randomLocation = new Random().nextInt(seqLocation.locations.size());
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostPort = SimpleScheduler.getHost(seqLocation.locations.get(randomLocation).backend_id, seqLocation.locations, this.idToBackend, backendIdRef);
if (execHostPort == null) {
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
this.bucketSeqToAddress.put(bucketSeq, execHostPort);
}
// Balanced assignment: each scan range goes to the replica host with the
// least work assigned so far (greedy min-load), then the chosen host is
// resolved to a live backend via the scheduler.
private void computeScanRangeAssignmentByScheduler(
final ScanNode scanNode,
final List<TScanRangeLocations> locations,
FragmentScanRangeAssignment assignment) throws Exception {
HashMap<TNetworkAddress, Long> assignedBytesPerHost = Maps.newHashMap();
for (TScanRangeLocations scanRangeLocations : locations) {
// Pick the replica whose host has the least assigned "bytes" so far.
// NOTE(review): if a range had an empty locations list, minLocation
// would stay null and NPE below - presumably the planner guarantees at
// least one replica; confirm.
Long minAssignedBytes = Long.MAX_VALUE;
TScanRangeLocation minLocation = null;
for (final TScanRangeLocation location : scanRangeLocations.getLocations()) {
Long assignedBytes = findOrInsert(assignedBytesPerHost, location.server, 0L);
if (assignedBytes < minAssignedBytes) {
minAssignedBytes = assignedBytes;
minLocation = location;
}
}
// Charge the chosen host (getScanRangeLength currently returns 1).
Long scanRangeLength = getScanRangeLength(scanRangeLocations.scan_range);
assignedBytesPerHost.put(minLocation.server,
assignedBytesPerHost.get(minLocation.server) + scanRangeLength);
// Resolve to a live backend; may substitute a replica if the chosen
// backend is down.
Reference<Long> backendIdRef = new Reference<Long>();
TNetworkAddress execHostPort = SimpleScheduler.getHost(minLocation.backend_id,
scanRangeLocations.getLocations(), this.idToBackend, backendIdRef);
if (execHostPort == null) {
throw new UserException("there is no scanNode Backend");
}
this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
// Record host -> (scan-node id -> ranges).
Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(assignment, execHostPort,
new HashMap<Integer, List<TScanRangeParams>>());
List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(),
new ArrayList<TScanRangeParams>());
TScanRangeParams scanRangeParams = new TScanRangeParams();
scanRangeParams.scan_range = scanRangeLocations.scan_range;
scanRangeParams.setVolume_id(minLocation.volume_id);
scanRangeParamsList.add(scanRangeParams);
}
}
// Handles an exec-status report from one fragment instance: merges its
// profile, propagates failures, and on completion records load side outputs
// (delta urls, counters, tracking url, export files, commit infos) and counts
// down the profile latch.
public void updateFragmentExecStatus(TReportExecStatusParams params) {
if (params.backend_num >= backendExecStates.size()) {
LOG.warn("unknown backend number: {}, expected less than: {}",
params.backend_num, backendExecStates.size());
return;
}
boolean done = false;
BackendExecState execState = backendExecStates.get(params.backend_num);
execState.lock();
try {
// Reports after the instance is done are stale; ignore them.
if (execState.done) {
return;
}
if (params.isSetProfile()) {
execState.profile.update(params.profile);
}
done = params.done;
execState.done = params.done;
} finally {
execState.unlock();
}
if (LOG.isDebugEnabled()) {
StringBuilder builder = new StringBuilder();
execState.profile().prettyPrint(builder, "");
LOG.debug("profile for query_id={} instance_id={}\n{}",
DebugUtil.printId(queryId),
DebugUtil.printId(params.getFragment_instance_id()),
builder.toString());
}
// A CANCELLED status after all results were returned is expected (we
// cancelled the instance ourselves) and is not treated as a failure.
Status status = new Status(params.status);
if (!(returnedAllResults && status.isCancelled()) && !status.ok()) {
LOG.warn("one instance report fail, query_id={} instance_id={}",
DebugUtil.printId(queryId), DebugUtil.printId(params.getFragment_instance_id()));
updateStatus(status, params.getFragment_instance_id());
}
if (done) {
// Collect load/export side outputs carried by the final report.
if (params.isSetDelta_urls()) {
updateDeltas(params.getDelta_urls());
}
if (params.isSetLoad_counters()) {
updateLoadCounters(params.getLoad_counters());
}
if (params.isSetTracking_url()) {
trackingUrl = params.tracking_url;
}
if (params.isSetExport_files()) {
updateExportFiles(params.export_files);
}
if (params.isSetCommitInfos()) {
updateCommitInfos(params.getCommitInfos());
}
// Mark this instance as reported on the profile latch.
profileDoneSignal.markedCountDown(params.getFragment_instance_id(), -1L);
}
if (params.isSetLoaded_rows()) {
Catalog.getCurrentCatalog().getLoadManager().updateJobLoadedRows(jobId, params.query_id, params.loaded_rows);
}
return;
}
// Waits briefly for outstanding profile reports, then sorts the children of
// each fragment profile for stable display.
public void endProfile() {
if (backendExecStates.isEmpty()) {
return;
}
// Wait at most 2s for final profiles; instances that miss the window just
// produce a less complete profile.
if (needReport) {
try {
profileDoneSignal.await(2, TimeUnit.SECONDS);
} catch (InterruptedException e1) {
LOG.warn("signal await error", e1);
}
}
// NOTE(review): starts at index 1, leaving fragment 0 unsorted - looks
// deliberate (root/result fragment) but worth confirming.
for (int i = 1; i < fragmentProfile.size(); ++i) {
fragmentProfile.get(i).sortChildren();
}
}
/**
 * Waits up to {@code seconds} for every fragment instance to report
 * completion.
 *
 * @param seconds maximum time to wait
 * @return true when all instances finished within the timeout; false on
 *         timeout or interruption
 */
public boolean join(int seconds) {
    try {
        return profileDoneSignal.await(seconds, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // Fix: restore the interrupt flag instead of silently swallowing it,
        // so the caller (or its thread pool) can observe the interruption.
        Thread.currentThread().interrupt();
    }
    return false;
}
// True once every fragment instance has reported done (latch fully counted
// down or forced to zero by a cancel).
public boolean isDone() {
    return 0 == profileDoneSignal.getCount();
}
// Per-fragment scan assignment: exec host -> (scan-node id -> scan ranges
// that node reads on that host).
class FragmentScanRangeAssignment
extends HashMap<TNetworkAddress, Map<Integer, List<TScanRangeParams>>> {
}
// Colocate scan assignment: bucket seq -> (scan-node id -> scan ranges).
class BucketSeqToScanRange extends HashMap<Integer, Map<Integer, List<TScanRangeParams>>> {
}
// Colocate-join bookkeeping: scan ranges grouped by bucket, the host chosen
// for each bucket, and fragment ids already identified as colocate.
private BucketSeqToScanRange bucketSeqToScanRange = new BucketSeqToScanRange();
private Map<Integer, TNetworkAddress> bucketSeqToAddress = Maps.newHashMap();
private Set<Integer> colocateFragmentIds = new HashSet<>();
// Execution state of a single fragment instance on one backend: the rpc
// params to send, lifecycle flags (initiated/done/hasCanceled) guarded by a
// per-instance lock, and the instance's runtime profile.
public class BackendExecState {
TExecPlanFragmentParams rpcParams;
private PlanFragmentId fragmentId;
private int instanceId;
// True once the exec rpc has been issued; only initiated instances are
// eligible for cancellation.
private boolean initiated;
private boolean done;
private boolean hasCanceled;
// Guards the lifecycle flags and profile updates for this instance.
private Lock lock = new ReentrantLock();
private int profileFragmentId;
RuntimeProfile profile;
TNetworkAddress address;
Long backendId;
public int profileFragmentId() {
return profileFragmentId;
}
public boolean initiated() {
return initiated;
}
public RuntimeProfile profile() {
return profile;
}
public void lock() {
lock.lock();
}
public void unlock() {
lock.unlock();
}
public int getInstanceId() {
return instanceId;
}
public PlanFragmentId getFragmentId() {
return fragmentId;
}
// Resolves the target address and backend id from the fragment's exec
// params and creates an empty profile named after the instance.
public BackendExecState(PlanFragmentId fragmentId, int instanceId, int profileFragmentId,
TExecPlanFragmentParams rpcParams, Map<TNetworkAddress, Long> addressToBackendID) {
this.profileFragmentId = profileFragmentId;
this.fragmentId = fragmentId;
this.instanceId = instanceId;
this.rpcParams = rpcParams;
this.initiated = false;
this.done = false;
this.address = fragmentExecParamsMap.get(fragmentId).instanceExecParams.get(instanceId).host;
this.backendId = addressToBackendID.get(address);
String name = "Instance " + DebugUtil.printId(fragmentExecParamsMap.get(fragmentId)
.instanceExecParams.get(instanceId).instanceId) + " (host=" + address + ")";
this.profile = new RuntimeProfile(name);
this.hasCanceled = false;
}
public TNetworkAddress getBackendAddress() {
return address;
}
public TUniqueId getFragmentInstanceId() {
return this.rpcParams.params.getFragment_instance_id();
}
// Issues the exec-plan-fragment rpc asynchronously. Marks the instance
// initiated before sending; blacklists the backend on rpc failure.
public Future<PExecPlanFragmentResult> execRemoteFragmentAsync() throws TException, RpcException {
TNetworkAddress brpcAddress = null;
try {
brpcAddress = toBrpcHost(address);
} catch (Exception e) {
throw new TException(e.getMessage());
}
initiated = true;
try {
return BackendServiceProxy.getInstance().execPlanFragmentAsync(brpcAddress, rpcParams);
} catch (RpcException e) {
SimpleScheduler.updateBlacklistBackends(backendId);
throw e;
}
}
}
// Execution parameters for one plan fragment: its stream destinations,
// per-exchange sender counts, producer fragments, per-instance parameters,
// and the scan-range assignment computed for it.
protected class FragmentExecParams {
public PlanFragment fragment;
public List<TPlanFragmentDestination> destinations = Lists.newArrayList();
public Map<Integer, Integer> perExchNumSenders = Maps.newHashMap();
public List<PlanFragmentId> inputFragments = Lists.newArrayList();
public List<FInstanceExecParam> instanceExecParams = Lists.newArrayList();
public FragmentScanRangeAssignment scanRangeAssignment = new FragmentScanRangeAssignment();
public FragmentExecParams(PlanFragment fragment) {
this.fragment = fragment;
}
// Builds one TExecPlanFragmentParams per instance of this fragment, sharing
// the fragment plan/descriptor table and filling per-instance fields
// (instance id, scan ranges, sender id, backend number).
List<TExecPlanFragmentParams> toThrift(int backendNum) {
List<TExecPlanFragmentParams> paramsList = Lists.newArrayList();
for (int i = 0; i < instanceExecParams.size(); ++i) {
final FInstanceExecParam instanceExecParam = instanceExecParams.get(i);
TExecPlanFragmentParams params = new TExecPlanFragmentParams();
params.setProtocol_version(PaloInternalServiceVersion.V1);
params.setFragment(fragment.toThrift());
params.setDesc_tbl(descTable);
params.setParams(new TPlanFragmentExecParams());
params.setResource_info(tResourceInfo);
params.params.setQuery_id(queryId);
params.params.setFragment_instance_id(instanceExecParam.instanceId);
// Instances without assigned scan ranges still need an (empty) map.
Map<Integer, List<TScanRangeParams>> scanRanges = instanceExecParam.perNodeScanRanges;
if (scanRanges == null) {
scanRanges = Maps.newHashMap();
}
params.params.setPer_node_scan_ranges(scanRanges);
params.params.setPer_exch_num_senders(perExchNumSenders);
params.params.setDestinations(destinations);
params.params.setSender_id(i);
params.params.setNum_senders(instanceExecParams.size());
params.setCoord(coordAddress);
params.setBackend_num(backendNum++);
params.setQuery_globals(queryGlobals);
params.setQuery_options(queryOptions);
params.params.setSend_query_statistics_with_every_batch(
fragment.isTransferQueryStatisticsWithEveryBatch());
// Loads additionally carry the error-hub configuration if present.
if (queryOptions.getQuery_type() == TQueryType.LOAD) {
LoadErrorHub.Param param = Catalog.getCurrentCatalog().getLoadInstance().getLoadErrorHubInfo();
if (param != null) {
TLoadErrorHubInfo info = param.toThrift();
if (info != null) {
params.setLoad_error_hub_info(info);
}
}
}
paramsList.add(params);
}
return paramsList;
}
// Appends a compact debug rendering of the given scan ranges (OLAP tablet
// id/version, ES index/shard) to the builder. Debug/trace output only.
public void appendScanRange(StringBuilder sb, List<TScanRangeParams> params) {
sb.append("range=[");
int idx = 0;
for (TScanRangeParams range : params) {
TPaloScanRange paloScanRange = range.getScan_range().getPalo_scan_range();
if (paloScanRange != null) {
if (idx++ != 0) {
sb.append(",");
}
sb.append("{tid=").append(paloScanRange.getTablet_id())
.append(",ver=").append(paloScanRange.getVersion()).append("}");
}
TEsScanRange esScanRange = range.getScan_range().getEs_scan_range();
if (esScanRange != null) {
sb.append("{ index=").append(esScanRange.getIndex())
.append(", shardid=").append(esScanRange.getShard_id())
.append("}");
}
}
sb.append("]");
}
// Appends a debug rendering of this fragment's plan and per-instance
// host/scan-range assignment to the builder. Debug/trace output only.
public void appendTo(StringBuilder sb) {
sb.append("{plan=");
fragment.getPlanRoot().appendTrace(sb);
sb.append(",instance=[");
for (int i = 0; i < instanceExecParams.size(); ++i) {
if (i != 0) {
sb.append(",");
}
TNetworkAddress address = instanceExecParams.get(i).host;
Map<Integer, List<TScanRangeParams>> scanRanges =
scanRangeAssignment.get(address);
sb.append("{");
sb.append("id=").append(DebugUtil.printId(instanceExecParams.get(i).instanceId));
sb.append(",host=").append(instanceExecParams.get(i).host);
// Instances with no scan assignment on this host print without ranges.
if (scanRanges == null) {
sb.append("}");
continue;
}
sb.append(",range=[");
int eIdx = 0;
for (Map.Entry<Integer, List<TScanRangeParams>> entry : scanRanges.entrySet()) {
if (eIdx++ != 0) {
sb.append(",");
}
sb.append("id").append(entry.getKey()).append(",");
appendScanRange(sb, entry.getValue());
}
sb.append("]");
sb.append("}");
}
sb.append("]");
sb.append("}");
}
} | class Coordinator {
private static final Logger LOG = LogManager.getLogger(Coordinator.class);
// NOTE(review): SimpleDateFormat is not thread-safe; this shared static
// formatter is safe only if constructors never run concurrently - confirm.
private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private static String localIP = FrontendOptions.getLocalHostAddress();
private static Random instanceRandom = new Random();
// Overall query status; first error wins (see updateStatus).
Status queryStatus = new Status();
// Exec host -> backend id, filled during scan-range assignment.
Map<TNetworkAddress, Long> addressToBackendID = Maps.newHashMap();
private ImmutableMap<Long, Backend> idToBackend = ImmutableMap.of();
private TDescriptorTable descTable;
private TQueryGlobals queryGlobals = new TQueryGlobals();
private TQueryOptions queryOptions;
private TNetworkAddress coordAddress;
// Guards queryStatus and the load side outputs below.
private Lock lock = new ReentrantLock();
private boolean returnedAllResults;
private RuntimeProfile queryProfile;
private List<RuntimeProfile> fragmentProfile;
private Map<PlanFragmentId, FragmentExecParams> fragmentExecParamsMap = Maps.newHashMap();
private List<PlanFragment> fragments;
// One entry per started fragment instance, indexed by backend_num.
private List<BackendExecState> backendExecStates = Lists.newArrayList();
private ResultReceiver receiver;
private ConcurrentMap<TUniqueId, BackendExecState> backendExecStateMap =
Maps.newConcurrentMap();
private List<ScanNode> scanNodes;
private Set<TUniqueId> instanceIds = Sets.newHashSet();
// Counted down once per instance when its final report arrives.
private MarkedCountDownLatch<TUniqueId, Long> profileDoneSignal;
private boolean isBlockQuery;
private int numReceivedRows = 0;
// Load/export side outputs accumulated from instance reports.
private List<String> deltaUrls;
private Map<String, String> loadCounters;
private String trackingUrl;
private List<String> exportFiles;
private List<TTabletCommitInfo> commitInfos = Lists.newArrayList();
private long jobId = -1;
private TUniqueId queryId;
private TResourceInfo tResourceInfo;
private boolean needReport;
private String clusterName;
private final TUniqueId nextInstanceId;
// Constructor for regular queries: takes plan, scan nodes and session
// options from the connect context and planner.
public Coordinator(ConnectContext context, Analyzer analyzer, Planner planner) {
this.isBlockQuery = planner.isBlockQuery();
this.queryId = context.queryId();
this.fragments = planner.getFragments();
this.scanNodes = planner.getScanNodes();
this.descTable = analyzer.getDescTbl().toThrift();
this.returnedAllResults = false;
this.queryOptions = context.getSessionVariable().toThrift();
this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
this.queryGlobals.setTimestamp_ms(new Date().getTime());
// "CST" is remapped to the default time zone - presumably to avoid the
// ambiguous CST abbreviation; confirm against TimeUtils.
if (context.getSessionVariable().getTimeZone().equals("CST")) {
this.queryGlobals.setTime_zone(TimeUtils.DEFAULT_TIME_ZONE);
} else {
this.queryGlobals.setTime_zone(context.getSessionVariable().getTimeZone());
}
this.tResourceInfo = new TResourceInfo(context.getQualifiedUser(),
context.getSessionVariable().getResourceGroup());
this.needReport = context.getSessionVariable().isReportSucc();
this.clusterName = context.getClusterName();
// Instance ids are derived from the query id: same hi, lo offset by 1+.
this.nextInstanceId = new TUniqueId();
nextInstanceId.setHi(queryId.hi);
nextInstanceId.setLo(queryId.lo + 1);
}
// Constructor for load/export jobs (broker load etc.): plan pieces are
// passed explicitly, reporting is always on, and the job runs as a block
// "query" with no session.
public Coordinator(Long jobId, TUniqueId queryId, DescriptorTable descTable,
List<PlanFragment> fragments, List<ScanNode> scanNodes, String cluster, String timezone) {
this.isBlockQuery = true;
this.jobId = jobId;
this.queryId = queryId;
this.descTable = descTable.toThrift();
this.fragments = fragments;
this.scanNodes = scanNodes;
this.queryOptions = new TQueryOptions();
this.queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
this.queryGlobals.setTimestamp_ms(new Date().getTime());
this.queryGlobals.setTime_zone(timezone);
this.tResourceInfo = new TResourceInfo("", "");
this.needReport = true;
this.clusterName = cluster;
// Instance ids are derived from the query id: same hi, lo offset by 1+.
this.nextInstanceId = new TUniqueId();
nextInstanceId.setHi(queryId.hi);
nextInstanceId.setLo(queryId.lo + 1);
}
// Simple accessors for job/query identity, status, profile and the load
// side outputs accumulated from instance reports.
public long getJobId() {
return jobId;
}
public TUniqueId getQueryId() {
return queryId;
}
public void setQueryId(TUniqueId queryId) {
this.queryId = queryId;
}
public void setQueryType(TQueryType type) {
this.queryOptions.setQuery_type(type);
}
public Status getExecStatus() {
return queryStatus;
}
public RuntimeProfile getQueryProfile() {
return queryProfile;
}
public List<String> getDeltaUrls() {
return deltaUrls;
}
public Map<String, String> getLoadCounters() {
return loadCounters;
}
public String getTrackingUrl() {
return trackingUrl;
}
public void setExecMemoryLimit(long execMemoryLimit) {
this.queryOptions.setMem_limit(execMemoryLimit);
}
public void setTimeout(int timeout) {
this.queryOptions.setQuery_timeout(timeout);
}
// Resets per-attempt export state so the coordinator can be reused for a
// retried export run.
public void clearExportStatus() {
lock.lock();
try {
this.backendExecStates.clear();
this.backendExecStateMap.clear();
this.queryStatus.setStatus(new Status());
if (this.exportFiles == null) {
this.exportFiles = Lists.newArrayList();
}
this.exportFiles.clear();
} finally {
lock.unlock();
}
}
public List<TTabletCommitInfo> getCommitInfos() {
return commitInfos;
}
// Builds per-fragment exec params, links producer fragments to their
// consumers, initializes the coordinator address, profiles, and snapshots
// the cluster's backends.
private void prepare() {
for (PlanFragment fragment : fragments) {
fragmentExecParamsMap.put(fragment.getFragmentId(), new FragmentExecParams(fragment));
}
// Record each streaming fragment as an input of its destination fragment.
for (PlanFragment fragment : fragments) {
if (!(fragment.getSink() instanceof DataStreamSink)) {
continue;
}
FragmentExecParams params = fragmentExecParamsMap.get(fragment.getDestFragment().getFragmentId());
params.inputFragments.add(fragment.getFragmentId());
}
coordAddress = new TNetworkAddress(localIP, Config.rpc_port);
// One child profile per fragment under the root execution profile.
int fragmentSize = fragments.size();
queryProfile = new RuntimeProfile("Execution Profile " + DebugUtil.printId(queryId));
fragmentProfile = new ArrayList<RuntimeProfile>();
for (int i = 0; i < fragmentSize; i ++) {
fragmentProfile.add(new RuntimeProfile("Fragment " + i));
queryProfile.addChild(fragmentProfile.get(i));
}
// Snapshot the backends of this cluster for scheduling decisions.
this.idToBackend = Catalog.getCurrentSystemInfo().getBackendsInCluster(clusterName);
if (LOG.isDebugEnabled()) {
LOG.debug("idToBackend size={}", idToBackend.size());
for (Map.Entry<Long, Backend> entry : idToBackend.entrySet()) {
Long backendID = entry.getKey();
Backend backend = entry.getValue();
LOG.debug("backend: {}-{}-{}", backendID, backend.getHost(), backend.getBePort());
}
}
}
// Convenience wrappers around the coordinator lock.
private void lock() {
lock.lock();
}
private void unlock() {
lock.unlock();
}
// Logs (at debug level only) the full fragment/instance/scan-range layout
// computed for this query.
private void traceInstance() {
if (LOG.isDebugEnabled()) {
StringBuilder sb = new StringBuilder();
int idx = 0;
sb.append("query id=").append(DebugUtil.printId(queryId)).append(",");
sb.append("fragment=[");
for (Map.Entry<PlanFragmentId, FragmentExecParams> entry : fragmentExecParamsMap.entrySet()) {
if (idx++ != 0) {
sb.append(",");
}
sb.append(entry.getKey());
entry.getValue().appendTo(sb);
}
sb.append("]");
LOG.debug(sb.toString());
}
}
// Drives query startup: computes scan-range and instance assignments, sets
// up the result receiver (for queries) or load bookkeeping (for loads), then
// sends the exec rpc for every instance and fails fast on any rpc error.
public void exec() throws Exception {
if (!scanNodes.isEmpty()) {
LOG.debug("debug: in Coordinator::exec. query id: {}, planNode: {}",
DebugUtil.printId(queryId), scanNodes.get(0).treeToThrift());
}
if (!fragments.isEmpty()) {
LOG.debug("debug: in Coordinator::exec. query id: {}, fragment: {}",
DebugUtil.printId(queryId), fragments.get(0).toThrift());
}
// Phase 1: planning of execution layout.
prepare();
computeScanRangeAssignment();
computeFragmentExecParams();
traceInstance();
// Phase 2: result plumbing. fragments[0] is the root fragment: a
// ResultSink means a regular query, otherwise this is a load.
PlanFragmentId topId = fragments.get(0).getFragmentId();
FragmentExecParams topParams = fragmentExecParamsMap.get(topId);
if (topParams.fragment.getSink() instanceof ResultSink) {
receiver = new ResultReceiver(
topParams.instanceExecParams.get(0).instanceId,
addressToBackendID.get(topParams.instanceExecParams.get(0).host),
toBrpcHost(topParams.instanceExecParams.get(0).host),
queryOptions.query_timeout * 1000);
} else {
// Loads always report and collect delta urls / counters.
this.queryOptions.setIs_report_success(true);
deltaUrls = Lists.newArrayList();
loadCounters = Maps.newHashMap();
}
// One latch mark per instance; counted down as final reports arrive.
profileDoneSignal = new MarkedCountDownLatch<TUniqueId, Long>(instanceIds.size());
for (TUniqueId instanceId : instanceIds) {
profileDoneSignal.addMark(instanceId, -1L /* value is meaningless */);
}
// Phase 3: dispatch all fragment instances under the coordinator lock.
lock();
try {
int backendId = 0;
int profileFragmentId = 0;
long memoryLimit = queryOptions.getMem_limit();
for (PlanFragment fragment : fragments) {
FragmentExecParams params = fragmentExecParamsMap.get(fragment.getFragmentId());
int instanceNum = params.instanceExecParams.size();
Preconditions.checkState(instanceNum > 0);
List<TExecPlanFragmentParams> tParams = params.toThrift(backendId);
List<Pair<BackendExecState, Future<PExecPlanFragmentResult>>> futures = Lists.newArrayList();
// Colocate fragments share a host, so scale the per-instance
// memory limit down to keep total usage bounded.
if (colocateFragmentIds.contains(fragment.getFragmentId().asInt())) {
int rate = Math.min(Config.query_colocate_join_memory_limit_penalty_factor, instanceNum);
long newmemory = memoryLimit / rate;
for (TExecPlanFragmentParams tParam : tParams) {
tParam.query_options.setMem_limit(newmemory);
}
}
// Send exec rpcs asynchronously, then wait for each result below.
int instanceId = 0;
for (TExecPlanFragmentParams tParam : tParams) {
BackendExecState execState =
new BackendExecState(fragment.getFragmentId(), instanceId++,
profileFragmentId, tParam, this.addressToBackendID);
backendExecStates.add(execState);
backendExecStateMap.put(tParam.params.getFragment_instance_id(), execState);
futures.add(Pair.create(execState, execState.execRemoteFragmentAsync()));
backendId++;
}
for (Pair<BackendExecState, Future<PExecPlanFragmentResult>> pair : futures) {
TStatusCode code = TStatusCode.INTERNAL_ERROR;
String errMsg = null;
try {
PExecPlanFragmentResult result = pair.second.get(Config.remote_fragment_exec_timeout_ms,
TimeUnit.MILLISECONDS);
code = TStatusCode.findByValue(result.status.status_code);
if (result.status.error_msgs != null && !result.status.error_msgs.isEmpty()) {
errMsg = result.status.error_msgs.get(0);
}
} catch (ExecutionException e) {
LOG.warn("catch a execute exception", e);
code = TStatusCode.THRIFT_RPC_ERROR;
} catch (InterruptedException e) {
LOG.warn("catch a interrupt exception", e);
code = TStatusCode.INTERNAL_ERROR;
} catch (TimeoutException e) {
LOG.warn("catch a timeout exception", e);
code = TStatusCode.TIMEOUT;
}
// Any failure cancels the whole query; rpc failures also
// blacklist the backend for future scheduling.
if (code != TStatusCode.OK) {
if (errMsg == null) {
errMsg = "exec rpc error. backend id: " + pair.first.backendId;
}
queryStatus.setStatus(errMsg);
LOG.warn("exec plan fragment failed, errmsg={}, fragmentId={}, backend={}:{}",
errMsg, fragment.getFragmentId(),
pair.first.address.hostname, pair.first.address.port);
cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
switch (code) {
case TIMEOUT:
throw new UserException("query timeout. backend id: " + pair.first.backendId);
case THRIFT_RPC_ERROR:
SimpleScheduler.updateBlacklistBackends(pair.first.backendId);
throw new RpcException("rpc failed. backend id: " + pair.first.backendId);
default:
throw new UserException(errMsg);
}
}
}
profileFragmentId += 1;
}
attachInstanceProfileToFragmentProfile();
} finally {
unlock();
}
}
// Returns the export output files collected from backend reports (may be null
// if no export report arrived yet).
public List<String> getExportFiles() {
    return this.exportFiles;
}
// Appends newly reported export files under the coordinator lock, lazily
// creating the backing list on first use.
void updateExportFiles(List<String> files) {
    lock.lock();
    try {
        if (this.exportFiles == null) {
            this.exportFiles = Lists.newArrayList();
        }
        this.exportFiles.addAll(files);
    } finally {
        lock.unlock();
    }
}
// Thread-safely records delta URLs reported by finished fragment instances.
void updateDeltas(List<String> urls) {
    lock.lock();
    try {
        this.deltaUrls.addAll(urls);
    } finally {
        lock.unlock();
    }
}
// Merges newly reported DPP normal/abnormal row counters into the running
// totals, storing them back as decimal strings. Runs under the coordinator
// lock because several report threads may race.
private void updateLoadCounters(Map<String, String> newLoadCounters) {
lock.lock();
try {
// Current totals (missing counters count as 0)...
long numRowsNormal = 0L;
String value = this.loadCounters.get(LoadEtlTask.DPP_NORMAL_ALL);
if (value != null) {
numRowsNormal = Long.valueOf(value);
}
long numRowsAbnormal = 0L;
value = this.loadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL);
if (value != null) {
numRowsAbnormal = Long.valueOf(value);
}
// ...plus the newly reported deltas.
value = newLoadCounters.get(LoadEtlTask.DPP_NORMAL_ALL);
if (value != null) {
numRowsNormal += Long.valueOf(value);
}
value = newLoadCounters.get(LoadEtlTask.DPP_ABNORMAL_ALL);
if (value != null) {
numRowsAbnormal += Long.valueOf(value);
}
this.loadCounters.put(LoadEtlTask.DPP_NORMAL_ALL, "" + numRowsNormal);
this.loadCounters.put(LoadEtlTask.DPP_ABNORMAL_ALL, "" + numRowsAbnormal);
} finally {
lock.unlock();
}
}
// Thread-safely appends tablet commit infos reported by finished backends;
// they are handed to the txn manager when the load commits.
private void updateCommitInfos(List<TTabletCommitInfo> commitInfos) {
    lock.lock();
    try {
        this.commitInfos.addAll(commitInfos);
    } finally {
        lock.unlock();
    }
}
// Records a failure status reported by a fragment instance and cancels the
// whole query. First failure wins; later reports and expected post-EOS
// cancellations are ignored.
private void updateStatus(Status status, TUniqueId instanceId) {
lock.lock();
try {
// A CANCELLED report after all results were returned is expected: we
// cancelled the remaining instances ourselves, so it is not an error.
if (returnedAllResults && status.isCancelled()) {
return;
}
// Successful reports carry no new information here.
if (status.ok()) {
return;
}
// Keep the first recorded error; do not overwrite it with later ones.
if (!queryStatus.ok()) {
return;
}
queryStatus.setStatus(status);
LOG.warn("one instance report fail throw updateStatus(), need cancel. job id: {}, query id: {}, instance id: {}",
jobId, DebugUtil.printId(queryId), instanceId != null ? DebugUtil.printId(instanceId) : "NaN");
cancelInternal(PPlanFragmentCancelReason.INTERNAL_ERROR);
} finally {
lock.unlock();
}
}
/**
 * Fetches the next batch of results from the coordinator's result receiver.
 * Blocks until a batch arrives or the query fails.
 *
 * @return the next RowBatch; {@code isEos()} marks the end of the stream
 * @throws UserException when the query failed or there is no receiver
 * @throws RpcException  when the failure was an RPC-level error
 */
public RowBatch getNext() throws Exception {
    if (receiver == null) {
        throw new UserException("There is no receiver.");
    }
    RowBatch resultBatch;
    Status status = new Status();
    resultBatch = receiver.getNext(status);
    if (!status.ok()) {
        LOG.warn("get next fail, need cancel. query id: {}", DebugUtil.printId(queryId));
    }
    // Merge this status into the query-wide status (cancels on error).
    updateStatus(status, null /* no instance id */);
    // Snapshot the aggregated status under the lock, then inspect it
    // without holding the lock.
    Status copyStatus = null;
    lock();
    try {
        copyStatus = new Status(queryStatus);
    } finally {
        unlock();
    }
    if (!copyStatus.ok()) {
        if (Strings.isNullOrEmpty(copyStatus.getErrorMsg())) {
            copyStatus.rewriteErrorMsg();
        }
        if (copyStatus.isRpcError()) {
            throw new RpcException(copyStatus.getErrorMsg());
        } else {
            String errMsg = copyStatus.getErrorMsg();
            LOG.warn("query failed: {}", errMsg);
            // Strip the trailing "host ..." detail so users do not see
            // internal backend addresses.
            int hostIndex = errMsg.indexOf("host");
            if (hostIndex != -1) {
                errMsg = errMsg.substring(0, hostIndex);
            }
            throw new UserException(errMsg);
        }
    }
    if (resultBatch.isEos()) {
        this.returnedAllResults = true;
        // When a LIMIT has already been satisfied, proactively cancel the
        // remaining instances of a non-blocking multi-instance query.
        Long numLimitRows = fragments.get(0).getPlanRoot().getLimit();
        boolean hasLimit = numLimitRows > 0;
        if (!isBlockQuery && instanceIds.size() > 1 && hasLimit && numReceivedRows >= numLimitRows) {
            LOG.debug("no block query, return num >= limit rows, need cancel");
            cancelInternal(PPlanFragmentCancelReason.LIMIT_REACH);
        }
    } else {
        numReceivedRows += resultBatch.getBatch().getRowsSize();
    }
    return resultBatch;
}
/**
 * Cancels the query from outside (e.g. a user kill). Has no effect when the
 * query already carries a non-OK status.
 */
public void cancel() {
    lock();
    try {
        if (!queryStatus.ok()) {
            // Already failed or cancelled; nothing more to do.
            return;
        }
        queryStatus.setStatus(Status.CANCELLED);
        LOG.warn("cancel execution of query, this is outside invoke");
        cancelInternal(PPlanFragmentCancelReason.USER_CANCEL);
    } finally {
        unlock();
    }
}
/**
 * Cancels everything attached to this query: the local result receiver, all
 * remote fragment instances, and any threads waiting on the profile latch.
 */
private void cancelInternal(PPlanFragmentCancelReason cancelReason) {
    if (null != receiver) {
        receiver.cancel();
    }
    // Best effort: fire-and-forget cancel RPCs to every backend.
    cancelRemoteFragmentsAsync(cancelReason);
    if (profileDoneSignal != null) {
        // Release anyone blocked in join()/endProfile(); the leftover marks
        // identify instances that never reported completion.
        profileDoneSignal.countDownToZero(new Status());
        LOG.info("unfinished instance: {}", profileDoneSignal.getLeftMarks().stream().map(e->DebugUtil.printId(e.getKey())).toArray());
    }
}
/**
 * Sends an asynchronous cancel RPC to every backend instance that has been
 * started, has not finished, and has not already been cancelled.
 * RPC failures are logged and blacklist the backend; they do not abort
 * cancellation of the remaining instances.
 */
private void cancelRemoteFragmentsAsync(PPlanFragmentCancelReason cancelReason) {
    for (BackendExecState backendExecState : backendExecStates) {
        TNetworkAddress address = backendExecState.getBackendAddress();
        LOG.debug("cancelRemoteFragments initiated={} done={} hasCanceled={} ip={} port={} fragment instance id={}, reason: {}",
                backendExecState.initiated, backendExecState.done, backendExecState.hasCanceled,
                address.hostname, address.port, DebugUtil.printId(backendExecState.getFragmentInstanceId()),
                cancelReason.name());
        backendExecState.lock();
        try {
            // Skip instances that never started, already finished, or were
            // already cancelled.
            if (!backendExecState.initiated) {
                continue;
            }
            if (backendExecState.done) {
                continue;
            }
            if (backendExecState.hasCanceled) {
                continue;
            }
            TNetworkAddress brpcAddress = toBrpcHost(address);
            try {
                BackendServiceProxy.getInstance().cancelPlanFragmentAsync(
                        brpcAddress, backendExecState.getFragmentInstanceId(), cancelReason);
            } catch (RpcException e) {
                LOG.warn("cancel plan fragment get a exception, address={}:{}",
                        brpcAddress.getHostname(), brpcAddress.getPort());
                SimpleScheduler.updateBlacklistBackends(addressToBackendID.get(brpcAddress));
            }
            backendExecState.hasCanceled = true;
        } catch (Exception e) {
            LOG.warn("catch a exception", e);
        } finally {
            backendExecState.unlock();
        }
    }
}
/**
 * Assigns a unique instance id to every fragment instance and wires up each
 * fragment's data sink to the instances of its destination fragment.
 *
 * Must run after computeFragmentHosts() so the number of instances per
 * fragment is known.
 */
private void computeFragmentExecParams() throws Exception {
    computeFragmentHosts();
    // Instance ids are derived from the query id: hi part is shared, lo part
    // is offset by the running instance count (+1 so it never collides with
    // the query id itself).
    instanceIds.clear();
    for (FragmentExecParams params : fragmentExecParamsMap.values()) {
        LOG.debug("fragment {} has instances {}", params.fragment.getFragmentId(), params.instanceExecParams.size());
        for (int j = 0; j < params.instanceExecParams.size(); ++j) {
            TUniqueId instanceId = new TUniqueId();
            instanceId.setHi(queryId.hi);
            instanceId.setLo(queryId.lo + instanceIds.size() + 1);
            params.instanceExecParams.get(j).instanceId = instanceId;
            instanceIds.add(instanceId);
        }
    }
    // Compute per-exchange sender counts and the destination list of every
    // non-root fragment.
    for (FragmentExecParams params : fragmentExecParamsMap.values()) {
        PlanFragment destFragment = params.fragment.getDestFragment();
        if (destFragment == null) {
            // Root fragment: delivers results to the coordinator, not an exchange.
            continue;
        }
        FragmentExecParams destParams = fragmentExecParamsMap.get(destFragment.getFragmentId());
        DataSink sink = params.fragment.getSink();
        PlanNodeId exchId = sink.getExchNodeId();
        // Several sender fragments may feed the same exchange node;
        // accumulate their instance counts.
        destParams.perExchNumSenders.merge(exchId.asInt(), params.instanceExecParams.size(), Integer::sum);
        for (int j = 0; j < destParams.instanceExecParams.size(); ++j) {
            TPlanFragmentDestination dest = new TPlanFragmentDestination();
            dest.fragment_instance_id = destParams.instanceExecParams.get(j).instanceId;
            dest.server = toRpcHost(destParams.instanceExecParams.get(j).host);
            dest.setBrpc_server(toBrpcHost(destParams.instanceExecParams.get(j).host));
            params.destinations.add(dest);
        }
    }
}
/**
 * Resolves the thrift RPC address of the backend that serves the given
 * (hostname, be-port) pair.
 *
 * @throws UserException when no live backend matches the address
 */
private TNetworkAddress toRpcHost(TNetworkAddress host) throws Exception {
    Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort(
            host.getHostname(), host.getPort());
    if (backend == null) {
        throw new UserException("there is no scanNode Backend");
    }
    return new TNetworkAddress(backend.getHost(), backend.getBeRpcPort());
}
/**
 * Resolves the brpc (async RPC) address of the backend that serves the
 * given (hostname, be-port) pair.
 *
 * @return the brpc address, or {@code null} when the backend does not expose
 *         a brpc port (negative port) — callers must tolerate null
 * @throws UserException when no live backend matches the address
 */
private TNetworkAddress toBrpcHost(TNetworkAddress host) throws Exception {
    Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort(
            host.getHostname(), host.getPort());
    if (backend == null) {
        throw new UserException("there is no scanNode Backend");
    }
    if (backend.getBrpcPort() < 0) {
        return null;
    }
    return new TNetworkAddress(backend.getHost(), backend.getBrpcPort());
}
/**
 * Returns whether {@code node} or any plan node below it — without crossing
 * an exchange, i.e. within the same fragment — is a UnionNode.
 *
 * Bug fix: the previous version returned the recursion result of the FIRST
 * non-exchange child, so a union under any later sibling was never seen.
 * Every child is now inspected.
 */
private boolean containsUnionNode(PlanNode node) {
    if (node instanceof UnionNode) {
        return true;
    }
    for (PlanNode child : node.getChildren()) {
        // Exchange marks a fragment boundary; do not descend past it.
        if (child instanceof ExchangeNode) {
            continue;
        }
        // The recursive call also covers the "child is a UnionNode" case.
        if (containsUnionNode(child)) {
            return true;
        }
    }
    return false;
}
/**
 * Returns whether the plan rooted at {@code node} uses a colocate join,
 * unless colocate joins are disabled globally or for this session.
 * Fragments already known to be colocate are cached in colocateFragmentIds.
 *
 * Bug fix: the previous version returned the recursion result of the FIRST
 * child only ("return isColocateJoin(child)" inside the loop), so a colocate
 * join under any later sibling was missed. All children are now inspected.
 */
private boolean isColocateJoin(PlanNode node) {
    if (Config.disable_colocate_join) {
        return false;
    }
    if (ConnectContext.get() != null
            && ConnectContext.get().getSessionVariable().isDisableColocateJoin()) {
        return false;
    }
    // Fast path: this fragment was already classified as colocate.
    if (colocateFragmentIds.contains(node.getFragmentId().asInt())) {
        return true;
    }
    if (node instanceof HashJoinNode) {
        HashJoinNode joinNode = (HashJoinNode) node;
        if (joinNode.isColocate()) {
            colocateFragmentIds.add(joinNode.getFragmentId().asInt());
            return true;
        }
    }
    for (PlanNode childNode : node.getChildren()) {
        if (isColocateJoin(childNode)) {
            return true;
        }
    }
    return false;
}
/**
 * Walks down the leftmost child chain of {@code plan} and returns the first
 * leaf or ExchangeNode encountered.
 */
private PlanNode findLeftmostNode(PlanNode plan) {
    PlanNode current = plan;
    while (!current.getChildren().isEmpty() && !(current instanceof ExchangeNode)) {
        current = current.getChild(0);
    }
    return current;
}
/**
 * Returns the value mapped to {@code key}, inserting and returning
 * {@code defaultVal} when the key is absent (or mapped to null).
 */
private <K, V> V findOrInsert(HashMap<K, V> m, final K key, final V defaultVal) {
    V existing = m.get(key);
    if (existing != null) {
        return existing;
    }
    m.put(key, defaultVal);
    return defaultVal;
}
/**
 * Scan-range-list specialization of findOrInsert: returns the list mapped to
 * {@code key}, inserting {@code defaultVal} when the key is absent.
 */
private List<TScanRangeParams> findOrInsert(Map<Integer, List<TScanRangeParams>> m, Integer key,
                                            ArrayList<TScanRangeParams> defaultVal) {
    List<TScanRangeParams> existing = m.get(key);
    if (existing != null) {
        return existing;
    }
    m.put(key, defaultVal);
    return defaultVal;
}
/**
 * Weight of a scan range used for load balancing. Currently a constant 1,
 * i.e. ranges are balanced by count rather than by actual data size.
 */
private long getScanRangeLength(final TScanRange scanRange) {
    return 1;
}
/**
 * Decides which backend executes each scan range of every scan node.
 * Colocate-join fragments use bucket-based assignment so matching buckets
 * land on the same host; everything else is load-balanced by the scheduler.
 */
private void computeScanRangeAssignment() throws Exception {
    for (ScanNode scanNode : scanNodes) {
        List<TScanRangeLocations> locations = scanNode.getScanRangeLocations(0);
        if (locations == null) {
            // Nothing to scan for this node.
            continue;
        }
        FragmentScanRangeAssignment assignment =
                fragmentExecParamsMap.get(scanNode.getFragmentId()).scanRangeAssignment;
        if (isColocateJoin(scanNode.getFragment().getPlanRoot())) {
            computeScanRangeAssignmentByColocate((OlapScanNode) scanNode, assignment);
        } else {
            computeScanRangeAssignmentByScheduler(scanNode, locations, assignment);
        }
    }
}
/**
 * Assigns scan ranges bucket-by-bucket for a colocate join: every scan range
 * of a bucket is routed to the single host chosen for that bucket seq, so
 * matching buckets of the joined tables end up on the same backend.
 */
private void computeScanRangeAssignmentByColocate(
        final OlapScanNode scanNode,
        FragmentScanRangeAssignment assignment) throws Exception {
    for (Integer bucketSeq : scanNode.bucketSeq2locations.keySet()) {
        List<TScanRangeLocations> locations = scanNode.bucketSeq2locations.get(bucketSeq);
        // Choose (and cache) the executing host for this bucket once.
        if (!bucketSeqToAddress.containsKey(bucketSeq)) {
            getExecHostPortForBucketSeq(locations.get(0), bucketSeq);
        }
        for (TScanRangeLocations location : locations) {
            Map<Integer, List<TScanRangeParams>> scanRanges =
                    findOrInsert(bucketSeqToScanRange, bucketSeq, new HashMap<Integer, List<TScanRangeParams>>());
            List<TScanRangeParams> scanRangeParamsList =
                    findOrInsert(scanRanges, scanNode.getId().asInt(), new ArrayList<TScanRangeParams>());
            TScanRangeParams scanRangeParams = new TScanRangeParams();
            scanRangeParams.scan_range = location.scan_range;
            scanRangeParamsList.add(scanRangeParams);
        }
    }
}
/**
 * Picks an executing backend for one colocate bucket (random replica, falling
 * back via SimpleScheduler) and records the bucketSeq -> address and
 * address -> backendId mappings.
 *
 * @throws UserException when no alive backend hosts a replica of the bucket
 */
private void getExecHostPortForBucketSeq(TScanRangeLocations seqLocation, Integer bucketSeq) throws Exception {
    // NOTE(review): allocates a new Random per call; a shared instance would
    // avoid the repeated allocation — confirm before changing.
    int randomLocation = new Random().nextInt(seqLocation.locations.size());
    Reference<Long> backendIdRef = new Reference<Long>();
    TNetworkAddress execHostPort = SimpleScheduler.getHost(seqLocation.locations.get(randomLocation).backend_id, seqLocation.locations, this.idToBackend, backendIdRef);
    if (execHostPort == null) {
        throw new UserException("there is no scanNode Backend");
    }
    this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
    this.bucketSeqToAddress.put(bucketSeq, execHostPort);
}
/**
 * Greedy load-balanced assignment: each scan range is given to the replica
 * host with the fewest bytes assigned so far (ties broken by iteration
 * order), then the chosen host's running total is increased.
 *
 * NOTE(review): assumes every TScanRangeLocations has at least one location;
 * an empty location list would leave minLocation null and NPE below —
 * confirm that upstream guarantees this.
 */
private void computeScanRangeAssignmentByScheduler(
        final ScanNode scanNode,
        final List<TScanRangeLocations> locations,
        FragmentScanRangeAssignment assignment) throws Exception {
    HashMap<TNetworkAddress, Long> assignedBytesPerHost = Maps.newHashMap();
    for (TScanRangeLocations scanRangeLocations : locations) {
        // Find the replica on the least-loaded host.
        Long minAssignedBytes = Long.MAX_VALUE;
        TScanRangeLocation minLocation = null;
        for (final TScanRangeLocation location : scanRangeLocations.getLocations()) {
            Long assignedBytes = findOrInsert(assignedBytesPerHost, location.server, 0L);
            if (assignedBytes < minAssignedBytes) {
                minAssignedBytes = assignedBytes;
                minLocation = location;
            }
        }
        // Charge this range's weight to the chosen host.
        Long scanRangeLength = getScanRangeLength(scanRangeLocations.scan_range);
        assignedBytesPerHost.put(minLocation.server,
                assignedBytesPerHost.get(minLocation.server) + scanRangeLength);
        Reference<Long> backendIdRef = new Reference<Long>();
        TNetworkAddress execHostPort = SimpleScheduler.getHost(minLocation.backend_id,
                scanRangeLocations.getLocations(), this.idToBackend, backendIdRef);
        if (execHostPort == null) {
            throw new UserException("there is no scanNode Backend");
        }
        this.addressToBackendID.put(execHostPort, backendIdRef.getRef());
        Map<Integer, List<TScanRangeParams>> scanRanges = findOrInsert(assignment, execHostPort,
                new HashMap<Integer, List<TScanRangeParams>>());
        List<TScanRangeParams> scanRangeParamsList = findOrInsert(scanRanges, scanNode.getId().asInt(),
                new ArrayList<TScanRangeParams>());
        TScanRangeParams scanRangeParams = new TScanRangeParams();
        scanRangeParams.scan_range = scanRangeLocations.scan_range;
        scanRangeParams.setVolume_id(minLocation.volume_id);
        scanRangeParamsList.add(scanRangeParams);
    }
}
/**
 * Handles an exec-status report from one fragment instance: merges the
 * instance profile, records errors, and — when the instance is done —
 * collects load/export side data and counts down the profile latch.
 */
public void updateFragmentExecStatus(TReportExecStatusParams params) {
    if (params.backend_num >= backendExecStates.size()) {
        LOG.warn("unknown backend number: {}, expected less than: {}",
                params.backend_num, backendExecStates.size());
        return;
    }
    boolean done = false;
    BackendExecState execState = backendExecStates.get(params.backend_num);
    execState.lock();
    try {
        // Duplicate reports after completion are ignored.
        if (execState.done) {
            return;
        }
        if (params.isSetProfile()) {
            execState.profile.update(params.profile);
        }
        done = params.done;
        execState.done = params.done;
    } finally {
        execState.unlock();
    }
    if (LOG.isDebugEnabled()) {
        StringBuilder builder = new StringBuilder();
        execState.profile().prettyPrint(builder, "");
        LOG.debug("profile for query_id={} instance_id={}\n{}",
                DebugUtil.printId(queryId),
                DebugUtil.printId(params.getFragment_instance_id()),
                builder.toString());
    }
    Status status = new Status(params.status);
    // A CANCELLED status after all results were returned is self-inflicted
    // (we cancelled the leftovers) and is not treated as an error.
    if (!(returnedAllResults && status.isCancelled()) && !status.ok()) {
        LOG.warn("one instance report fail, query_id={} instance_id={}",
                DebugUtil.printId(queryId), DebugUtil.printId(params.getFragment_instance_id()));
        updateStatus(status, params.getFragment_instance_id());
    }
    if (done) {
        // Collect per-instance side data produced by load/export queries.
        if (params.isSetDelta_urls()) {
            updateDeltas(params.getDelta_urls());
        }
        if (params.isSetLoad_counters()) {
            updateLoadCounters(params.getLoad_counters());
        }
        if (params.isSetTracking_url()) {
            trackingUrl = params.tracking_url;
        }
        if (params.isSetExport_files()) {
            updateExportFiles(params.export_files);
        }
        if (params.isSetCommitInfos()) {
            updateCommitInfos(params.getCommitInfos());
        }
        profileDoneSignal.markedCountDown(params.getFragment_instance_id(), -1L);
    }
    if (params.isSetLoaded_rows()) {
        Catalog.getCurrentCatalog().getLoadManager().updateJobLoadedRows(jobId, params.query_id, params.loaded_rows);
    }
    return;
}
/**
 * Finalizes per-fragment runtime profiles after execution: waits briefly for
 * outstanding instance reports (only when reporting is enabled), then sorts
 * the children of each fragment profile.
 */
public void endProfile() {
    if (backendExecStates.isEmpty()) {
        return;
    }
    if (needReport) {
        try {
            profileDoneSignal.await(2, TimeUnit.SECONDS);
        } catch (InterruptedException e1) {
            LOG.warn("signal await error", e1);
        }
    }
    // NOTE(review): starts at index 1 — presumably index 0 is the root/result
    // fragment whose profile needs no sorting; confirm.
    for (int i = 1; i < fragmentProfile.size(); ++i) {
        fragmentProfile.get(i).sortChildren();
    }
}
/**
 * Waits up to {@code seconds} for every fragment instance to report
 * completion.
 *
 * @return true when all instances finished in time, false on timeout or
 *         interruption
 */
public boolean join(int seconds) {
    try {
        return profileDoneSignal.await(seconds, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // Preserve the interrupt status instead of swallowing it, so callers
        // up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
    }
    return false;
}
/** Returns true when every fragment instance has reported completion. */
public boolean isDone() {
    return profileDoneSignal.getCount() == 0;
}
/** Maps executing backend address -> (scan node id -> its assigned scan ranges). */
class FragmentScanRangeAssignment
        extends HashMap<TNetworkAddress, Map<Integer, List<TScanRangeParams>>> {
}
/** Maps colocate bucket seq -> (scan node id -> the bucket's scan ranges). */
class BucketSeqToScanRange extends HashMap<Integer, Map<Integer, List<TScanRangeParams>>> {
}
private BucketSeqToScanRange bucketSeqToScanRange = new BucketSeqToScanRange();
// Executing backend chosen for each colocate bucket seq.
private Map<Integer, TNetworkAddress> bucketSeqToAddress = Maps.newHashMap();
// Fragment ids already classified as containing a colocate join.
private Set<Integer> colocateFragmentIds = new HashSet<>();
/**
 * Per-fragment-instance execution state on one backend: the exec RPC
 * parameters, lifecycle flags and the runtime profile of that instance.
 *
 * Lifecycle flags are guarded by {@code lock}: {@code initiated} is set when
 * the exec RPC is sent, {@code done} when the instance reports completion,
 * {@code hasCanceled} once a cancel RPC has been issued for it.
 */
public class BackendExecState {
    TExecPlanFragmentParams rpcParams;
    private PlanFragmentId fragmentId;
    private int instanceId;
    private boolean initiated;
    private boolean done;
    private boolean hasCanceled;
    private Lock lock = new ReentrantLock();
    private int profileFragmentId;
    RuntimeProfile profile;
    TNetworkAddress address;
    Long backendId;
    public int profileFragmentId() {
        return profileFragmentId;
    }
    public boolean initiated() {
        return initiated;
    }
    public RuntimeProfile profile() {
        return profile;
    }
    public void lock() {
        lock.lock();
    }
    public void unlock() {
        lock.unlock();
    }
    public int getInstanceId() {
        return instanceId;
    }
    public PlanFragmentId getFragmentId() {
        return fragmentId;
    }
    public BackendExecState(PlanFragmentId fragmentId, int instanceId, int profileFragmentId,
                            TExecPlanFragmentParams rpcParams, Map<TNetworkAddress, Long> addressToBackendID) {
        this.profileFragmentId = profileFragmentId;
        this.fragmentId = fragmentId;
        this.instanceId = instanceId;
        this.rpcParams = rpcParams;
        this.initiated = false;
        this.done = false;
        // The host was chosen earlier during scheduling; look it up by
        // fragment and instance index.
        this.address = fragmentExecParamsMap.get(fragmentId).instanceExecParams.get(instanceId).host;
        this.backendId = addressToBackendID.get(address);
        String name = "Instance " + DebugUtil.printId(fragmentExecParamsMap.get(fragmentId)
                .instanceExecParams.get(instanceId).instanceId) + " (host=" + address + ")";
        this.profile = new RuntimeProfile(name);
        this.hasCanceled = false;
    }
    public TNetworkAddress getBackendAddress() {
        return address;
    }
    public TUniqueId getFragmentInstanceId() {
        return this.rpcParams.params.getFragment_instance_id();
    }
    /**
     * Sends the exec-plan-fragment RPC asynchronously. On RPC failure the
     * backend is blacklisted and the exception is rethrown.
     */
    public Future<PExecPlanFragmentResult> execRemoteFragmentAsync() throws TException, RpcException {
        TNetworkAddress brpcAddress = null;
        try {
            brpcAddress = toBrpcHost(address);
        } catch (Exception e) {
            throw new TException(e.getMessage());
        }
        // Mark as initiated before the call so a concurrent cancel sees it.
        initiated = true;
        try {
            return BackendServiceProxy.getInstance().execPlanFragmentAsync(brpcAddress, rpcParams);
        } catch (RpcException e) {
            SimpleScheduler.updateBlacklistBackends(backendId);
            throw e;
        }
    }
}
/**
 * Execution parameters of one plan fragment: its output destinations,
 * per-exchange sender counts, input fragments, one FInstanceExecParam per
 * instance and the fragment's scan-range assignment. Converted into the
 * thrift exec requests sent to the backends.
 */
protected class FragmentExecParams {
    public PlanFragment fragment;
    public List<TPlanFragmentDestination> destinations = Lists.newArrayList();
    public Map<Integer, Integer> perExchNumSenders = Maps.newHashMap();
    public List<PlanFragmentId> inputFragments = Lists.newArrayList();
    public List<FInstanceExecParam> instanceExecParams = Lists.newArrayList();
    public FragmentScanRangeAssignment scanRangeAssignment = new FragmentScanRangeAssignment();
    public FragmentExecParams(PlanFragment fragment) {
        this.fragment = fragment;
    }
    /**
     * Builds one TExecPlanFragmentParams per instance of this fragment.
     *
     * @param backendNum starting backend index; each instance gets the next
     *        value, later used to look up its BackendExecState in reports
     */
    List<TExecPlanFragmentParams> toThrift(int backendNum) {
        List<TExecPlanFragmentParams> paramsList = Lists.newArrayList();
        for (int i = 0; i < instanceExecParams.size(); ++i) {
            final FInstanceExecParam instanceExecParam = instanceExecParams.get(i);
            TExecPlanFragmentParams params = new TExecPlanFragmentParams();
            params.setProtocol_version(PaloInternalServiceVersion.V1);
            params.setFragment(fragment.toThrift());
            params.setDesc_tbl(descTable);
            params.setParams(new TPlanFragmentExecParams());
            params.setResource_info(tResourceInfo);
            params.params.setQuery_id(queryId);
            params.params.setFragment_instance_id(instanceExecParam.instanceId);
            // An instance without scan ranges still needs an (empty) map.
            Map<Integer, List<TScanRangeParams>> scanRanges = instanceExecParam.perNodeScanRanges;
            if (scanRanges == null) {
                scanRanges = Maps.newHashMap();
            }
            params.params.setPer_node_scan_ranges(scanRanges);
            params.params.setPer_exch_num_senders(perExchNumSenders);
            params.params.setDestinations(destinations);
            params.params.setSender_id(i);
            params.params.setNum_senders(instanceExecParams.size());
            params.setCoord(coordAddress);
            params.setBackend_num(backendNum++);
            params.setQuery_globals(queryGlobals);
            params.setQuery_options(queryOptions);
            params.params.setSend_query_statistics_with_every_batch(
                    fragment.isTransferQueryStatisticsWithEveryBatch());
            // Load queries additionally carry the error-hub configuration.
            if (queryOptions.getQuery_type() == TQueryType.LOAD) {
                LoadErrorHub.Param param = Catalog.getCurrentCatalog().getLoadInstance().getLoadErrorHubInfo();
                if (param != null) {
                    TLoadErrorHubInfo info = param.toThrift();
                    if (info != null) {
                        params.setLoad_error_hub_info(info);
                    }
                }
            }
            paramsList.add(params);
        }
        return paramsList;
    }
    /** Appends a human-readable description of the given scan ranges to sb. */
    public void appendScanRange(StringBuilder sb, List<TScanRangeParams> params) {
        sb.append("range=[");
        int idx = 0;
        for (TScanRangeParams range : params) {
            TPaloScanRange paloScanRange = range.getScan_range().getPalo_scan_range();
            if (paloScanRange != null) {
                if (idx++ != 0) {
                    sb.append(",");
                }
                sb.append("{tid=").append(paloScanRange.getTablet_id())
                        .append(",ver=").append(paloScanRange.getVersion()).append("}");
            }
            TEsScanRange esScanRange = range.getScan_range().getEs_scan_range();
            if (esScanRange != null) {
                sb.append("{ index=").append(esScanRange.getIndex())
                        .append(", shardid=").append(esScanRange.getShard_id())
                        .append("}");
            }
        }
        sb.append("]");
    }
    /** Appends a human-readable trace of this fragment and its instances to sb. */
    public void appendTo(StringBuilder sb) {
        sb.append("{plan=");
        fragment.getPlanRoot().appendTrace(sb);
        sb.append(",instance=[");
        for (int i = 0; i < instanceExecParams.size(); ++i) {
            if (i != 0) {
                sb.append(",");
            }
            TNetworkAddress address = instanceExecParams.get(i).host;
            Map<Integer, List<TScanRangeParams>> scanRanges =
                    scanRangeAssignment.get(address);
            sb.append("{");
            sb.append("id=").append(DebugUtil.printId(instanceExecParams.get(i).instanceId));
            sb.append(",host=").append(instanceExecParams.get(i).host);
            if (scanRanges == null) {
                sb.append("}");
                continue;
            }
            sb.append(",range=[");
            int eIdx = 0;
            for (Map.Entry<Integer, List<TScanRangeParams>> entry : scanRanges.entrySet()) {
                if (eIdx++ != 0) {
                    sb.append(",");
                }
                sb.append("id").append(entry.getKey()).append(",");
                appendScanRange(sb, entry.getValue());
            }
            sb.append("]");
            sb.append("}");
        }
        sb.append("]");
        sb.append("}");
    }
}
That was the initial thought, but looking at the implementations I concluded that it would only yield the value 0, since that is an array. It might be more correct to use summaries.entry(i).getFieldCount(), which should give the correct number of fields. Since symbols >= fields, I used a pragmatic "large enough" approach. @havardpe What say you? | private void fill(List<FastHit> hits, byte[] slimeBytes) {
Slime slime = BinaryFormat.decode(slimeBytes);
int maxFieldsInhit = slime.symbols();
Inspector summaries = new SlimeAdapter(slime.get().field("docsums"));
summaries.fieldCount();
if ( ! summaries.valid())
throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field");
for (int i = 0; i < hits.size(); i++) {
FastHit hit = hits.get(i);
hit.reserve(maxFieldsInhit);
fill(hit, summaries.entry(i).field("docsum"));
}
} | summaries.fieldCount(); | private void fill(List<FastHit> hits, byte[] slimeBytes) {
Inspector summaries = new SlimeAdapter(BinaryFormat.decode(slimeBytes).get().field("docsums"));
if ( ! summaries.valid())
throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field");
for (int i = 0; i < hits.size(); i++) {
fill(hits.get(i), summaries.entry(i).field("docsum"));
}
} | class GetDocsumsResponseReceiver {
private final BlockingQueue<Client.GetDocsumsResponseOrError> responses;
private final Compressor compressor;
private final Result result;
/** Whether we have already logged/notified about an error - to avoid spamming */
private boolean hasReportedError = false;
/** The number of responses we should receive (and process) before this is complete */
private int outstandingResponses;
public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) {
this.compressor = compressor;
responses = new LinkedBlockingQueue<>(requestCount);
outstandingResponses = requestCount;
this.result = result;
}
/** Called by a thread belonging to the client when a valid response becomes available */
public void receive(Client.GetDocsumsResponseOrError response) {
responses.add(response);
}
private void throwTimeout() throws TimeoutException {
throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding.");
}
/**
* Call this from the dispatcher thread to initiate and complete processing of responses.
* This will block until all responses are available and processed, or to timeout.
*/
public void processResponses(Query query) throws TimeoutException {
try {
while (outstandingResponses > 0) {
long timeLeftMs = query.getTimeLeft();
if (timeLeftMs <= 0) {
throwTimeout();
}
Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
if (response == null)
throwTimeout();
processResponse(response);
outstandingResponses--;
}
}
catch (InterruptedException e) {
}
}
private void processResponse(Client.GetDocsumsResponseOrError responseOrError) {
if (responseOrError.error().isPresent()) {
if (hasReportedError) return;
String error = responseOrError.error().get();
result.hits().addError(ErrorMessage.createBackendCommunicationError(error));
log.log(Level.WARNING, "Error fetching summary data: "+ error);
}
else {
Client.GetDocsumsResponse response = responseOrError.response().get();
CompressionType compression = CompressionType.valueOf(response.compression());
byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize());
fill(response.hitsContext(), slimeBytes);
}
}
private void fill(FastHit hit, Inspector summary) {
summary.traverse((String name, Inspector value) -> {
hit.setField(name, nativeTypeOf(value));
});
}
private Object nativeTypeOf(Inspector inspector) {
switch (inspector.type()) {
case ARRAY: return inspector;
case OBJECT: return inspector;
case BOOL: return inspector.asBool();
case DATA: return inspector.asData();
case DOUBLE: return inspector.asDouble();
case LONG: return inspector.asLong();
case STRING: return inspector.asString();
case EMPTY : return null;
default: throw new IllegalArgumentException("Unexpected Slime type " + inspector.type());
}
}
} | class GetDocsumsResponseReceiver {
private final BlockingQueue<Client.GetDocsumsResponseOrError> responses;
private final Compressor compressor;
private final Result result;
/** Whether we have already logged/notified about an error - to avoid spamming */
private boolean hasReportedError = false;
/** The number of responses we should receive (and process) before this is complete */
private int outstandingResponses;
public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) {
this.compressor = compressor;
responses = new LinkedBlockingQueue<>(requestCount);
outstandingResponses = requestCount;
this.result = result;
}
/** Called by a thread belonging to the client when a valid response becomes available */
public void receive(Client.GetDocsumsResponseOrError response) {
responses.add(response);
}
private void throwTimeout() throws TimeoutException {
throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding.");
}
/**
* Call this from the dispatcher thread to initiate and complete processing of responses.
* This will block until all responses are available and processed, or to timeout.
*/
public void processResponses(Query query) throws TimeoutException {
try {
while (outstandingResponses > 0) {
long timeLeftMs = query.getTimeLeft();
if (timeLeftMs <= 0) {
throwTimeout();
}
Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
if (response == null)
throwTimeout();
processResponse(response);
outstandingResponses--;
}
}
catch (InterruptedException e) {
}
}
private void processResponse(Client.GetDocsumsResponseOrError responseOrError) {
if (responseOrError.error().isPresent()) {
if (hasReportedError) return;
String error = responseOrError.error().get();
result.hits().addError(ErrorMessage.createBackendCommunicationError(error));
log.log(Level.WARNING, "Error fetching summary data: "+ error);
}
else {
Client.GetDocsumsResponse response = responseOrError.response().get();
CompressionType compression = CompressionType.valueOf(response.compression());
byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize());
fill(response.hitsContext(), slimeBytes);
}
}
private void fill(FastHit hit, Inspector summary) {
hit.reserve(summary.fieldCount());
summary.traverse((String name, Inspector value) -> {
hit.setField(name, nativeTypeOf(value));
});
}
private Object nativeTypeOf(Inspector inspector) {
switch (inspector.type()) {
case ARRAY: return inspector;
case OBJECT: return inspector;
case BOOL: return inspector.asBool();
case DATA: return inspector.asData();
case DOUBLE: return inspector.asDouble();
case LONG: return inspector.asLong();
case STRING: return inspector.asString();
case EMPTY : return null;
default: throw new IllegalArgumentException("Unexpected Slime type " + inspector.type());
}
}
} |
If possible, I suggest performing the reserve operation inside the inner fill based on the actual number of fields in the docsum we are processing. I also suggest accounting for the 0.75 load factor somewhere to avoid a forced re-hash (possibly inside getFieldMap, since capacity is a less useful concept here than 'number of fields I can store before overhead happens'). | private void fill(List<FastHit> hits, byte[] slimeBytes) {
Slime slime = BinaryFormat.decode(slimeBytes);
int maxFieldsInhit = slime.symbols();
Inspector summaries = new SlimeAdapter(slime.get().field("docsums"));
summaries.fieldCount();
if ( ! summaries.valid())
throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field");
for (int i = 0; i < hits.size(); i++) {
FastHit hit = hits.get(i);
hit.reserve(maxFieldsInhit);
fill(hit, summaries.entry(i).field("docsum"));
}
} | summaries.fieldCount(); | private void fill(List<FastHit> hits, byte[] slimeBytes) {
Inspector summaries = new SlimeAdapter(BinaryFormat.decode(slimeBytes).get().field("docsums"));
if ( ! summaries.valid())
throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field");
for (int i = 0; i < hits.size(); i++) {
fill(hits.get(i), summaries.entry(i).field("docsum"));
}
} | class GetDocsumsResponseReceiver {
private final BlockingQueue<Client.GetDocsumsResponseOrError> responses;
private final Compressor compressor;
private final Result result;
/** Whether we have already logged/notified about an error - to avoid spamming */
private boolean hasReportedError = false;
/** The number of responses we should receive (and process) before this is complete */
private int outstandingResponses;
public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) {
this.compressor = compressor;
responses = new LinkedBlockingQueue<>(requestCount);
outstandingResponses = requestCount;
this.result = result;
}
/** Called by a thread belonging to the client when a valid response becomes available */
public void receive(Client.GetDocsumsResponseOrError response) {
responses.add(response);
}
private void throwTimeout() throws TimeoutException {
throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding.");
}
/**
* Call this from the dispatcher thread to initiate and complete processing of responses.
* This will block until all responses are available and processed, or to timeout.
*/
public void processResponses(Query query) throws TimeoutException {
try {
while (outstandingResponses > 0) {
long timeLeftMs = query.getTimeLeft();
if (timeLeftMs <= 0) {
throwTimeout();
}
Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
if (response == null)
throwTimeout();
processResponse(response);
outstandingResponses--;
}
}
catch (InterruptedException e) {
}
}
private void processResponse(Client.GetDocsumsResponseOrError responseOrError) {
if (responseOrError.error().isPresent()) {
if (hasReportedError) return;
String error = responseOrError.error().get();
result.hits().addError(ErrorMessage.createBackendCommunicationError(error));
log.log(Level.WARNING, "Error fetching summary data: "+ error);
}
else {
Client.GetDocsumsResponse response = responseOrError.response().get();
CompressionType compression = CompressionType.valueOf(response.compression());
byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize());
fill(response.hitsContext(), slimeBytes);
}
}
private void fill(FastHit hit, Inspector summary) {
summary.traverse((String name, Inspector value) -> {
hit.setField(name, nativeTypeOf(value));
});
}
private Object nativeTypeOf(Inspector inspector) {
switch (inspector.type()) {
case ARRAY: return inspector;
case OBJECT: return inspector;
case BOOL: return inspector.asBool();
case DATA: return inspector.asData();
case DOUBLE: return inspector.asDouble();
case LONG: return inspector.asLong();
case STRING: return inspector.asString();
case EMPTY : return null;
default: throw new IllegalArgumentException("Unexpected Slime type " + inspector.type());
}
}
} | class GetDocsumsResponseReceiver {
private final BlockingQueue<Client.GetDocsumsResponseOrError> responses;
private final Compressor compressor;
private final Result result;
/** Whether we have already logged/notified about an error - to avoid spamming */
private boolean hasReportedError = false;
/** The number of responses we should receive (and process) before this is complete */
private int outstandingResponses;
public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) {
this.compressor = compressor;
responses = new LinkedBlockingQueue<>(requestCount);
outstandingResponses = requestCount;
this.result = result;
}
/** Called by a thread belonging to the client when a valid response becomes available */
public void receive(Client.GetDocsumsResponseOrError response) {
responses.add(response);
}
private void throwTimeout() throws TimeoutException {
throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding.");
}
/**
* Call this from the dispatcher thread to initiate and complete processing of responses.
* This will block until all responses are available and processed, or to timeout.
*/
public void processResponses(Query query) throws TimeoutException {
try {
while (outstandingResponses > 0) {
long timeLeftMs = query.getTimeLeft();
if (timeLeftMs <= 0) {
throwTimeout();
}
Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS);
if (response == null)
throwTimeout();
processResponse(response);
outstandingResponses--;
}
}
catch (InterruptedException e) {
}
}
private void processResponse(Client.GetDocsumsResponseOrError responseOrError) {
if (responseOrError.error().isPresent()) {
if (hasReportedError) return;
String error = responseOrError.error().get();
result.hits().addError(ErrorMessage.createBackendCommunicationError(error));
log.log(Level.WARNING, "Error fetching summary data: "+ error);
}
else {
Client.GetDocsumsResponse response = responseOrError.response().get();
CompressionType compression = CompressionType.valueOf(response.compression());
byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize());
fill(response.hitsContext(), slimeBytes);
}
}
private void fill(FastHit hit, Inspector summary) {
hit.reserve(summary.fieldCount());
summary.traverse((String name, Inspector value) -> {
hit.setField(name, nativeTypeOf(value));
});
}
private Object nativeTypeOf(Inspector inspector) {
switch (inspector.type()) {
case ARRAY: return inspector;
case OBJECT: return inspector;
case BOOL: return inspector.asBool();
case DATA: return inspector.asData();
case DOUBLE: return inspector.asDouble();
case LONG: return inspector.asLong();
case STRING: return inspector.asString();
case EMPTY : return null;
default: throw new IllegalArgumentException("Unexpected Slime type " + inspector.type());
}
}
} |
I think this could be made simpler, but ok. | private static void validateZones(List<Step> steps) {
List<DeclaredZone> zones = new ArrayList<>();
steps.stream()
.filter(step -> step instanceof DeclaredZone)
.map(DeclaredZone.class::cast)
.forEach(zones::add);
steps.stream()
.filter(step -> step instanceof ParallelZones)
.map(ParallelZones.class::cast)
.flatMap(parallelZones -> parallelZones.zones().stream())
.forEach(zones::add);
Set<DeclaredZone> unique = new HashSet<>();
List<RegionName> duplicates = zones.stream()
.filter(z -> z.environment() == Environment.prod && !unique.add(z))
.map(z -> z.region().get())
.collect(Collectors.toList());
if (!duplicates.isEmpty()) {
throw new IllegalArgumentException("All declared regions must be unique, but found these " +
"duplicated regions: " + duplicates);
}
} | private static void validateZones(List<Step> steps) {
List<DeclaredZone> zones = new ArrayList<>();
steps.stream()
.filter(step -> step instanceof DeclaredZone)
.map(DeclaredZone.class::cast)
.forEach(zones::add);
steps.stream()
.filter(step -> step instanceof ParallelZones)
.map(ParallelZones.class::cast)
.flatMap(parallelZones -> parallelZones.zones().stream())
.forEach(zones::add);
Set<DeclaredZone> unique = new HashSet<>();
List<RegionName> duplicates = zones.stream()
.filter(z -> z.environment() == Environment.prod && !unique.add(z))
.map(z -> z.region().get())
.collect(Collectors.toList());
if (!duplicates.isEmpty()) {
throw new IllegalArgumentException("All declared regions must be unique, but found these " +
"duplicated regions: " + duplicates);
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
validateZones(this.steps);
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Throw an IllegalArgumentException if any production zone is declared multiple times */
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
steps = new ArrayList<>(new LinkedHashSet<>(steps));
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new DeclaredZone(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new DeclaredZone(Environment.test));
}
DeclaredZone testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
DeclaredZone stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
     * @param environment the environment whose first matching deployment step should be removed
* @return the removed step, or null if it is not present
*/
private static DeclaredZone remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (DeclaredZone)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns only the DeclaredZone deployment steps of this in the order they will be performed */
public List<DeclaredZone> zones() {
return steps.stream().filter(step -> step instanceof DeclaredZone).map(DeclaredZone.class::cast)
.collect(Collectors.toList());
}
/** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay")) {
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
} else if (stepTag.getTagName().equals("parallel")) {
List<DeclaredZone> zones = new ArrayList<>();
for (Element regionTag : XML.getChildren(stepTag)) {
zones.add(readDeclaredZone(environment, regionTag));
}
steps.add(new ParallelZones(zones));
} else {
steps.add(readDeclaredZone(environment, stepTag));
}
}
} else {
steps.add(new DeclaredZone(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
}
/** Returns the given attribute as an integer, or 0 if it is not present */
private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Long.parseLong(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
}
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static DeclaredZone readDeclaredZone(Environment environment, Element regionTag) {
return new DeclaredZone(environment, Optional.of(RegionName.from(XML.getValue(regionTag).trim())),
readActive(regionTag));
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
    /** A deployment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public DeclaredZone(Environment environment) {
this(environment, Optional.empty(), false);
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && ! region.isPresent())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(environment, region);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof DeclaredZone)) return false;
DeclaredZone other = (DeclaredZone)o;
if (this.environment != other.environment) return false;
if ( ! this.region.equals(other.region())) return false;
return true;
}
}
/** A deployment step which is to run deployment to multiple zones in parallel */
public static class ParallelZones extends Step {
private final List<DeclaredZone> zones;
public ParallelZones(List<DeclaredZone> zones) {
this.zones = ImmutableList.copyOf(zones);
}
/** The list of zones to deploy in */
public List<DeclaredZone> zones() { return this.zones; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
return zones.stream().anyMatch(zone -> zone.deploysTo(environment, region));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ParallelZones)) return false;
ParallelZones that = (ParallelZones) o;
return Objects.equals(zones, that.zones);
}
@Override
public int hashCode() {
return Objects.hash(zones);
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
validateZones(this.steps);
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Throw an IllegalArgumentException if any production zone is declared multiple times */
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
steps = new ArrayList<>(new LinkedHashSet<>(steps));
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new DeclaredZone(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new DeclaredZone(Environment.test));
}
DeclaredZone testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
DeclaredZone stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
     * @param environment the environment whose first matching deployment step should be removed
* @return the removed step, or null if it is not present
*/
private static DeclaredZone remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (DeclaredZone)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns only the DeclaredZone deployment steps of this in the order they will be performed */
public List<DeclaredZone> zones() {
return steps.stream().filter(step -> step instanceof DeclaredZone).map(DeclaredZone.class::cast)
.collect(Collectors.toList());
}
/** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay")) {
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
} else if (stepTag.getTagName().equals("parallel")) {
List<DeclaredZone> zones = new ArrayList<>();
for (Element regionTag : XML.getChildren(stepTag)) {
zones.add(readDeclaredZone(environment, regionTag));
}
steps.add(new ParallelZones(zones));
} else {
steps.add(readDeclaredZone(environment, stepTag));
}
}
} else {
steps.add(new DeclaredZone(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
}
/** Returns the given attribute as an integer, or 0 if it is not present */
private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Long.parseLong(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
}
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static DeclaredZone readDeclaredZone(Environment environment, Element regionTag) {
return new DeclaredZone(environment, Optional.of(RegionName.from(XML.getValue(regionTag).trim())),
readActive(regionTag));
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
    /** A deployment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public DeclaredZone(Environment environment) {
this(environment, Optional.empty(), false);
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && ! region.isPresent())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(environment, region);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof DeclaredZone)) return false;
DeclaredZone other = (DeclaredZone)o;
if (this.environment != other.environment) return false;
if ( ! this.region.equals(other.region())) return false;
return true;
}
}
/** A deployment step which is to run deployment to multiple zones in parallel */
public static class ParallelZones extends Step {
private final List<DeclaredZone> zones;
public ParallelZones(List<DeclaredZone> zones) {
this.zones = ImmutableList.copyOf(zones);
}
/** The list of zones to deploy in */
public List<DeclaredZone> zones() { return this.zones; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
return zones.stream().anyMatch(zone -> zone.deploysTo(environment, region));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ParallelZones)) return false;
ParallelZones that = (ParallelZones) o;
return Objects.equals(zones, that.zones);
}
@Override
public int hashCode() {
return Objects.hash(zones);
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} | |
You messed up the alignment here | public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay")) {
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
} else if (stepTag.getTagName().equals("parallel")) {
List<DeclaredZone> zones = new ArrayList<>();
for (Element regionTag : XML.getChildren(stepTag)) {
zones.add(readDeclaredZone(environment, regionTag));
}
steps.add(new ParallelZones(zones));
} else {
steps.add(readDeclaredZone(environment, stepTag));
}
}
} else {
steps.add(new DeclaredZone(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
} | longAttribute("minutes", stepTag) * 60 + | public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay")) {
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
} else if (stepTag.getTagName().equals("parallel")) {
List<DeclaredZone> zones = new ArrayList<>();
for (Element regionTag : XML.getChildren(stepTag)) {
zones.add(readDeclaredZone(environment, regionTag));
}
steps.add(new ParallelZones(zones));
} else {
steps.add(readDeclaredZone(environment, stepTag));
}
}
} else {
steps.add(new DeclaredZone(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
// ID of the service to expose through global routing, if any (only valid for prod)
private final Optional<String> globalServiceId;
// When this application upgrades relative to other applications
private final UpgradePolicy upgradePolicy;
// Completed, ordered list of deployment steps (immutable)
private final List<Step> steps;
// The raw XML this spec was parsed from, or null if constructed programmatically
private final String xmlForm;
/** Creates a spec with no known XML form (xmlForm is null). */
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
/**
 * Canonical constructor: validates the total delay, completes and reorders the
 * steps, then validates that no production zone is declared twice.
 */
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
// completeSteps may add implicit test/staging steps and moves them first
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
// validate the completed list, not the caller-supplied one
validateZones(this.steps);
}
/** Throws an IllegalArgumentException if the delays in the given steps sum to more than 24 hours. */
private static void validateTotalDelay(List<Step> steps) {
    long totalDelaySeconds = 0;
    for (Step step : steps) {
        if (step instanceof Delay)
            totalDelaySeconds += ((Delay) step).duration().getSeconds();
    }
    if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
        throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
                                           " but max 24 hours is allowed");
}
/** Throws an IllegalArgumentException if any production zone is declared multiple times. */
private static void validateZones(List<Step> steps) {
    // Collect every declared zone: direct steps first, then members of parallel steps
    List<DeclaredZone> declaredZones = new ArrayList<>();
    for (Step step : steps) {
        if (step instanceof DeclaredZone)
            declaredZones.add((DeclaredZone) step);
    }
    for (Step step : steps) {
        if (step instanceof ParallelZones)
            declaredZones.addAll(((ParallelZones) step).zones());
    }
    // A prod zone seen twice is a duplicate; non-prod zones are never checked
    Set<DeclaredZone> seen = new HashSet<>();
    List<RegionName> duplicates = new ArrayList<>();
    for (DeclaredZone zone : declaredZones) {
        if (zone.environment() == Environment.prod && ! seen.add(zone))
            duplicates.add(zone.region().get());
    }
    if ( ! duplicates.isEmpty()) {
        throw new IllegalArgumentException("All declared regions must be unique, but found these " +
                                           "duplicated regions: " + duplicates);
    }
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
// deduplicate while preserving the caller's ordering
steps = new ArrayList<>(new LinkedHashSet<>(steps));
// deploying to prod implies a staging run first
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new DeclaredZone(Environment.staging));
}
// deploying to staging implies a test run first
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new DeclaredZone(Environment.test));
}
// move test to position 0, then staging to position 1, ahead of everything else
DeclaredZone testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
DeclaredZone stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
 * Removes the first step which deploys to the given environment and returns it.
 *
 * @param environment the environment whose first deployment step should be removed
 * @param steps the step list to remove from (mutated in place)
 * @return the removed step, or null if no step deploys to the given environment
 */
private static DeclaredZone remove(Environment environment, List<Step> steps) {
    int index = 0;
    while (index < steps.size()) {
        if (steps.get(index).deploysTo(environment))
            return (DeclaredZone) steps.remove(index);
        index++;
    }
    return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns only the DeclaredZone deployment steps of this in the order they will be performed */
// NOTE: zones contained inside ParallelZones steps are not included here,
// since only direct DeclaredZone steps pass the instanceof filter
public List<DeclaredZone> zones() {
return steps.stream().filter(step -> step instanceof DeclaredZone).map(DeclaredZone.class::cast)
.collect(Collectors.toList());
}
/** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
 * Creates a deployment spec by reading XML from the given reader.
 *
 * @throws IllegalArgumentException if the XML is invalid or cannot be read
 */
public static DeploymentSpec fromXml(Reader reader) {
    String xmlForm;
    try {
        xmlForm = IOUtils.readAll(reader);
    }
    catch (IOException e) {
        throw new IllegalArgumentException("Could not read deployment spec", e);
    }
    return fromXml(xmlForm);
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
/** Returns the given attribute as a long, or 0 if it is not present */
private static long longAttribute(String attributeName, Element tag) {
    String attributeValue = tag.getAttribute(attributeName);
    if (attributeValue == null || attributeValue.isEmpty()) return 0;
    try {
        return Long.parseLong(attributeValue);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
                                           "' but got '" + attributeValue + "'");
    }
}
/** Returns whether the given tag name denotes a deployable environment: test, staging or prod. */
private static boolean isEnvironmentName(String tagName) {
    switch (tagName) {
        case "test":
        case "staging":
        case "prod":
            return true;
        default:
            return false;
    }
}
// Reads one region tag into a DeclaredZone: the region name is the tag's trimmed
// text content, and the mandatory 'active' attribute controls production traffic.
private static DeclaredZone readDeclaredZone(Environment environment, Element regionTag) {
return new DeclaredZone(environment, Optional.of(RegionName.from(XML.getValue(regionTag).trim())),
readActive(regionTag));
}
/** Reads the 'global-service-id' attribute of the given environment tag, empty if absent or blank. */
private static Optional<String> readGlobalServiceId(Element environmentTag) {
    String globalServiceId = environmentTag.getAttribute("global-service-id");
    boolean present = globalServiceId != null && ! globalServiceId.isEmpty();
    return present ? Optional.of(globalServiceId) : Optional.empty();
}
/** Reads the upgrade policy from the 'upgrade' tag, defaulting to defaultPolicy when the tag is absent. */
private static UpgradePolicy readUpgradePolicy(Element root) {
    Element upgradeElement = XML.getChild(root, "upgrade");
    if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
    String policy = upgradeElement.getAttribute("policy");
    if (policy.equals("canary")) return UpgradePolicy.canary;
    if (policy.equals("default")) return UpgradePolicy.defaultPolicy;
    if (policy.equals("conservative")) return UpgradePolicy.conservative;
    throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
                                       "Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
/** Reads the mandatory 'active' attribute of a region tag; it must be exactly "true" or "false". */
private static boolean readActive(Element regionTag) {
    String activeValue = regionTag.getAttribute("active");
    Boolean active = "true".equals(activeValue) ? Boolean.TRUE
                   : "false".equals(activeValue) ? Boolean.FALSE
                   : null;
    if (active == null)
        throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
                                           "to control whether the region should receive production traffic");
    return active;
}
/**
 * Returns the messages of the given exception and its cause chain joined by ": ",
 * skipping null messages and messages equal to the previously appended one.
 */
public static String toMessageString(Throwable t) {
    StringBuilder result = new StringBuilder();
    String previousMessage = null;
    for (Throwable cause = t; cause != null; cause = cause.getCause()) {
        String message = cause.getMessage();
        if (message == null || message.equals(previousMessage)) continue;
        if (result.length() > 0)
            result.append(": ");
        result.append(message);
        previousMessage = message;
    }
    return result.toString();
}
/** This may be invoked by a continuous build */
// Usage: DeploymentSpec <file> <environment> [region]
// Exits 0 if the given zone is included in the spec, 1 otherwise (or on any error).
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
// region is optional; when absent the match is on environment only
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
// parse or I/O failures are reported as non-inclusion with a diagnostic
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A deployment step */
public abstract static class Step {
/** Returns whether this step deploys to the given environment */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
// the length of the pause; the total across all delays is capped by validateTotalDelay
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
/** A delay deploys to no environment or region. */
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {
    private final Environment environment;
    // Present if and only if this is a prod zone; made final since it is never reassigned
    private final Optional<RegionName> region;
    private final boolean active;
    /** Creates a non-prod zone, which has no region and receives no production traffic. */
    public DeclaredZone(Environment environment) {
        this(environment, Optional.empty(), false);
    }
    /**
     * Creates a declared zone.
     *
     * @param environment the environment to deploy to
     * @param region the region to deploy to; must be present exactly when environment is prod
     * @param active whether this zone should receive production traffic
     * @throws IllegalArgumentException if the region presence is inconsistent with the environment
     */
    public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
        if (environment != Environment.prod && region.isPresent())
            throw new IllegalArgumentException("Non-prod environments cannot specify a region");
        if (environment == Environment.prod && ! region.isPresent())
            throw new IllegalArgumentException("Prod environments must be specified with a region");
        this.environment = environment;
        this.region = region;
        this.active = active;
    }
    public Environment environment() { return environment; }
    /** The region name, or empty if not declared */
    public Optional<RegionName> region() { return region; }
    /** Returns whether this zone should receive production traffic */
    public boolean active() { return active; }
    @Override
    public boolean deploysTo(Environment environment, Optional<RegionName> region) {
        if (environment != this.environment) return false;
        if (region.isPresent() && ! region.equals(this.region)) return false;
        return true;
    }
    // Identity is (environment, region); 'active' is not part of equals/hashCode
    @Override
    public int hashCode() {
        return Objects.hash(environment, region);
    }
    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if ( ! (o instanceof DeclaredZone)) return false;
        DeclaredZone other = (DeclaredZone) o;
        if (this.environment != other.environment) return false;
        if ( ! this.region.equals(other.region())) return false;
        return true;
    }
}
/** A deployment step which is to run deployment to multiple zones in parallel */
public static class ParallelZones extends Step {
// immutable snapshot of the zones to deploy to concurrently
private final List<DeclaredZone> zones;
public ParallelZones(List<DeclaredZone> zones) {
this.zones = ImmutableList.copyOf(zones);
}
/** The list of zones to deploy in */
public List<DeclaredZone> zones() { return this.zones; }
/** Deploys to the given environment/region if any member zone does. */
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
return zones.stream().anyMatch(zone -> zone.deploysTo(environment, region));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ParallelZones)) return false;
ParallelZones that = (ParallelZones) o;
return Objects.equals(zones, that.zones);
}
@Override
public int hashCode() {
return Objects.hash(zones);
}
}
/** Controls when this application will be upgraded to new Vespa versions */
// XML values accepted by readUpgradePolicy: "canary", "default", "conservative"
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
// note: written as "default" in deployment XML
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
validateZones(this.steps);
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Throw an IllegalArgumentException if any production zone is declared multiple times */
private static void validateZones(List<Step> steps) {
List<DeclaredZone> zones = new ArrayList<>();
steps.stream()
.filter(step -> step instanceof DeclaredZone)
.map(DeclaredZone.class::cast)
.forEach(zones::add);
steps.stream()
.filter(step -> step instanceof ParallelZones)
.map(ParallelZones.class::cast)
.flatMap(parallelZones -> parallelZones.zones().stream())
.forEach(zones::add);
Set<DeclaredZone> unique = new HashSet<>();
List<RegionName> duplicates = zones.stream()
.filter(z -> z.environment() == Environment.prod && !unique.add(z))
.map(z -> z.region().get())
.collect(Collectors.toList());
if (!duplicates.isEmpty()) {
throw new IllegalArgumentException("All declared regions must be unique, but found these " +
"duplicated regions: " + duplicates);
}
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
steps = new ArrayList<>(new LinkedHashSet<>(steps));
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new DeclaredZone(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new DeclaredZone(Environment.test));
}
DeclaredZone testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
DeclaredZone stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @param environment
* @return the removed step, or null if it is not present
*/
private static DeclaredZone remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (DeclaredZone)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns only the DeclaredZone deployment steps of this in the order they will be performed */
public List<DeclaredZone> zones() {
return steps.stream().filter(step -> step instanceof DeclaredZone).map(DeclaredZone.class::cast)
.collect(Collectors.toList());
}
/** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
/** Returns the given attribute as an integer, or 0 if it is not present */
private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Long.parseLong(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
}
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static DeclaredZone readDeclaredZone(Environment environment, Element regionTag) {
return new DeclaredZone(environment, Optional.of(RegionName.from(XML.getValue(regionTag).trim())),
readActive(regionTag));
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A deployment step */
public abstract static class Step {
/** Returns whether this step deploys to the given environment */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {
private final Environment environment;
private Optional<RegionName> region;
private final boolean active;
public DeclaredZone(Environment environment) {
this(environment, Optional.empty(), false);
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && ! region.isPresent())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(environment, region);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof DeclaredZone)) return false;
DeclaredZone other = (DeclaredZone)o;
if (this.environment != other.environment) return false;
if ( ! this.region.equals(other.region())) return false;
return true;
}
}
/** A deployment step which is to run deployment to multiple zones in parallel */
public static class ParallelZones extends Step {
private final List<DeclaredZone> zones;
public ParallelZones(List<DeclaredZone> zones) {
this.zones = ImmutableList.copyOf(zones);
}
/** The list of zones to deploy in */
public List<DeclaredZone> zones() { return this.zones; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
return zones.stream().anyMatch(zone -> zone.deploysTo(environment, region));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ParallelZones)) return false;
ParallelZones that = (ParallelZones) o;
return Objects.equals(zones, that.zones);
}
@Override
public int hashCode() {
return Objects.hash(zones);
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} |
Agreed, it's a bit inelegant. Any concrete suggestions? | private static void validateZones(List<Step> steps) {
List<DeclaredZone> zones = new ArrayList<>();
steps.stream()
.filter(step -> step instanceof DeclaredZone)
.map(DeclaredZone.class::cast)
.forEach(zones::add);
steps.stream()
.filter(step -> step instanceof ParallelZones)
.map(ParallelZones.class::cast)
.flatMap(parallelZones -> parallelZones.zones().stream())
.forEach(zones::add);
Set<DeclaredZone> unique = new HashSet<>();
List<RegionName> duplicates = zones.stream()
.filter(z -> z.environment() == Environment.prod && !unique.add(z))
.map(z -> z.region().get())
.collect(Collectors.toList());
if (!duplicates.isEmpty()) {
throw new IllegalArgumentException("All declared regions must be unique, but found these " +
"duplicated regions: " + duplicates);
}
} | private static void validateZones(List<Step> steps) {
List<DeclaredZone> zones = new ArrayList<>();
steps.stream()
.filter(step -> step instanceof DeclaredZone)
.map(DeclaredZone.class::cast)
.forEach(zones::add);
steps.stream()
.filter(step -> step instanceof ParallelZones)
.map(ParallelZones.class::cast)
.flatMap(parallelZones -> parallelZones.zones().stream())
.forEach(zones::add);
Set<DeclaredZone> unique = new HashSet<>();
List<RegionName> duplicates = zones.stream()
.filter(z -> z.environment() == Environment.prod && !unique.add(z))
.map(z -> z.region().get())
.collect(Collectors.toList());
if (!duplicates.isEmpty()) {
throw new IllegalArgumentException("All declared regions must be unique, but found these " +
"duplicated regions: " + duplicates);
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
private final List<Step> steps;
private final String xmlForm;
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
validateZones(this.steps);
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Throw an IllegalArgumentException if any production zone is declared multiple times */
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
steps = new ArrayList<>(new LinkedHashSet<>(steps));
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new DeclaredZone(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new DeclaredZone(Environment.test));
}
DeclaredZone testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
DeclaredZone stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @param environment
* @return the removed step, or null if it is not present
*/
private static DeclaredZone remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if (steps.get(i).deploysTo(environment))
return (DeclaredZone)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns only the DeclaredZone deployment steps of this in the order they will be performed */
public List<DeclaredZone> zones() {
return steps.stream().filter(step -> step instanceof DeclaredZone).map(DeclaredZone.class::cast)
.collect(Collectors.toList());
}
/** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
 * Creates a deployment spec from XML.
 *
 * @throws IllegalArgumentException if the XML is invalid
 */
public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
// ignore tags which are not test/staging/prod (e.g. the 'upgrade' tag)
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
// prod contains an ordered list of child steps: delay, parallel, or a region
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay")) {
// delay duration is the sum of optional hours/minutes/seconds attributes
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
} else if (stepTag.getTagName().equals("parallel")) {
// every child of a 'parallel' tag is a region deployed concurrently
List<DeclaredZone> zones = new ArrayList<>();
for (Element regionTag : XML.getChildren(stepTag)) {
zones.add(readDeclaredZone(environment, regionTag));
}
steps.add(new ParallelZones(zones));
} else {
steps.add(readDeclaredZone(environment, stepTag));
}
}
} else {
// test/staging tags have no children; the environment itself is the step
steps.add(new DeclaredZone(environment));
}
// global-service-id is only meaningful (and only allowed) on the prod tag
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
}
/** Returns the given attribute as an integer, or 0 if it is not present */
private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Long.parseLong(value);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
}
private static boolean isEnvironmentName(String tagName) {
return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
}
private static DeclaredZone readDeclaredZone(Environment environment, Element regionTag) {
return new DeclaredZone(environment, Optional.of(RegionName.from(XML.getValue(regionTag).trim())),
readActive(regionTag));
}
private static Optional<String> readGlobalServiceId(Element environmentTag) {
String globalServiceId = environmentTag.getAttribute("global-service-id");
if (globalServiceId == null || globalServiceId.isEmpty()) {
return Optional.empty();
}
else {
return Optional.of(globalServiceId);
}
}
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
/**
 * Reads the mandatory 'active' attribute of a prod region tag.
 *
 * @throws IllegalArgumentException if the attribute is missing or not exactly 'true'/'false'
 */
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
// Constant-first equals keeps these checks null-safe
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
/**
 * Returns the messages of the given throwable and its chain of causes joined by ": ".
 * Causes without a message are skipped, as is any message repeating the previously
 * included one.
 */
public static String toMessageString(Throwable t) {
    List<String> messages = new ArrayList<>();
    String previous = null;
    for (Throwable current = t; current != null; current = current.getCause()) {
        String message = current.getMessage();
        if (message == null) continue;
        if (message.equals(previous)) continue;
        messages.add(message);
        previous = message;
    }
    return String.join(": ", messages);
}
/**
 * Command-line entry point, usable by a continuous build.
 * Arguments: [file] [environment] [region]?
 * Exits 0 if the given zone is included in the deployment spec file, 1 otherwise or on error.
 */
public static void main(String[] args) {
    if (args.length != 2 && args.length != 3) {
        // Bug fix: the two string literals were concatenated with no separator,
        // printing "...[region]?Returns 0 ..."; a newline keeps the sentences apart.
        System.err.println("Usage: DeploymentSpec [file] [environment] [region]?\n" +
                           "Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
        System.exit(1);
    }
    try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
        DeploymentSpec spec = DeploymentSpec.fromXml(reader);
        Environment environment = Environment.from(args[1]);
        Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
        if (spec.includes(environment, region))
            System.exit(0);
        else
            System.exit(1);
    }
    catch (Exception e) {
        System.err.println("Exception checking deployment spec: " + toMessageString(e));
        System.exit(1);
    }
}
/** A deployment step */
public abstract static class Step {

/** Returns whether this step deploys to the given environment in any region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}

/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);

}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {

// The pause length; the sum over all Delay steps is validated by validateTotalDelay
private final Duration duration;

public Delay(Duration duration) {
this.duration = duration;
}

/** Returns the duration of this delay */
public Duration duration() { return duration; }

/** A delay deploys to no environment or region */
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }

}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {

    private final Environment environment;

    /** The declared region; empty for non-prod zones. Made final: it is never reassigned,
        and instances are hashed (see hashCode), so the field must not mutate. */
    private final Optional<RegionName> region;

    /** Whether this zone should receive production traffic */
    private final boolean active;

    /** Creates a zone in a non-prod environment: no region, not active */
    public DeclaredZone(Environment environment) {
        this(environment, Optional.empty(), false);
    }

    /**
     * Creates a declared zone.
     *
     * @throws IllegalArgumentException if a region is given outside prod, or omitted in prod
     */
    public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
        if (environment != Environment.prod && region.isPresent())
            throw new IllegalArgumentException("Non-prod environments cannot specify a region");
        if (environment == Environment.prod && ! region.isPresent())
            throw new IllegalArgumentException("Prod environments must be specified with a region");
        this.environment = environment;
        this.region = region;
        this.active = active;
    }

    public Environment environment() { return environment; }

    /** The region name, or empty if not declared */
    public Optional<RegionName> region() { return region; }

    /** Returns whether this zone should receive production traffic */
    public boolean active() { return active; }

    @Override
    public boolean deploysTo(Environment environment, Optional<RegionName> region) {
        if (environment != this.environment) return false;
        // An absent region argument matches any region of this zone
        if (region.isPresent() && ! region.equals(this.region)) return false;
        return true;
    }

    // NOTE(review): equals/hashCode intentionally(?) ignore 'active', so zones differing only
    // in traffic status compare equal (e.g. for duplicate detection) — confirm this is wanted.
    @Override
    public int hashCode() {
        return Objects.hash(environment, region);
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if ( ! (o instanceof DeclaredZone)) return false;
        DeclaredZone other = (DeclaredZone)o;
        if (this.environment != other.environment) return false;
        if ( ! this.region.equals(other.region())) return false;
        return true;
    }

}
/** A deployment step which is to run deployment to multiple zones in parallel */
public static class ParallelZones extends Step {

// Immutable snapshot of the zones; exposed directly by zones()
private final List<DeclaredZone> zones;

public ParallelZones(List<DeclaredZone> zones) {
this.zones = ImmutableList.copyOf(zones);
}

/** The list of zones to deploy in */
public List<DeclaredZone> zones() { return this.zones; }

/** Returns whether any zone in this group deploys to the given environment/region */
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
return zones.stream().anyMatch(zone -> zone.deploysTo(environment, region));
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ParallelZones)) return false;
ParallelZones that = (ParallelZones) o;
return Objects.equals(zones, that.zones);
}

@Override
public int hashCode() {
return Objects.hash(zones);
}

}
/**
 * Controls when this application will be upgraded to new Vespa versions.
 * NOTE(review): constants appear ordered from earliest to latest upgrading — confirm
 * before relying on ordinal order.
 */
public enum UpgradePolicy {

/** Canary: Applications with this policy will upgrade before any other */
canary,

/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,

/** Will upgrade after most default applications upgraded successfully */
conservative

}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");

// Service to expose through global routing, if any (prod only)
private final Optional<String> globalServiceId;

// When this application upgrades relative to others; defaultPolicy if unspecified
private final UpgradePolicy upgradePolicy;

// Deployment steps in execution order, normalized by completeSteps
private final List<Step> steps;

// The raw XML this was parsed from, or null if constructed programmatically
private final String xmlForm;
/** Creates a spec from its parts, with no backing XML form */
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
/**
 * Canonical constructor: validates the delay budget, completes and reorders the
 * step list, then validates the completed zone list.
 */
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
validateZones(this.steps);
}
/** Throws an IllegalArgumentException if the combined duration of all delay steps exceeds 24 hours. */
private static void validateTotalDelay(List<Step> steps) {
    long totalDelaySeconds = 0;
    for (Step step : steps) {
        if (step instanceof Delay)
            totalDelaySeconds += ((Delay) step).duration().getSeconds();
    }
    if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
        throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
                                           " but max 24 hours is allowed");
}
/** Throw an IllegalArgumentException if any production zone is declared multiple times */
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
// Deduplicate while keeping each step's first-occurrence position
steps = new ArrayList<>(new LinkedHashSet<>(steps));
// Deploying to prod requires a staging step ...
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new DeclaredZone(Environment.staging));
}
// ... and staging requires a test step
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new DeclaredZone(Environment.test));
}
// Force ordering: test first, then staging. Index 1 is safe because a present
// staging step implies a test step was ensured above and moved to index 0.
DeclaredZone testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
DeclaredZone stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
 * Removes the first occurrence of a deployment step to the given environment and returns it.
 *
 * @param environment the environment whose step should be removed
 * @param steps the mutable step list to remove from
 * @return the removed step, or null if it is not present
 */
private static DeclaredZone remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
// NOTE(review): assumes the matching step is a DeclaredZone, not a ParallelZones;
// holds for the test/staging callers in completeSteps — confirm for other uses.
if (steps.get(i).deploysTo(environment))
return (DeclaredZone)steps.remove(i);
}
return null;
}
/** The ID of the service to expose through global routing, if one is declared. */
public Optional<String> globalServiceId() { return this.globalServiceId; }
/** The upgrade policy declared for this application; defaultPolicy when none was specified. */
public UpgradePolicy upgradePolicy() {
    return this.upgradePolicy;
}
/** All deployment steps of this, in the order they will be executed. */
public List<Step> steps() {
    return this.steps;
}
/** Only the top-level DeclaredZone steps of this, in execution order (zones inside parallel groups are not included). */
public List<DeclaredZone> zones() {
    List<DeclaredZone> declaredZones = new ArrayList<>();
    for (Step step : steps) {
        if (step instanceof DeclaredZone)
            declaredZones.add((DeclaredZone) step);
    }
    return declaredZones;
}
/** The XML text this spec was parsed from; null when not created by fromXml and not the empty spec. */
public String xmlForm() {
    return this.xmlForm;
}
/** Returns whether some step of this spec covers the given zone, either implicitly or explicitly. */
public boolean includes(Environment environment, Optional<RegionName> region) {
    return steps.stream().anyMatch(step -> step.deploysTo(environment, region));
}
/**
 * Creates a deployment spec from XML read from the given reader.
 *
 * @throws IllegalArgumentException if the XML is invalid, or if the reader cannot be read
 */
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
// Wrap I/O failures in the same exception type callers already handle for invalid XML
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
 * Creates a deployment spec from XML.
 *
 * @throws IllegalArgumentException if the XML is invalid
 */
public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
// Skip non-environment children such as <upgrade>
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
// Prod contains an ordered mix of region, <delay> and <parallel> steps
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay")) {
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
} else if (stepTag.getTagName().equals("parallel")) {
// Regions nested in <parallel> are deployed to concurrently
List<DeclaredZone> zones = new ArrayList<>();
for (Element regionTag : XML.getChildren(stepTag)) {
zones.add(readDeclaredZone(environment, regionTag));
}
steps.add(new ParallelZones(zones));
} else {
steps.add(readDeclaredZone(environment, stepTag));
}
}
} else {
// Non-prod environments become a single region-less zone
steps.add(new DeclaredZone(environment));
}
// global-service-id is only legal on the prod tag; elsewhere it is an error
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
}
/**
 * Returns the given attribute of the tag parsed as a long, or 0 if the attribute
 * is missing or empty.
 *
 * @throws IllegalArgumentException if the attribute is present but not parseable
 */
private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Long.parseLong(value);
}
catch (NumberFormatException e) {
// Report the offending attribute and value instead of the raw parse error
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
}
/** Returns whether the given tag name denotes one of the deployable environments. */
private static boolean isEnvironmentName(String tagName) {
    return Arrays.asList("test", "staging", "prod").contains(tagName);
}
/** Parses one region tag into a DeclaredZone belonging to the given environment. */
private static DeclaredZone readDeclaredZone(Environment environment, Element regionTag) {
    RegionName region = RegionName.from(XML.getValue(regionTag).trim());
    boolean active = readActive(regionTag);
    return new DeclaredZone(environment, Optional.of(region), active);
}
/** Reads the 'global-service-id' attribute of an environment tag, or empty if it is missing or blank. */
private static Optional<String> readGlobalServiceId(Element environmentTag) {
    String globalServiceId = environmentTag.getAttribute("global-service-id");
    boolean absent = globalServiceId == null || globalServiceId.isEmpty();
    return absent ? Optional.empty() : Optional.of(globalServiceId);
}
/** Reads the upgrade policy from the optional 'upgrade' child tag; defaults to defaultPolicy. */
private static UpgradePolicy readUpgradePolicy(Element root) {
    Element upgradeElement = XML.getChild(root, "upgrade");
    if (upgradeElement == null) return UpgradePolicy.defaultPolicy;

    String policy = upgradeElement.getAttribute("policy");
    if ("canary".equals(policy)) return UpgradePolicy.canary;
    if ("default".equals(policy)) return UpgradePolicy.defaultPolicy;
    if ("conservative".equals(policy)) return UpgradePolicy.conservative;
    throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
                                       "Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
/**
 * Reads the mandatory 'active' attribute of a prod region tag.
 *
 * @throws IllegalArgumentException if the attribute is missing or not exactly 'true'/'false'
 */
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
// Constant-first equals keeps these checks null-safe
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
/**
 * Returns the messages of the given throwable and its chain of causes joined by ": ".
 * Causes without a message are skipped, as is any message repeating the previously
 * included one.
 */
public static String toMessageString(Throwable t) {
    List<String> messages = new ArrayList<>();
    String previous = null;
    for (Throwable current = t; current != null; current = current.getCause()) {
        String message = current.getMessage();
        if (message == null) continue;
        if (message.equals(previous)) continue;
        messages.add(message);
        previous = message;
    }
    return String.join(": ", messages);
}
/**
 * Command-line entry point, usable by a continuous build.
 * Arguments: [file] [environment] [region]?
 * Exits 0 if the given zone is included in the deployment spec file, 1 otherwise or on error.
 */
public static void main(String[] args) {
    if (args.length != 2 && args.length != 3) {
        // Bug fix: the two string literals were concatenated with no separator,
        // printing "...[region]?Returns 0 ..."; a newline keeps the sentences apart.
        System.err.println("Usage: DeploymentSpec [file] [environment] [region]?\n" +
                           "Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
        System.exit(1);
    }
    try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
        DeploymentSpec spec = DeploymentSpec.fromXml(reader);
        Environment environment = Environment.from(args[1]);
        Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
        if (spec.includes(environment, region))
            System.exit(0);
        else
            System.exit(1);
    }
    catch (Exception e) {
        System.err.println("Exception checking deployment spec: " + toMessageString(e));
        System.exit(1);
    }
}
/** A deployment step */
public abstract static class Step {

/** Returns whether this step deploys to the given environment in any region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}

/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);

}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {

// The pause length; the sum over all Delay steps is validated by validateTotalDelay
private final Duration duration;

public Delay(Duration duration) {
this.duration = duration;
}

/** Returns the duration of this delay */
public Duration duration() { return duration; }

/** A delay deploys to no environment or region */
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }

}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {

    private final Environment environment;

    /** The declared region; empty for non-prod zones. Made final: it is never reassigned,
        and instances are hashed (see hashCode), so the field must not mutate. */
    private final Optional<RegionName> region;

    /** Whether this zone should receive production traffic */
    private final boolean active;

    /** Creates a zone in a non-prod environment: no region, not active */
    public DeclaredZone(Environment environment) {
        this(environment, Optional.empty(), false);
    }

    /**
     * Creates a declared zone.
     *
     * @throws IllegalArgumentException if a region is given outside prod, or omitted in prod
     */
    public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
        if (environment != Environment.prod && region.isPresent())
            throw new IllegalArgumentException("Non-prod environments cannot specify a region");
        if (environment == Environment.prod && ! region.isPresent())
            throw new IllegalArgumentException("Prod environments must be specified with a region");
        this.environment = environment;
        this.region = region;
        this.active = active;
    }

    public Environment environment() { return environment; }

    /** The region name, or empty if not declared */
    public Optional<RegionName> region() { return region; }

    /** Returns whether this zone should receive production traffic */
    public boolean active() { return active; }

    @Override
    public boolean deploysTo(Environment environment, Optional<RegionName> region) {
        if (environment != this.environment) return false;
        // An absent region argument matches any region of this zone
        if (region.isPresent() && ! region.equals(this.region)) return false;
        return true;
    }

    // NOTE(review): equals/hashCode intentionally(?) ignore 'active', so zones differing only
    // in traffic status compare equal (e.g. for duplicate detection) — confirm this is wanted.
    @Override
    public int hashCode() {
        return Objects.hash(environment, region);
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if ( ! (o instanceof DeclaredZone)) return false;
        DeclaredZone other = (DeclaredZone)o;
        if (this.environment != other.environment) return false;
        if ( ! this.region.equals(other.region())) return false;
        return true;
    }

}
/** A deployment step which is to run deployment to multiple zones in parallel */
public static class ParallelZones extends Step {

// Immutable snapshot of the zones; exposed directly by zones()
private final List<DeclaredZone> zones;

public ParallelZones(List<DeclaredZone> zones) {
this.zones = ImmutableList.copyOf(zones);
}

/** The list of zones to deploy in */
public List<DeclaredZone> zones() { return this.zones; }

/** Returns whether any zone in this group deploys to the given environment/region */
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
return zones.stream().anyMatch(zone -> zone.deploysTo(environment, region));
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ParallelZones)) return false;
ParallelZones that = (ParallelZones) o;
return Objects.equals(zones, that.zones);
}

@Override
public int hashCode() {
return Objects.hash(zones);
}

}
/**
 * Controls when this application will be upgraded to new Vespa versions.
 * NOTE(review): constants appear ordered from earliest to latest upgrading — confirm
 * before relying on ordinal order.
 */
public enum UpgradePolicy {

/** Canary: Applications with this policy will upgrade before any other */
canary,

/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,

/** Will upgrade after most default applications upgraded successfully */
conservative

}
} | |
Thanks, I think it's due to IntelliJ's default behavior. I'll restore it in my next PR. | public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay")) {
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
} else if (stepTag.getTagName().equals("parallel")) {
List<DeclaredZone> zones = new ArrayList<>();
for (Element regionTag : XML.getChildren(stepTag)) {
zones.add(readDeclaredZone(environment, regionTag));
}
steps.add(new ParallelZones(zones));
} else {
steps.add(readDeclaredZone(environment, stepTag));
}
}
} else {
steps.add(new DeclaredZone(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
} | longAttribute("minutes", stepTag) * 60 + | public static DeploymentSpec fromXml(String xmlForm) {
List<Step> steps = new ArrayList<>();
Optional<String> globalServiceId = Optional.empty();
Element root = XML.getDocument(xmlForm).getDocumentElement();
for (Element environmentTag : XML.getChildren(root)) {
if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
Environment environment = Environment.from(environmentTag.getTagName());
if (environment == Environment.prod) {
for (Element stepTag : XML.getChildren(environmentTag)) {
if (stepTag.getTagName().equals("delay")) {
steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
longAttribute("minutes", stepTag) * 60 +
longAttribute("seconds", stepTag))));
} else if (stepTag.getTagName().equals("parallel")) {
List<DeclaredZone> zones = new ArrayList<>();
for (Element regionTag : XML.getChildren(stepTag)) {
zones.add(readDeclaredZone(environment, regionTag));
}
steps.add(new ParallelZones(zones));
} else {
steps.add(readDeclaredZone(environment, stepTag));
}
}
} else {
steps.add(new DeclaredZone(environment));
}
if (environment == Environment.prod)
globalServiceId = readGlobalServiceId(environmentTag);
else if (readGlobalServiceId(environmentTag).isPresent())
throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
}
return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");

// Service to expose through global routing, if any (prod only)
private final Optional<String> globalServiceId;

// When this application upgrades relative to others; defaultPolicy if unspecified
private final UpgradePolicy upgradePolicy;

// Deployment steps in execution order, normalized by completeSteps
private final List<Step> steps;

// The raw XML this was parsed from, or null if constructed programmatically
private final String xmlForm;
/** Creates a spec from its parts, with no backing XML form */
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
/**
 * Canonical constructor: validates the delay budget, completes and reorders the
 * step list, then validates the completed zone list.
 */
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
validateZones(this.steps);
}
/** Throws an IllegalArgumentException if the combined duration of all delay steps exceeds 24 hours. */
private static void validateTotalDelay(List<Step> steps) {
    long totalDelaySeconds = 0;
    for (Step step : steps) {
        if (step instanceof Delay)
            totalDelaySeconds += ((Delay) step).duration().getSeconds();
    }
    if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
        throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
                                           " but max 24 hours is allowed");
}
/** Throw an IllegalArgumentException if any production zone is declared multiple times */
private static void validateZones(List<Step> steps) {
// Collect every declared zone: top-level ones plus those nested in parallel groups
List<DeclaredZone> zones = new ArrayList<>();

steps.stream()
.filter(step -> step instanceof DeclaredZone)
.map(DeclaredZone.class::cast)
.forEach(zones::add);

steps.stream()
.filter(step -> step instanceof ParallelZones)
.map(ParallelZones.class::cast)
.flatMap(parallelZones -> parallelZones.zones().stream())
.forEach(zones::add);

// Stateful filter: unique.add returns false on the second sighting of an equal zone.
// Only prod zones are checked, and zone equality ignores 'active' (see DeclaredZone.equals).
Set<DeclaredZone> unique = new HashSet<>();
List<RegionName> duplicates = zones.stream()
.filter(z -> z.environment() == Environment.prod && !unique.add(z))
.map(z -> z.region().get())
.collect(Collectors.toList());

if (!duplicates.isEmpty()) {
throw new IllegalArgumentException("All declared regions must be unique, but found these " +
"duplicated regions: " + duplicates);
}
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
// Deduplicate while keeping each step's first-occurrence position
steps = new ArrayList<>(new LinkedHashSet<>(steps));
// Deploying to prod requires a staging step ...
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new DeclaredZone(Environment.staging));
}
// ... and staging requires a test step
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new DeclaredZone(Environment.test));
}
// Force ordering: test first, then staging. Index 1 is safe because a present
// staging step implies a test step was ensured above and moved to index 0.
DeclaredZone testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
DeclaredZone stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
 * Removes the first occurrence of a deployment step to the given environment and returns it.
 *
 * @param environment the environment whose step should be removed
 * @param steps the mutable step list to remove from
 * @return the removed step, or null if it is not present
 */
private static DeclaredZone remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
// NOTE(review): assumes the matching step is a DeclaredZone, not a ParallelZones;
// holds for the test/staging callers in completeSteps — confirm for other uses.
if (steps.get(i).deploysTo(environment))
return (DeclaredZone)steps.remove(i);
}
return null;
}
/** The ID of the service to expose through global routing, if one is declared. */
public Optional<String> globalServiceId() { return this.globalServiceId; }
/** The upgrade policy declared for this application; defaultPolicy when none was specified. */
public UpgradePolicy upgradePolicy() {
    return this.upgradePolicy;
}
/** All deployment steps of this, in the order they will be executed. */
public List<Step> steps() {
    return this.steps;
}
/** Only the top-level DeclaredZone steps of this, in execution order (zones inside parallel groups are not included). */
public List<DeclaredZone> zones() {
    List<DeclaredZone> declaredZones = new ArrayList<>();
    for (Step step : steps) {
        if (step instanceof DeclaredZone)
            declaredZones.add((DeclaredZone) step);
    }
    return declaredZones;
}
/** The XML text this spec was parsed from; null when not created by fromXml and not the empty spec. */
public String xmlForm() {
    return this.xmlForm;
}
/** Returns whether some step of this spec covers the given zone, either implicitly or explicitly. */
public boolean includes(Environment environment, Optional<RegionName> region) {
    return steps.stream().anyMatch(step -> step.deploysTo(environment, region));
}
/**
 * Creates a deployment spec from XML read from the given reader.
 *
 * @throws IllegalArgumentException if the XML is invalid, or if the reader cannot be read
 */
public static DeploymentSpec fromXml(Reader reader) {
try {
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
// Wrap I/O failures in the same exception type callers already handle for invalid XML
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
/**
 * Returns the given attribute of the tag parsed as a long, or 0 if the attribute
 * is missing or empty.
 *
 * @throws IllegalArgumentException if the attribute is present but not parseable
 */
private static long longAttribute(String attributeName, Element tag) {
String value = tag.getAttribute(attributeName);
if (value == null || value.isEmpty()) return 0;
try {
return Long.parseLong(value);
}
catch (NumberFormatException e) {
// Report the offending attribute and value instead of the raw parse error
throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
"' but got '" + value + "'");
}
}
/** Returns whether the given tag name denotes one of the deployable environments. */
private static boolean isEnvironmentName(String tagName) {
    switch (tagName) {
        case "test":
        case "staging":
        case "prod":
            return true;
        default:
            return false;
    }
}
/** Parses one region tag into a DeclaredZone belonging to the given environment. */
private static DeclaredZone readDeclaredZone(Environment environment, Element regionTag) {
    RegionName region = RegionName.from(XML.getValue(regionTag).trim());
    boolean active = readActive(regionTag);
    return new DeclaredZone(environment, Optional.of(region), active);
}
/** Reads the 'global-service-id' attribute of an environment tag, or empty if it is missing or blank. */
private static Optional<String> readGlobalServiceId(Element environmentTag) {
    String globalServiceId = environmentTag.getAttribute("global-service-id");
    boolean absent = globalServiceId == null || globalServiceId.isEmpty();
    return absent ? Optional.empty() : Optional.of(globalServiceId);
}
/** Reads the upgrade policy from the optional 'upgrade' child tag; defaults to defaultPolicy. */
private static UpgradePolicy readUpgradePolicy(Element root) {
    Element upgradeElement = XML.getChild(root, "upgrade");
    if (upgradeElement == null) return UpgradePolicy.defaultPolicy;

    String policy = upgradeElement.getAttribute("policy");
    if ("canary".equals(policy)) return UpgradePolicy.canary;
    if ("default".equals(policy)) return UpgradePolicy.defaultPolicy;
    if ("conservative".equals(policy)) return UpgradePolicy.conservative;
    throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
                                       "Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
/**
 * Reads the mandatory 'active' attribute of a prod region tag.
 *
 * @throws IllegalArgumentException if the attribute is missing or not exactly 'true'/'false'
 */
private static boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
// Constant-first equals keeps these checks null-safe
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
"to control whether the region should receive production traffic");
}
/**
 * Returns the messages of the given throwable and its chain of causes joined by ": ".
 * Causes without a message are skipped, as is any message repeating the previously
 * included one.
 */
public static String toMessageString(Throwable t) {
    List<String> messages = new ArrayList<>();
    String previous = null;
    for (Throwable current = t; current != null; current = current.getCause()) {
        String message = current.getMessage();
        if (message == null) continue;
        if (message.equals(previous)) continue;
        messages.add(message);
        previous = message;
    }
    return String.join(": ", messages);
}
/**
 * Command-line entry point, usable by a continuous build.
 * Arguments: [file] [environment] [region]?
 * Exits 0 if the given zone is included in the deployment spec file, 1 otherwise or on error.
 */
public static void main(String[] args) {
    if (args.length != 2 && args.length != 3) {
        // Bug fix: the two string literals were concatenated with no separator,
        // printing "...[region]?Returns 0 ..."; a newline keeps the sentences apart.
        System.err.println("Usage: DeploymentSpec [file] [environment] [region]?\n" +
                           "Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
        System.exit(1);
    }
    try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
        DeploymentSpec spec = DeploymentSpec.fromXml(reader);
        Environment environment = Environment.from(args[1]);
        Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
        if (spec.includes(environment, region))
            System.exit(0);
        else
            System.exit(1);
    }
    catch (Exception e) {
        System.err.println("Exception checking deployment spec: " + toMessageString(e));
        System.exit(1);
    }
}
/** A deployment step */
public abstract static class Step {

/** Returns whether this step deploys to the given environment in any region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}

/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);

}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {

// The pause length; the sum over all Delay steps is validated by validateTotalDelay
private final Duration duration;

public Delay(Duration duration) {
this.duration = duration;
}

/** Returns the duration of this delay */
public Duration duration() { return duration; }

/** A delay deploys to no environment or region */
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }

}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {

    private final Environment environment;

    /** The declared region; empty for non-prod zones. Made final: it is never reassigned,
        and instances are hashed (see hashCode), so the field must not mutate. */
    private final Optional<RegionName> region;

    /** Whether this zone should receive production traffic */
    private final boolean active;

    /** Creates a zone in a non-prod environment: no region, not active */
    public DeclaredZone(Environment environment) {
        this(environment, Optional.empty(), false);
    }

    /**
     * Creates a declared zone.
     *
     * @throws IllegalArgumentException if a region is given outside prod, or omitted in prod
     */
    public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
        if (environment != Environment.prod && region.isPresent())
            throw new IllegalArgumentException("Non-prod environments cannot specify a region");
        if (environment == Environment.prod && ! region.isPresent())
            throw new IllegalArgumentException("Prod environments must be specified with a region");
        this.environment = environment;
        this.region = region;
        this.active = active;
    }

    public Environment environment() { return environment; }

    /** The region name, or empty if not declared */
    public Optional<RegionName> region() { return region; }

    /** Returns whether this zone should receive production traffic */
    public boolean active() { return active; }

    @Override
    public boolean deploysTo(Environment environment, Optional<RegionName> region) {
        if (environment != this.environment) return false;
        // An absent region argument matches any region of this zone
        if (region.isPresent() && ! region.equals(this.region)) return false;
        return true;
    }

    // NOTE(review): equals/hashCode intentionally(?) ignore 'active', so zones differing only
    // in traffic status compare equal (e.g. for duplicate detection) — confirm this is wanted.
    @Override
    public int hashCode() {
        return Objects.hash(environment, region);
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if ( ! (o instanceof DeclaredZone)) return false;
        DeclaredZone other = (DeclaredZone)o;
        if (this.environment != other.environment) return false;
        if ( ! this.region.equals(other.region())) return false;
        return true;
    }

}
/** A deployment step which is to run deployment to multiple zones in parallel */
public static class ParallelZones extends Step {

// Immutable snapshot of the zones; exposed directly by zones()
private final List<DeclaredZone> zones;

public ParallelZones(List<DeclaredZone> zones) {
this.zones = ImmutableList.copyOf(zones);
}

/** The list of zones to deploy in */
public List<DeclaredZone> zones() { return this.zones; }

/** Returns whether any zone in this group deploys to the given environment/region */
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
return zones.stream().anyMatch(zone -> zone.deploysTo(environment, region));
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ParallelZones)) return false;
ParallelZones that = (ParallelZones) o;
return Objects.equals(zones, that.zones);
}

@Override
public int hashCode() {
return Objects.hash(zones);
}

}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default.
 * (Named defaultPolicy because 'default' is a reserved word in Java; the XML value is 'default'.) */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
ImmutableList.of(),
"<deployment version='1.0'/>");
// The service to expose through global routing, if any (the 'global-service-id' XML attribute)
private final Optional<String> globalServiceId;
// How eagerly this application upgrades to new Vespa versions
private final UpgradePolicy upgradePolicy;
// Deployment steps in execution order, after completion/reordering by completeSteps
private final List<Step> steps;
// The exact XML this spec was parsed from, or null if not created by fromXml
private final String xmlForm;
/** Creates a spec with no associated XML form. */
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
this(globalServiceId, upgradePolicy, steps, null);
}
/**
 * All-argument constructor. Order matters: the total delay is validated on the steps as
 * given, the steps are then completed and reordered, and zone uniqueness is validated on
 * the completed step list.
 */
private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
// may be null; see xmlForm()
this.xmlForm = xmlForm;
validateZones(this.steps);
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private static void validateTotalDelay(List<Step> steps) {
    long delaySeconds = 0;
    for (Step step : steps)
        if (step instanceof Delay)
            delaySeconds += ((Delay) step).duration().getSeconds();
    if (delaySeconds > Duration.ofHours(24).getSeconds())
        throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(delaySeconds) +
                                           " but max 24 hours is allowed");
}
/** Throw an IllegalArgumentException if any production zone is declared multiple times */
private static void validateZones(List<Step> steps) {
    // Collect directly declared zones first, then zones nested in parallel steps,
    // preserving the original two-pass ordering (it determines the message text)
    List<DeclaredZone> zones = new ArrayList<>();
    for (Step step : steps)
        if (step instanceof DeclaredZone)
            zones.add((DeclaredZone) step);
    for (Step step : steps)
        if (step instanceof ParallelZones)
            zones.addAll(((ParallelZones) step).zones());

    // A prod zone that fails to enter the set has been seen before: record its region
    Set<DeclaredZone> seen = new HashSet<>();
    List<RegionName> duplicates = new ArrayList<>();
    for (DeclaredZone zone : zones)
        if (zone.environment() == Environment.prod && ! seen.add(zone))
            duplicates.add(zone.region().get());

    if ( ! duplicates.isEmpty()) {
        throw new IllegalArgumentException("All declared regions must be unique, but found these " +
                                           "duplicated regions: " + duplicates);
    }
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<Step> completeSteps(List<Step> steps) {
// Drop duplicate steps while keeping each first occurrence's position
steps = new ArrayList<>(new LinkedHashSet<>(steps));
// Deploying to prod implies a staging run, and staging implies a test run
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new DeclaredZone(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new DeclaredZone(Environment.test));
}
// Move the test step to the front, then staging right after it;
// the remaining (prod) steps keep their relative order
DeclaredZone testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
DeclaredZone stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
 * Removes the first occurrence of a deployment step to the given environment and returns it.
 *
 * @param environment the environment whose first deployment step should be removed
 * @param steps the mutable step list to remove from
 * @return the removed step, or null if it is not present
 */
private static DeclaredZone remove(Environment environment, List<Step> steps) {
for (int i = 0; i < steps.size(); i++) {
// NOTE(review): assumes the first matching step is a DeclaredZone; a ParallelZones
// matching here would throw ClassCastException — confirm callers only pass test/staging
if (steps.get(i).deploysTo(environment))
return (DeclaredZone)steps.remove(i);
}
return null;
}
/** Returns the ID of the service to expose through global routing, if present */
public Optional<String> globalServiceId() {
return globalServiceId;
}
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
/** Returns only the DeclaredZone deployment steps of this in the order they will be performed */
public List<DeclaredZone> zones() {
// NOTE(review): zones nested inside ParallelZones steps are not included here — confirm intended
return steps.stream().filter(step -> step instanceof DeclaredZone).map(DeclaredZone.class::cast)
.collect(Collectors.toList());
}
/** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
/** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
public boolean includes(Environment environment, Optional<RegionName> region) {
for (Step step : steps)
if (step.deploysTo(environment, region)) return true;
return false;
}
/**
 * Creates a deployment spec from XML.
 *
 * @throws IllegalArgumentException if the XML is invalid or the reader cannot be read
 */
public static DeploymentSpec fromXml(Reader reader) {
try {
// Drain the reader and delegate to the String overload, which also records the xmlForm
return fromXml(IOUtils.readAll(reader));
}
catch (IOException e) {
throw new IllegalArgumentException("Could not read deployment spec", e);
}
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
/** Returns the given attribute as a long, or 0 if it is not present */
private static long longAttribute(String attributeName, Element tag) {
    String value = tag.getAttribute(attributeName);
    if (value == null || value.isEmpty()) return 0;
    try {
        return Long.parseLong(value);
    }
    catch (NumberFormatException e) {
        // Chain the cause so the original parse failure is not lost
        throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
                                           "' but got '" + value + "'", e);
    }
}
/** Returns whether the given tag name denotes one of the deployable environments */
private static boolean isEnvironmentName(String tagName) {
    switch (tagName) {
        case "test": case "staging": case "prod": return true;
        default: return false;
    }
}
/**
 * Reads one region tag into a DeclaredZone; the tag's text content is the region name (trimmed).
 * NOTE(review): a region is always set here, so DeclaredZone's constructor requires the
 * environment to be prod — confirm callers only pass prod.
 */
private static DeclaredZone readDeclaredZone(Environment environment, Element regionTag) {
return new DeclaredZone(environment, Optional.of(RegionName.from(XML.getValue(regionTag).trim())),
readActive(regionTag));
}
/** Reads the 'global-service-id' attribute, empty when absent or blank */
private static Optional<String> readGlobalServiceId(Element environmentTag) {
    String globalServiceId = environmentTag.getAttribute("global-service-id");
    boolean missing = (globalServiceId == null || globalServiceId.isEmpty());
    return missing ? Optional.empty() : Optional.of(globalServiceId);
}
/** Reads the optional &lt;upgrade policy='...'/&gt; tag; a missing tag means the default policy */
private static UpgradePolicy readUpgradePolicy(Element root) {
Element upgradeElement = XML.getChild(root, "upgrade");
if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
String policy = upgradeElement.getAttribute("policy");
switch (policy) {
case "canary" : return UpgradePolicy.canary;
// XML value is 'default'; the constant is defaultPolicy since 'default' is reserved in Java
case "default" : return UpgradePolicy.defaultPolicy;
case "conservative" : return UpgradePolicy.conservative;
// NOTE(review): this message prints enum constant names ('defaultPolicy'), not the
// accepted XML values ('default') — confirm whether that is acceptable
default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
"Must be one of " + Arrays.toString(UpgradePolicy.values()));
}
}
/** Reads the mandatory 'active' attribute of a region tag; anything but 'true'/'false' is rejected */
private static boolean readActive(Element regionTag) {
    String activeValue = regionTag.getAttribute("active");
    if ( ! "true".equals(activeValue) && ! "false".equals(activeValue))
        throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
                                           "to control whether the region should receive production traffic");
    return "true".equals(activeValue);
}
/**
 * Joins the messages along a throwable's cause chain with ": ", skipping null messages
 * and messages identical to the immediately preceding one.
 */
public static String toMessageString(Throwable t) {
    StringBuilder joined = new StringBuilder();
    String previous = null;
    for (Throwable cause = t; cause != null; cause = cause.getCause()) {
        String message = cause.getMessage();
        if (message == null || message.equals(previous)) continue;
        if (joined.length() > 0)
            joined.append(": ");
        joined.append(message);
        previous = message;
    }
    return joined.toString();
}
/**
 * This may be invoked by a continuous build.
 * Exits 0 if the zone given by args[1] (and optionally args[2]) is included in the
 * deployment spec read from the file args[0], 1 otherwise or on any error.
 */
public static void main(String[] args) {
    if (args.length != 2 && args.length != 3) {
        // Fixed: the two halves of this message previously ran together on one line
        System.err.println("Usage: DeploymentSpec [file] [environment] [region]?\n" +
                           "Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
        System.exit(1);
    }
    try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
        DeploymentSpec spec = DeploymentSpec.fromXml(reader);
        Environment environment = Environment.from(args[1]);
        Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
        if (spec.includes(environment, region))
            System.exit(0);
        else
            System.exit(1);
    }
    catch (Exception e) {
        System.err.println("Exception checking deployment spec: " + toMessageString(e));
        System.exit(1);
    }
}
/** A deployment step */
public abstract static class Step {
/** Returns whether this step deploys to the given environment, in any region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
/** Returns how long this step waits */
public Duration duration() { return duration; }
/** A delay deploys to no zone, so this always returns false */
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {

    private final Environment environment;

    // Made final for consistency with the other fields: this class is immutable by design
    private final Optional<RegionName> region;

    private final boolean active;

    /** Creates a non-prod zone, which has no region and receives no production traffic */
    public DeclaredZone(Environment environment) {
        this(environment, Optional.empty(), false);
    }

    /**
     * @param environment the environment to deploy to
     * @param region the region, which must be present exactly when the environment is prod
     * @param active whether this zone should receive production traffic
     * @throws IllegalArgumentException if region presence does not match the environment
     */
    public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
        if (environment != Environment.prod && region.isPresent())
            throw new IllegalArgumentException("Non-prod environments cannot specify a region");
        if (environment == Environment.prod && ! region.isPresent())
            throw new IllegalArgumentException("Prod environments must be specified with a region");
        this.environment = environment;
        this.region = region;
        this.active = active;
    }

    public Environment environment() { return environment; }

    /** The region name, or empty if not declared */
    public Optional<RegionName> region() { return region; }

    /** Returns whether this zone should receive production traffic */
    public boolean active() { return active; }

    @Override
    public boolean deploysTo(Environment environment, Optional<RegionName> region) {
        if (environment != this.environment) return false;
        if (region.isPresent() && ! region.equals(this.region)) return false;
        return true;
    }

    /** Consistent with equals: both consider environment and region, and ignore 'active' */
    @Override
    public int hashCode() {
        return Objects.hash(environment, region);
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if ( ! (o instanceof DeclaredZone)) return false;
        DeclaredZone other = (DeclaredZone)o;
        if (this.environment != other.environment) return false;
        if ( ! this.region.equals(other.region())) return false;
        return true;
    }

}
/** A deployment step which is to run deployment to multiple zones in parallel */
public static class ParallelZones extends Step {

    private final List<DeclaredZone> zones;

    public ParallelZones(List<DeclaredZone> zones) {
        this.zones = ImmutableList.copyOf(zones); // defensive immutable snapshot
    }

    /** The list of zones to deploy in */
    public List<DeclaredZone> zones() { return this.zones; }

    /** Deploys to the given zone if any of the contained zones does */
    @Override
    public boolean deploysTo(Environment environment, Optional<RegionName> region) {
        return zones.stream().anyMatch(declaredZone -> declaredZone.deploysTo(environment, region));
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if ( ! (o instanceof ParallelZones)) return false;
        ParallelZones other = (ParallelZones) o;
        return Objects.equals(this.zones, other.zones);
    }

    @Override
    public int hashCode() {
        return Objects.hash(zones);
    }

}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default.
 * (Named defaultPolicy because 'default' is a reserved word in Java; the XML value is 'default'.) */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
} |
It seems this no longer lists all duplicates: it fails to mention "prod.us-central-1", which appears in both `<parallel>` tags. | public void productionSpecWithDuplicateRegions() {
StringReader r = new StringReader(
"<deployment>\n" +
" <prod>\n" +
" <region active='true'>us-west-1</region>\n" +
" <parallel>\n" +
" <region active='true'>us-west-1</region>\n" +
" <region active='true'>us-central-1</region>\n" +
" <region active='true'>us-east-3</region>\n" +
" </parallel>\n" +
" <parallel>\n" +
" <region active='true'>eu-west-1</region>\n" +
" <region active='true'>us-central-1</region>\n" +
" </parallel>\n" +
" </prod>\n" +
"</deployment>"
);
try {
DeploymentSpec.fromXml(r);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage());
}
} | assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage()); | public void productionSpecWithDuplicateRegions() {
StringReader r = new StringReader(
"<deployment>\n" +
" <prod>\n" +
" <region active='true'>us-west-1</region>\n" +
" <parallel>\n" +
" <region active='true'>us-west-1</region>\n" +
" <region active='true'>us-central-1</region>\n" +
" <region active='true'>us-east-3</region>\n" +
" </parallel>\n" +
" </prod>\n" +
"</deployment>"
);
try {
DeploymentSpec.fromXml(r);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage());
}
} | class DeploymentSpecTest {
// Minimal spec declaring only a test environment: xmlForm() must round-trip the exact input XML
@Test
public void testSpec() {
String specXml = "<deployment version='1.0'>" +
"   <test/>" +
"</deployment>";
StringReader r = new StringReader(specXml);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(specXml, spec.xmlForm());
assertEquals(1, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertFalse(spec.includes(Environment.staging, Optional.empty()));
assertFalse(spec.includes(Environment.prod, Optional.empty()));
assertFalse(spec.globalServiceId().isPresent());
}
// Declaring staging implicitly adds a preceding test step (see DeploymentSpec.completeSteps)
@Test
public void stagingSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
"   <staging/>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(2, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertFalse(spec.includes(Environment.prod, Optional.empty()));
assertFalse(spec.globalServiceId().isPresent());
}
// Declaring only prod zones implicitly prepends test and staging steps, so 2 regions give 4 steps
@Test
public void minimalProductionSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
"   <prod>" +
"      <region active='false'>us-east1</region>" +
"      <region active='true'>us-west1</region>" +
"   </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(4, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active());
assertTrue(spec.steps().get(3).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(3)).active());
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(spec.globalServiceId().isPresent());
assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.upgradePolicy());
}
// Duplicated <test/> and us-east1 entries are deduplicated by completeSteps (equal steps are
// collapsed via a LinkedHashSet), so the result has 5 steps and 4 zones, with the delay at index 3
@Test
public void maximalProductionSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
"   <test/>" +
"   <test/>" +
"   <staging/>" +
"   <prod>" +
"      <region active='false'>us-east1</region>" +
"      <region active='false'>us-east1</region>" +
"      <delay hours='3' minutes='30'/>" +
"      <region active='true'>us-west1</region>" +
"   </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(5, spec.steps().size());
assertEquals(4, spec.zones().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active());
assertTrue(spec.steps().get(3) instanceof DeploymentSpec.Delay);
assertEquals(3 * 60 * 60 + 30 * 60, ((DeploymentSpec.Delay)spec.steps().get(3)).duration().getSeconds());
assertTrue(spec.steps().get(4).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(4)).active());
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(spec.globalServiceId().isPresent());
}
// The 'global-service-id' attribute on the prod tag is surfaced through globalServiceId()
@Test
public void productionSpecWithGlobalServiceId() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
"    <prod global-service-id='query'>" +
"        <region active='true'>us-east-1</region>" +
"        <region active='true'>us-west-1</region>" +
"    </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(spec.globalServiceId(), Optional.of("query"));
}
// 'global-service-id' is only meaningful on prod, so declaring it on test must be rejected
@Test(expected=IllegalArgumentException.class)
public void globalServiceIdInTest() {
    StringReader r = new StringReader(
            "<deployment version='1.0'>" +
            "    <test global-service-id='query' />" +
            "</deployment>"
    );
    // Parsing itself must throw; the unused local holding the result has been removed
    DeploymentSpec.fromXml(r);
}
// 'global-service-id' is only meaningful on prod, so declaring it on staging must be rejected
@Test(expected=IllegalArgumentException.class)
public void globalServiceIdInStaging() {
    StringReader r = new StringReader(
            "<deployment version='1.0'>" +
            "    <staging global-service-id='query' />" +
            "</deployment>"
    );
    // Parsing itself must throw; the unused local holding the result has been removed
    DeploymentSpec.fromXml(r);
}
// Step order in the XML (prod before staging) must not affect reading the global service id
@Test
public void productionSpecWithGlobalServiceIdBeforeStaging() {
StringReader r = new StringReader(
"<deployment>" +
"    <test/>" +
"    <prod global-service-id='qrs'>" +
"        <region active='true'>us-west-1</region>" +
"        <region active='true'>us-central-1</region>" +
"        <region active='true'>us-east-3</region>" +
"    </prod>" +
"    <staging/>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("qrs", spec.globalServiceId().get());
}
// The <upgrade policy='canary'/> tag maps to UpgradePolicy.canary
@Test
public void productionSpecWithUpgradePolicy() {
StringReader r = new StringReader(
"<deployment>" +
"    <upgrade policy='canary'/>" +
"    <prod>" +
"        <region active='true'>us-west-1</region>" +
"        <region active='true'>us-central-1</region>" +
"        <region active='true'>us-east-3</region>" +
"    </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("canary", spec.upgradePolicy().toString());
}
// 23h + 59m61s = 24h1s total delay, one second over the 24h limit enforced by validateTotalDelay
@Test
public void maxDelayExceeded() {
try {
StringReader r = new StringReader(
"<deployment>" +
"    <upgrade policy='canary'/>" +
"    <prod>" +
"        <region active='true'>us-west-1</region>" +
"        <delay hours='23'/>" +
"        <region active='true'>us-central-1</region>" +
"        <delay minutes='59' seconds='61'/>" +
"        <region active='true'>us-east-3</region>" +
"    </prod>" +
"</deployment>"
);
DeploymentSpec.fromXml(r);
fail("Expected exception due to exceeding the max total delay");
}
catch (IllegalArgumentException e) {
assertEquals("The total delay specified is PT24H1S but max 24 hours is allowed", e.getMessage());
}
}
// The canonical empty spec: no steps, default policy, no global service id, canonical XML form
@Test
public void testEmpty() {
assertFalse(DeploymentSpec.empty.globalServiceId().isPresent());
assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, DeploymentSpec.empty.upgradePolicy());
assertTrue(DeploymentSpec.empty.steps().isEmpty());
assertEquals("<deployment version='1.0'/>", DeploymentSpec.empty.xmlForm());
}
// The <parallel> tag becomes a single ParallelZones step; index 3 because implicit
// test and staging steps are prepended before the us-west-1 zone
@Test
public void productionSpecWithParallelDeployments() {
StringReader r = new StringReader(
"<deployment>\n" +
"    <prod> \n" +
"        <region active='true'>us-west-1</region>\n" +
"        <parallel>\n" +
"            <region active='true'>us-central-1</region>\n" +
"            <region active='true'>us-east-3</region>\n" +
"        </parallel>\n" +
"    </prod>\n" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
DeploymentSpec.ParallelZones parallelZones = ((DeploymentSpec.ParallelZones) spec.steps().get(3));
assertEquals(2, parallelZones.zones().size());
assertEquals(RegionName.from("us-central-1"), parallelZones.zones().get(0).region().get());
assertEquals(RegionName.from("us-east-3"), parallelZones.zones().get(1).region().get());
}
@Test
} | class DeploymentSpecTest {
@Test
public void testSpec() {
String specXml = "<deployment version='1.0'>" +
" <test/>" +
"</deployment>";
StringReader r = new StringReader(specXml);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(specXml, spec.xmlForm());
assertEquals(1, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertFalse(spec.includes(Environment.staging, Optional.empty()));
assertFalse(spec.includes(Environment.prod, Optional.empty()));
assertFalse(spec.globalServiceId().isPresent());
}
@Test
public void stagingSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <staging/>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(2, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertFalse(spec.includes(Environment.prod, Optional.empty()));
assertFalse(spec.globalServiceId().isPresent());
}
@Test
public void minimalProductionSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <prod>" +
" <region active='false'>us-east1</region>" +
" <region active='true'>us-west1</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(4, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active());
assertTrue(spec.steps().get(3).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(3)).active());
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(spec.globalServiceId().isPresent());
assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.upgradePolicy());
}
@Test
public void maximalProductionSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <test/>" +
" <test/>" +
" <staging/>" +
" <prod>" +
" <region active='false'>us-east1</region>" +
" <region active='false'>us-east1</region>" +
" <delay hours='3' minutes='30'/>" +
" <region active='true'>us-west1</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(5, spec.steps().size());
assertEquals(4, spec.zones().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active());
assertTrue(spec.steps().get(3) instanceof DeploymentSpec.Delay);
assertEquals(3 * 60 * 60 + 30 * 60, ((DeploymentSpec.Delay)spec.steps().get(3)).duration().getSeconds());
assertTrue(spec.steps().get(4).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(4)).active());
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(spec.globalServiceId().isPresent());
}
@Test
public void productionSpecWithGlobalServiceId() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <prod global-service-id='query'>" +
" <region active='true'>us-east-1</region>" +
" <region active='true'>us-west-1</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(spec.globalServiceId(), Optional.of("query"));
}
@Test(expected=IllegalArgumentException.class)
public void globalServiceIdInTest() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <test global-service-id='query' />" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
}
@Test(expected=IllegalArgumentException.class)
public void globalServiceIdInStaging() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <staging global-service-id='query' />" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
}
@Test
public void productionSpecWithGlobalServiceIdBeforeStaging() {
StringReader r = new StringReader(
"<deployment>" +
" <test/>" +
" <prod global-service-id='qrs'>" +
" <region active='true'>us-west-1</region>" +
" <region active='true'>us-central-1</region>" +
" <region active='true'>us-east-3</region>" +
" </prod>" +
" <staging/>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("qrs", spec.globalServiceId().get());
}
@Test
public void productionSpecWithUpgradePolicy() {
StringReader r = new StringReader(
"<deployment>" +
" <upgrade policy='canary'/>" +
" <prod>" +
" <region active='true'>us-west-1</region>" +
" <region active='true'>us-central-1</region>" +
" <region active='true'>us-east-3</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("canary", spec.upgradePolicy().toString());
}
@Test
public void maxDelayExceeded() {
try {
StringReader r = new StringReader(
"<deployment>" +
" <upgrade policy='canary'/>" +
" <prod>" +
" <region active='true'>us-west-1</region>" +
" <delay hours='23'/>" +
" <region active='true'>us-central-1</region>" +
" <delay minutes='59' seconds='61'/>" +
" <region active='true'>us-east-3</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec.fromXml(r);
fail("Expected exception due to exceeding the max total delay");
}
catch (IllegalArgumentException e) {
assertEquals("The total delay specified is PT24H1S but max 24 hours is allowed", e.getMessage());
}
}
@Test
public void testEmpty() {
assertFalse(DeploymentSpec.empty.globalServiceId().isPresent());
assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, DeploymentSpec.empty.upgradePolicy());
assertTrue(DeploymentSpec.empty.steps().isEmpty());
assertEquals("<deployment version='1.0'/>", DeploymentSpec.empty.xmlForm());
}
@Test
public void productionSpecWithParallelDeployments() {
StringReader r = new StringReader(
"<deployment>\n" +
" <prod> \n" +
" <region active='true'>us-west-1</region>\n" +
" <parallel>\n" +
" <region active='true'>us-central-1</region>\n" +
" <region active='true'>us-east-3</region>\n" +
" </parallel>\n" +
" </prod>\n" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
DeploymentSpec.ParallelZones parallelZones = ((DeploymentSpec.ParallelZones) spec.steps().get(3));
assertEquals(2, parallelZones.zones().size());
assertEquals(RegionName.from("us-central-1"), parallelZones.zones().get(0).region().get());
assertEquals(RegionName.from("us-east-3"), parallelZones.zones().get(1).region().get());
}
@Test
} |
Sure, but I don't think it's ever worth additional code to create more complete error messages like that: the probability of anyone hitting the case is very small, and the added value when they do is also very small. | public void productionSpecWithDuplicateRegions() {
StringReader r = new StringReader(
"<deployment>\n" +
" <prod>\n" +
" <region active='true'>us-west-1</region>\n" +
" <parallel>\n" +
" <region active='true'>us-west-1</region>\n" +
" <region active='true'>us-central-1</region>\n" +
" <region active='true'>us-east-3</region>\n" +
" </parallel>\n" +
" <parallel>\n" +
" <region active='true'>eu-west-1</region>\n" +
" <region active='true'>us-central-1</region>\n" +
" </parallel>\n" +
" </prod>\n" +
"</deployment>"
);
try {
DeploymentSpec.fromXml(r);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage());
}
} | assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage()); | public void productionSpecWithDuplicateRegions() {
StringReader r = new StringReader(
"<deployment>\n" +
" <prod>\n" +
" <region active='true'>us-west-1</region>\n" +
" <parallel>\n" +
" <region active='true'>us-west-1</region>\n" +
" <region active='true'>us-central-1</region>\n" +
" <region active='true'>us-east-3</region>\n" +
" </parallel>\n" +
" </prod>\n" +
"</deployment>"
);
try {
DeploymentSpec.fromXml(r);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("prod.us-west-1 is listed twice in deployment.xml", e.getMessage());
}
} | class DeploymentSpecTest {
@Test
public void testSpec() {
String specXml = "<deployment version='1.0'>" +
" <test/>" +
"</deployment>";
StringReader r = new StringReader(specXml);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(specXml, spec.xmlForm());
assertEquals(1, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertFalse(spec.includes(Environment.staging, Optional.empty()));
assertFalse(spec.includes(Environment.prod, Optional.empty()));
assertFalse(spec.globalServiceId().isPresent());
}
@Test
public void stagingSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <staging/>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(2, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertFalse(spec.includes(Environment.prod, Optional.empty()));
assertFalse(spec.globalServiceId().isPresent());
}
@Test
public void minimalProductionSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <prod>" +
" <region active='false'>us-east1</region>" +
" <region active='true'>us-west1</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(4, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active());
assertTrue(spec.steps().get(3).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(3)).active());
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(spec.globalServiceId().isPresent());
assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.upgradePolicy());
}
@Test
public void maximalProductionSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <test/>" +
" <test/>" +
" <staging/>" +
" <prod>" +
" <region active='false'>us-east1</region>" +
" <region active='false'>us-east1</region>" +
" <delay hours='3' minutes='30'/>" +
" <region active='true'>us-west1</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(5, spec.steps().size());
assertEquals(4, spec.zones().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active());
assertTrue(spec.steps().get(3) instanceof DeploymentSpec.Delay);
assertEquals(3 * 60 * 60 + 30 * 60, ((DeploymentSpec.Delay)spec.steps().get(3)).duration().getSeconds());
assertTrue(spec.steps().get(4).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(4)).active());
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(spec.globalServiceId().isPresent());
}
@Test
public void productionSpecWithGlobalServiceId() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <prod global-service-id='query'>" +
" <region active='true'>us-east-1</region>" +
" <region active='true'>us-west-1</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(spec.globalServiceId(), Optional.of("query"));
}
@Test(expected=IllegalArgumentException.class)
public void globalServiceIdInTest() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <test global-service-id='query' />" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
}
@Test(expected=IllegalArgumentException.class)
public void globalServiceIdInStaging() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <staging global-service-id='query' />" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
}
@Test
public void productionSpecWithGlobalServiceIdBeforeStaging() {
StringReader r = new StringReader(
"<deployment>" +
" <test/>" +
" <prod global-service-id='qrs'>" +
" <region active='true'>us-west-1</region>" +
" <region active='true'>us-central-1</region>" +
" <region active='true'>us-east-3</region>" +
" </prod>" +
" <staging/>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("qrs", spec.globalServiceId().get());
}
@Test
public void productionSpecWithUpgradePolicy() {
StringReader r = new StringReader(
"<deployment>" +
" <upgrade policy='canary'/>" +
" <prod>" +
" <region active='true'>us-west-1</region>" +
" <region active='true'>us-central-1</region>" +
" <region active='true'>us-east-3</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("canary", spec.upgradePolicy().toString());
}
@Test
public void maxDelayExceeded() {
try {
StringReader r = new StringReader(
"<deployment>" +
" <upgrade policy='canary'/>" +
" <prod>" +
" <region active='true'>us-west-1</region>" +
" <delay hours='23'/>" +
" <region active='true'>us-central-1</region>" +
" <delay minutes='59' seconds='61'/>" +
" <region active='true'>us-east-3</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec.fromXml(r);
fail("Expected exception due to exceeding the max total delay");
}
catch (IllegalArgumentException e) {
assertEquals("The total delay specified is PT24H1S but max 24 hours is allowed", e.getMessage());
}
}
@Test
public void testEmpty() {
assertFalse(DeploymentSpec.empty.globalServiceId().isPresent());
assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, DeploymentSpec.empty.upgradePolicy());
assertTrue(DeploymentSpec.empty.steps().isEmpty());
assertEquals("<deployment version='1.0'/>", DeploymentSpec.empty.xmlForm());
}
@Test
public void productionSpecWithParallelDeployments() {
StringReader r = new StringReader(
"<deployment>\n" +
" <prod> \n" +
" <region active='true'>us-west-1</region>\n" +
" <parallel>\n" +
" <region active='true'>us-central-1</region>\n" +
" <region active='true'>us-east-3</region>\n" +
" </parallel>\n" +
" </prod>\n" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
DeploymentSpec.ParallelZones parallelZones = ((DeploymentSpec.ParallelZones) spec.steps().get(3));
assertEquals(2, parallelZones.zones().size());
assertEquals(RegionName.from("us-central-1"), parallelZones.zones().get(0).region().get());
assertEquals(RegionName.from("us-east-3"), parallelZones.zones().get(1).region().get());
}
@Test
} | class DeploymentSpecTest {
@Test
public void testSpec() {
String specXml = "<deployment version='1.0'>" +
" <test/>" +
"</deployment>";
StringReader r = new StringReader(specXml);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(specXml, spec.xmlForm());
assertEquals(1, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertFalse(spec.includes(Environment.staging, Optional.empty()));
assertFalse(spec.includes(Environment.prod, Optional.empty()));
assertFalse(spec.globalServiceId().isPresent());
}
@Test
public void stagingSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <staging/>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(2, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertFalse(spec.includes(Environment.prod, Optional.empty()));
assertFalse(spec.globalServiceId().isPresent());
}
@Test
public void minimalProductionSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <prod>" +
" <region active='false'>us-east1</region>" +
" <region active='true'>us-west1</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(4, spec.steps().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active());
assertTrue(spec.steps().get(3).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(3)).active());
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(spec.globalServiceId().isPresent());
assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.upgradePolicy());
}
@Test
public void maximalProductionSpec() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <test/>" +
" <test/>" +
" <staging/>" +
" <prod>" +
" <region active='false'>us-east1</region>" +
" <region active='false'>us-east1</region>" +
" <delay hours='3' minutes='30'/>" +
" <region active='true'>us-west1</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(5, spec.steps().size());
assertEquals(4, spec.zones().size());
assertTrue(spec.steps().get(0).deploysTo(Environment.test));
assertTrue(spec.steps().get(1).deploysTo(Environment.staging));
assertTrue(spec.steps().get(2).deploysTo(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)spec.steps().get(2)).active());
assertTrue(spec.steps().get(3) instanceof DeploymentSpec.Delay);
assertEquals(3 * 60 * 60 + 30 * 60, ((DeploymentSpec.Delay)spec.steps().get(3)).duration().getSeconds());
assertTrue(spec.steps().get(4).deploysTo(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)spec.steps().get(4)).active());
assertTrue(spec.includes(Environment.test, Optional.empty()));
assertFalse(spec.includes(Environment.test, Optional.of(RegionName.from("region1"))));
assertTrue(spec.includes(Environment.staging, Optional.empty()));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-east1"))));
assertTrue(spec.includes(Environment.prod, Optional.of(RegionName.from("us-west1"))));
assertFalse(spec.includes(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(spec.globalServiceId().isPresent());
}
@Test
public void productionSpecWithGlobalServiceId() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <prod global-service-id='query'>" +
" <region active='true'>us-east-1</region>" +
" <region active='true'>us-west-1</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(spec.globalServiceId(), Optional.of("query"));
}
@Test(expected=IllegalArgumentException.class)
public void globalServiceIdInTest() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <test global-service-id='query' />" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
}
@Test(expected=IllegalArgumentException.class)
public void globalServiceIdInStaging() {
StringReader r = new StringReader(
"<deployment version='1.0'>" +
" <staging global-service-id='query' />" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
}
@Test
public void productionSpecWithGlobalServiceIdBeforeStaging() {
StringReader r = new StringReader(
"<deployment>" +
" <test/>" +
" <prod global-service-id='qrs'>" +
" <region active='true'>us-west-1</region>" +
" <region active='true'>us-central-1</region>" +
" <region active='true'>us-east-3</region>" +
" </prod>" +
" <staging/>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("qrs", spec.globalServiceId().get());
}
@Test
public void productionSpecWithUpgradePolicy() {
StringReader r = new StringReader(
"<deployment>" +
" <upgrade policy='canary'/>" +
" <prod>" +
" <region active='true'>us-west-1</region>" +
" <region active='true'>us-central-1</region>" +
" <region active='true'>us-east-3</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("canary", spec.upgradePolicy().toString());
}
@Test
public void maxDelayExceeded() {
try {
StringReader r = new StringReader(
"<deployment>" +
" <upgrade policy='canary'/>" +
" <prod>" +
" <region active='true'>us-west-1</region>" +
" <delay hours='23'/>" +
" <region active='true'>us-central-1</region>" +
" <delay minutes='59' seconds='61'/>" +
" <region active='true'>us-east-3</region>" +
" </prod>" +
"</deployment>"
);
DeploymentSpec.fromXml(r);
fail("Expected exception due to exceeding the max total delay");
}
catch (IllegalArgumentException e) {
assertEquals("The total delay specified is PT24H1S but max 24 hours is allowed", e.getMessage());
}
}
@Test
public void testEmpty() {
assertFalse(DeploymentSpec.empty.globalServiceId().isPresent());
assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, DeploymentSpec.empty.upgradePolicy());
assertTrue(DeploymentSpec.empty.steps().isEmpty());
assertEquals("<deployment version='1.0'/>", DeploymentSpec.empty.xmlForm());
}
@Test
public void productionSpecWithParallelDeployments() {
StringReader r = new StringReader(
"<deployment>\n" +
" <prod> \n" +
" <region active='true'>us-west-1</region>\n" +
" <parallel>\n" +
" <region active='true'>us-central-1</region>\n" +
" <region active='true'>us-east-3</region>\n" +
" </parallel>\n" +
" </prod>\n" +
"</deployment>"
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
DeploymentSpec.ParallelZones parallelZones = ((DeploymentSpec.ParallelZones) spec.steps().get(3));
assertEquals(2, parallelZones.zones().size());
assertEquals(RegionName.from("us-central-1"), parallelZones.zones().get(0).region().get());
assertEquals(RegionName.from("us-east-3"), parallelZones.zones().get(1).region().get());
}
@Test
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.