comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
No, because `nodes` is a list of `Node`, not `String`. I can replace it with this version for increased code clarity. ```Java Set<String> notFoundNodes = new HashSet<>(hostnames); notFoundNodes.removeAll(nodes.stream().map(Node::hostname).collect(Collectors.toList())); ``` | public List<Node> nodesFromHostnames(List<String> hostnames) {
List<Node> nodes = hostnames.stream()
.filter(h -> nodeMap.containsKey(h))
.map(h -> nodeMap.get(h))
.collect(Collectors.toList());
if (nodes.size() != hostnames.size()) {
List<String> notFoundNodes = hostnames.stream()
.filter(h -> !nodes.stream()
.map(Node::hostname).collect(Collectors.toSet()).contains(h))
.collect(Collectors.toList());
throw new IllegalArgumentException(String.format("Host(s) not found: [ %s ]",
String.join(", ", notFoundNodes)));
}
return nodes;
} | List<String> notFoundNodes = hostnames.stream() | public List<Node> nodesFromHostnames(List<String> hostnames) {
List<Node> nodes = hostnames.stream()
.filter(h -> nodeMap.containsKey(h))
.map(h -> nodeMap.get(h))
.collect(Collectors.toList());
if (nodes.size() != hostnames.size()) {
Set<String> notFoundNodes = new HashSet<>(hostnames);
notFoundNodes.removeAll(nodes.stream().map(Node::hostname).collect(Collectors.toList()));
throw new IllegalArgumentException(String.format("Host(s) not found: [ %s ]",
String.join(", ", notFoundNodes)));
}
return nodes;
} | class CapacityChecker {
private List<Node> hosts;
Map<String, Node> nodeMap;
private Map<Node, List<Node>> nodeChildren;
private Map<Node, AllocationResources> availableResources;
public AllocationHistory allocationHistory = null;
public CapacityChecker(NodeRepository nodeRepository) {
this.hosts = getHosts(nodeRepository);
List<Node> tenants = getTenants(nodeRepository, hosts);
nodeMap = constructHostnameToNodeMap(hosts);
this.nodeChildren = constructNodeChildrenMap(tenants, hosts, nodeMap);
this.availableResources = constructAvailableResourcesMap(hosts, nodeChildren);
}
/** Returns the hosts this checker operates on. NOTE(review): exposes the internal mutable list — consider returning a copy if callers should not mutate it. */
public List<Node> getHosts() {
return hosts;
}
/**
 * Finds a minimal-looking set of host losses that leads to an unrecoverable allocation state,
 * ranking hosts by the repeated-removal heuristic before searching greedily.
 */
public Optional<HostFailurePath> worstCaseHostLossLeadingToFailure() {
Map<Node, Integer> timesNodeCanBeRemoved = computeMaximalRepeatedRemovals(hosts, nodeChildren, availableResources);
return greedyHeuristicFindFailurePath(timesNodeCanBeRemoved, hosts, nodeChildren, availableResources);
}
/** Returns hosts whose committed resources exceed capacity (negative available cpu, memory, or disk). */
protected List<Node> findOvercommittedHosts() {
return findOvercommittedNodes(availableResources);
}
/**
 * Tests whether the given hosts can be removed without leaving any tenant unplaceable.
 *
 * @param hostsToRemove hosts to hypothetically remove
 * @return a failure path describing why removal fails, or empty if removal is possible
 */
public Optional<HostFailurePath> findHostRemovalFailure(List<Node> hostsToRemove) {
    return findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources)
            .map(reason -> {
                HostFailurePath failurePath = new HostFailurePath();
                failurePath.hostsCausingFailure = hostsToRemove;
                failurePath.failureReason = reason;
                return failurePath;
            });
}
// Node states considered when fetching hosts and tenants. Declared final so the shared
// array reference cannot be reassigned; treat the array contents as read-only.
private static final Node.State[] relevantNodeStates = {
    Node.State.active,
    Node.State.inactive,
    Node.State.dirty,
    Node.State.provisioned,
    Node.State.ready,
    Node.State.reserved
};
/** Fetches all host nodes in the relevant states from the repository. */
private List<Node> getHosts(NodeRepository nodeRepository) {
return nodeRepository.getNodes(NodeType.host, relevantNodeStates);
}
/** Fetches all tenant nodes (in relevant states) whose parent hostname is one of the given hosts. */
private List<Node> getTenants(NodeRepository nodeRepository, List<Node> hosts) {
    Set<String> hostnames = hosts.stream().map(Node::hostname).collect(Collectors.toSet());
    return nodeRepository.getNodes(NodeType.tenant, relevantNodeStates).stream()
            .filter(tenant -> hostnames.contains(tenant.parentHostname().orElse("")))
            .collect(Collectors.toList());
}
/**
 * Greedily removes hosts in ascending heuristic order (lowest score = most fragile first),
 * growing the removed set one host at a time until some prefix cannot be re-allocated.
 * Throws if no prefix fails — impossible, since removing every host leaves no allocation targets.
 */
private Optional<HostFailurePath> greedyHeuristicFindFailurePath(Map<Node, Integer> heuristic, List<Node> hosts,
Map<Node, List<Node>> nodeChildren,
Map<Node, AllocationResources> availableResources) {
if (hosts.size() == 0) return Optional.empty();
// Hosts sorted by heuristic score, lowest first.
List<Node> parentRemovalPriorityList = heuristic.entrySet().stream()
.sorted(Comparator.comparingInt(Map.Entry::getValue))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
for (int i = 1; i <= parentRemovalPriorityList.size(); i++) {
// Try removing the i most fragile hosts together.
List<Node> hostsToRemove = parentRemovalPriorityList.subList(0, i);
var hostRemovalFailure = findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources);
if (hostRemovalFailure.isPresent()) {
HostFailurePath failurePath = new HostFailurePath();
failurePath.hostsCausingFailure = hostsToRemove;
failurePath.failureReason = hostRemovalFailure.get();
return Optional.of(failurePath);
}
}
throw new IllegalStateException("No path to failure found. This should be impossible!");
}
/** Builds a hostname -> node index; throws IllegalStateException on duplicate hostnames (Collectors.toMap contract). */
private Map<String, Node> constructHostnameToNodeMap(List<Node> nodes) {
    return nodes.stream().collect(Collectors.toMap(Node::hostname, Function.identity()));
}
/** Groups tenants under their parent host node; hosts with no children map to an empty list. */
private Map<Node, List<Node>> constructNodeChildrenMap(List<Node> tenants, List<Node> hosts, Map<String, Node> hostnameToNode) {
Map<Node, List<Node>> nodeChildren = tenants.stream()
.filter(n -> n.parentHostname().isPresent())
.filter(n -> hostnameToNode.containsKey(n.parentHostname().get()))
.collect(Collectors.groupingBy(
n -> hostnameToNode.get(n.parentHostname().orElseThrow())));
// Ensure every host has an entry, even when it currently hosts no tenants.
for (var host : hosts) nodeChildren.putIfAbsent(host, List.of());
return nodeChildren;
}
/**
 * Computes, per host, the resources and IPs that remain after subtracting everything its
 * current children consume. Disk speed on children is widened to 'any' before subtraction.
 */
private Map<Node, AllocationResources> constructAvailableResourcesMap(List<Node> hosts, Map<Node, List<Node>> nodeChildren) {
    Map<Node, AllocationResources> availableResources = new HashMap<>();
    for (var host : hosts) {
        NodeResources hostResources = host.flavor().resources();
        int occupiedIps = 0;
        Set<String> ipPool = host.ipAddressPool().asSet();
        for (var child : nodeChildren.get(host)) {
            hostResources = hostResources.subtract(child.flavor().resources().withDiskSpeed(NodeResources.DiskSpeed.any));
            // Only IPs actually drawn from this host's pool count as occupied.
            occupiedIps += child.ipAddresses().stream().filter(ipPool::contains).count();
        }
        // Reuse the ipPool snapshot taken above rather than re-materializing the pool set.
        availableResources.put(host, new AllocationResources(hostResources, ipPool.size() - occupiedIps));
    }
    return availableResources;
}
/**
* Computes a heuristic for each host, with a lower score indicating a higher perceived likelihood that removing
* the host causes an unrecoverable state
*/
private Map<Node, Integer> computeMaximalRepeatedRemovals(List<Node> hosts, Map<Node, List<Node>> nodeChildren,
Map<Node, AllocationResources> availableResources) {
// Hosts with no children are skipped below and keep this MAX_VALUE sentinel (never a failure risk).
Map<Node, Integer> timesNodeCanBeRemoved = hosts.stream().collect(Collectors.toMap(
Function.identity(),
_x -> Integer.MAX_VALUE
));
for (Node host : hosts) {
List<Node> children = nodeChildren.get(host);
if (children.size() == 0) continue;
// Work on copies: tryAllocateNodes mutates the resource map and contained allocations.
Map<Node, AllocationResources> resourceMap = new HashMap<>(availableResources);
Map<Node, List<Allocation>> containedAllocations = collateAllocations(nodeChildren);
int timesHostCanBeRemoved = 0;
Optional<Node> unallocatedNode;
// Repeatedly re-allocate this host's children until an allocation fails;
// capped at 1000 iterations as a safety bound against effectively unlimited capacity.
while (timesHostCanBeRemoved < 1000) {
unallocatedNode = tryAllocateNodes(nodeChildren.get(host), hosts, resourceMap, containedAllocations);
if (unallocatedNode.isEmpty()) {
timesHostCanBeRemoved++;
} else break;
}
timesNodeCanBeRemoved.put(host, timesHostCanBeRemoved);
}
return timesNodeCanBeRemoved;
}
/** Returns every host whose remaining cpu, memory, or disk is negative, i.e. overcommitted. */
private List<Node> findOvercommittedNodes(Map<Node, AllocationResources> availableResources) {
    return availableResources.entrySet().stream()
            .filter(entry -> {
                NodeResources remaining = entry.getValue().nodeResources;
                return remaining.vcpu() < 0 || remaining.memoryGb() < 0 || remaining.diskGb() < 0;
            })
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());
}
/** Maps each host to the (mutable) list of allocations its children currently carry. */
private Map<Node, List<Allocation>> collateAllocations(Map<Node, List<Node>> nodeChildren) {
    Map<Node, List<Allocation>> allocations = new HashMap<>();
    nodeChildren.forEach((host, children) -> {
        List<Allocation> hostAllocations = new ArrayList<>();
        for (Node child : children) child.allocation().ifPresent(hostAllocations::add);
        allocations.put(host, hostAllocations);
    });
    return allocations;
}
/**
* Tests whether it's possible to remove the provided hosts.
* Does not mutate any input variable.
* @return Empty optional if removal is possible, information on what caused the failure otherwise
*/
private Optional<HostRemovalFailure> findHostRemovalFailure(List<Node> hostsToRemove, List<Node> allHosts,
Map<Node, List<Node>> nodechildren,
Map<Node, AllocationResources> availableResources) {
var containedAllocations = collateAllocations(nodechildren);
// Copy so the caller's availableResources map is left untouched.
var resourceMap = new HashMap<>(availableResources);
List<Node> validAllocationTargets = allHosts.stream()
.filter(h -> !hostsToRemove.contains(h))
.collect(Collectors.toList());
// Removing every host trivially fails: there is nowhere left to allocate.
if (validAllocationTargets.size() == 0) {
return Optional.of(HostRemovalFailure.none());
}
// Side effect: resets and records the attempted re-allocations for later inspection.
allocationHistory = new AllocationHistory();
for (var host : hostsToRemove) {
Optional<Node> unallocatedNode = tryAllocateNodes(nodechildren.get(host),
validAllocationTargets, resourceMap, containedAllocations, true);
if (unallocatedNode.isPresent()) {
AllocationFailureReasonList failures = collateAllocationFailures(unallocatedNode.get(),
validAllocationTargets, resourceMap, containedAllocations);
return Optional.of(HostRemovalFailure.create(host, unallocatedNode.get(), failures));
}
}
return Optional.empty();
}
/**
* Attempts to allocate the listed nodes to a new host, mutating availableResources and containedAllocations,
* optionally returning the first node to fail, if one does.
* */
// Convenience overload: allocate without recording allocation history.
private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
return tryAllocateNodes(nodes, hosts, availableResources, containedAllocations, false);
}
private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations, boolean withHistory) {
for (var node : nodes) {
var newParent = tryAllocateNode(node, hosts, availableResources, containedAllocations);
if (newParent.isEmpty()) {
// Allocation failed: record a dead-end entry (null parent, zero eligible) and stop at the first failure.
if (withHistory) allocationHistory.addEntry(node, null, 0);
return Optional.of(node);
}
if (withHistory) {
// Count hosts that could still take this node *after* the allocation above consumed resources;
// the +1 presumably accounts for the parent just chosen — TODO(review) confirm intent.
long eligibleParents =
hosts.stream().filter(h ->
!violatesParentHostPolicy(node, h, containedAllocations)
&& availableResources.get(h).satisfies(AllocationResources.from(node.flavor().resources()))).count();
allocationHistory.addEntry(node, newParent.get(), eligibleParents + 1);
}
}
return Optional.empty();
}
/**
* @return The parent to which the node was allocated, if it was successfully allocated.
*/
private Optional<Node> tryAllocateNode(Node node, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
AllocationResources requiredNodeResources = AllocationResources.from(node.flavor().resources());
// First-fit: hosts are tried in list order.
for (var host : hosts) {
var availableHostResources = availableResources.get(host);
if (violatesParentHostPolicy(node, host, containedAllocations)) {
continue;
}
if (availableHostResources.satisfies(requiredNodeResources)) {
// Mutates both maps: subtract the consumed resources and record the allocation on the host.
availableResources.put(host, availableHostResources.subtract(requiredNodeResources));
if (node.allocation().isPresent()) {
containedAllocations.get(host).add(node.allocation().get());
}
return Optional.of(host);
}
}
return Optional.empty();
}
/**
 * True if the host already carries an allocation from the same owner and (group/version-ignored)
 * cluster as the node's allocation — i.e. placing the node there would co-locate cluster members.
 */
private static boolean violatesParentHostPolicy(Node node, Node host, Map<Node, List<Allocation>> containedAllocations) {
    return node.allocation()
            .map(nodeAllocation -> containedAllocations.get(host).stream()
                    .anyMatch(existing ->
                            existing.membership().cluster().equalsIgnoringGroupAndVespaVersion(nodeAllocation.membership().cluster())
                            && existing.owner().equals(nodeAllocation.owner())))
            .orElse(false);
}
/** For each candidate host, records every reason it cannot take the given node. */
private AllocationFailureReasonList collateAllocationFailures(Node node, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
List<AllocationFailureReason> allocationFailureReasons = new ArrayList<>();
for (var host : hosts) {
AllocationFailureReason reason = new AllocationFailureReason(host);
var availableHostResources = availableResources.get(host);
reason.violatesParentHostPolicy = violatesParentHostPolicy(node, host, containedAllocations);
NodeResources l = availableHostResources.nodeResources;
NodeResources r = node.flavor().resources();
if (l.vcpu() < r.vcpu()) { reason.insufficientVcpu = true; }
if (l.memoryGb() < r.memoryGb()) { reason.insufficientMemoryGb = true; }
if (l.diskGb() < r.diskGb()) { reason.insufficientDiskGb = true; }
// 'any' disk speed on the node is compatible with every host disk speed.
if (r.diskSpeed() != NodeResources.DiskSpeed.any && r.diskSpeed() != l.diskSpeed())
{ reason.incompatibleDiskSpeed = true; }
if (availableHostResources.availableIPs < 1) { reason.insufficientAvailableIPs = true; }
allocationFailureReasons.add(reason);
}
return new AllocationFailureReasonList(allocationFailureReasons);
}
/**
* Contains the list of hosts that, upon being removed, caused an unrecoverable state,
* as well as the specific host and tenant which caused it.
*/
public static class HostFailurePath {
// Hosts whose simultaneous removal triggers the failure.
public List<Node> hostsCausingFailure;
// Details of the allocation that could not be satisfied.
public HostRemovalFailure failureReason;
}
/**
* Data class used for detailing why removing the given tenant from the given host was unsuccessful.
* A failure might not be caused by failing to allocate a specific tenant, in which case the fields
* will be empty.
*/
public static class HostRemovalFailure {
// Host whose removal failed; empty when there were no allocation targets at all.
public Optional<Node> host;
// Tenant that could not be re-allocated; empty in the no-targets case.
public Optional<Node> tenant;
public AllocationFailureReasonList failureReasons;
/** Failure with no specific host/tenant: no removal candidates remained to allocate to. */
public static HostRemovalFailure none() {
return new HostRemovalFailure(
Optional.empty(),
Optional.empty(),
new AllocationFailureReasonList(List.of()));
}
/** Failure caused by a specific tenant of a specific host. */
public static HostRemovalFailure create(Node host, Node tenant, AllocationFailureReasonList failureReasons) {
return new HostRemovalFailure(
Optional.of(host),
Optional.of(tenant),
failureReasons);
}
private HostRemovalFailure(Optional<Node> host, Optional<Node> tenant, AllocationFailureReasonList failureReasons) {
this.host = host;
this.tenant = tenant;
this.failureReasons = failureReasons;
}
@Override
public String toString() {
if (host.isEmpty() || tenant.isEmpty()) return "No removal candidates exists.";
return String.format(
"Failure to remove host %s" +
"\n\tNo new host found for tenant %s:" +
"\n\t\tSingular Reasons: %s" +
"\n\t\tTotal Reasons: %s",
this.host.get().hostname(),
this.tenant.get().hostname(),
this.failureReasons.singularReasonFailures().toString(),
this.failureReasons.toString()
);
}
}
/**
* Used to describe the resources required for a tenant, and available to a host.
*/
private static class AllocationResources {
    NodeResources nodeResources;
    int availableIPs;

    /** A tenant's demand: its flavor resources plus exactly one IP address. */
    public static AllocationResources from(NodeResources nodeResources) {
        return new AllocationResources(nodeResources, 1);
    }

    public AllocationResources(NodeResources nodeResources, int availableIPs) {
        this.nodeResources = nodeResources;
        this.availableIPs = availableIPs;
    }

    /** True when these resources can accommodate {@code other} in both node resources and IPs. */
    public boolean satisfies(AllocationResources other) {
        return this.nodeResources.satisfies(other.nodeResources) && this.availableIPs >= other.availableIPs;
    }

    /** Remaining resources after granting {@code other}; may go negative (overcommit). */
    public AllocationResources subtract(AllocationResources other) {
        return new AllocationResources(this.nodeResources.subtract(other.nodeResources),
                                       this.availableIPs - other.availableIPs);
    }
}
/**
* Keeps track of the reason why a host rejected an allocation.
*/
private static class AllocationFailureReason {
    Node host;
    public AllocationFailureReason (Node host) {
        this.host = host;
    }
    public boolean insufficientVcpu = false;
    public boolean insufficientMemoryGb = false;
    public boolean insufficientDiskGb = false;
    public boolean incompatibleDiskSpeed = false;
    public boolean insufficientAvailableIPs = false;
    public boolean violatesParentHostPolicy = false;

    /** Names of all reasons currently set; single source of truth for both counting and printing. */
    private List<String> setReasons() {
        List<String> reasons = new ArrayList<>();
        if (insufficientVcpu) reasons.add("insufficientVcpu");
        if (insufficientMemoryGb) reasons.add("insufficientMemoryGb");
        if (insufficientDiskGb) reasons.add("insufficientDiskGb");
        if (incompatibleDiskSpeed) reasons.add("incompatibleDiskSpeed");
        if (insufficientAvailableIPs) reasons.add("insufficientAvailableIPs");
        if (violatesParentHostPolicy) reasons.add("violatesParentHostPolicy");
        return reasons;
    }

    /** Number of distinct reasons this host rejected the allocation. */
    public int numberOfReasons() {
        return setReasons().size();
    }

    @Override
    public String toString() {
        return String.format("[%s]", String.join(", ", setReasons()));
    }
}
/**
* Provides convenient methods for tallying failures.
*/
public static class AllocationFailureReasonList {
private List<AllocationFailureReason> allocationFailureReasons;
public AllocationFailureReasonList(List<AllocationFailureReason> allocationFailureReasons) {
this.allocationFailureReasons = allocationFailureReasons;
}
// Tallies of each individual failure cause across all hosts.
public long insufficientVcpu() { return allocationFailureReasons.stream().filter(r -> r.insufficientVcpu).count(); }
public long insufficientMemoryGb() { return allocationFailureReasons.stream().filter(r -> r.insufficientMemoryGb).count(); }
public long insufficientDiskGb() { return allocationFailureReasons.stream().filter(r -> r.insufficientDiskGb).count(); }
public long incompatibleDiskSpeed() { return allocationFailureReasons.stream().filter(r -> r.incompatibleDiskSpeed).count(); }
public long insufficientAvailableIps() { return allocationFailureReasons.stream().filter(r -> r.insufficientAvailableIPs).count(); }
public long violatesParentHostPolicy() { return allocationFailureReasons.stream().filter(r -> r.violatesParentHostPolicy).count(); }
/** Failures where exactly one reason applied — pinpoints the single binding constraint. */
public AllocationFailureReasonList singularReasonFailures() {
return new AllocationFailureReasonList(allocationFailureReasons.stream()
.filter(reason -> reason.numberOfReasons() == 1).collect(Collectors.toList()));
}
/** Failures where more than one reason applied. */
public AllocationFailureReasonList multipleReasonFailures() {
return new AllocationFailureReasonList(allocationFailureReasons.stream()
.filter(reason -> reason.numberOfReasons() > 1).collect(Collectors.toList()));
}
public long size() {
return allocationFailureReasons.size();
}
@Override
public String toString() {
return String.format("CPU (%3d), Memory (%3d), Disk size (%3d), Disk speed (%3d), IP (%3d), Parent-Host Policy (%3d)",
insufficientVcpu(), insufficientMemoryGb(), insufficientDiskGb(),
incompatibleDiskSpeed(), insufficientAvailableIps(), violatesParentHostPolicy());
}
}
public static class AllocationHistory {
/** One attempted re-allocation of a tenant; newParent is null when the allocation failed. */
public static class Entry {
public Node tenant;
public Node newParent;
public long eligibleParents;
public Entry(Node tenant, Node newParent, long eligibleParents) {
this.tenant = tenant;
this.newParent = newParent;
this.eligibleParents = eligibleParents;
}
@Override
public String toString() {
// Short hostnames (everything before the first dot) keep the table compact; "x" marks a failed allocation.
return String.format("%-20s %-65s -> %15s [%3d valid]",
tenant.hostname().replaceFirst("\\..+", ""),
tenant.flavor().resources(),
newParent == null ? "x" : newParent.hostname().replaceFirst("\\..+", ""),
this.eligibleParents
);
}
}
public List<Entry> historyEntries;
public AllocationHistory() {
this.historyEntries = new ArrayList<>();
}
public void addEntry(Node tenant, Node newParent, long eligibleParents) {
this.historyEntries.add(new Entry(tenant, newParent, eligibleParents));
}
/** Hostnames of the parents the re-allocated tenants originally lived on. */
public Set<String> oldParents() {
Set<String> oldParents = new HashSet<>();
for (var entry : historyEntries)
entry.tenant.parentHostname().ifPresent(oldParents::add);
return oldParents;
}
@Override
public String toString() {
StringBuilder out = new StringBuilder();
String currentParent = "";
// Group consecutive entries under a heading line for their (old) parent host.
for (var entry : historyEntries) {
String parentName = entry.tenant.parentHostname().orElseThrow();
if (!parentName.equals(currentParent)) {
currentParent = parentName;
out.append(parentName).append("\n");
}
out.append(entry.toString()).append("\n");
}
return out.toString();
}
}
} | class CapacityChecker {
private List<Node> hosts;
Map<String, Node> nodeMap;
private Map<Node, List<Node>> nodeChildren;
private Map<Node, AllocationResources> availableResources;
public AllocationHistory allocationHistory = null;
public CapacityChecker(NodeRepository nodeRepository) {
this.hosts = getHosts(nodeRepository);
List<Node> tenants = getTenants(nodeRepository, hosts);
nodeMap = constructHostnameToNodeMap(hosts);
this.nodeChildren = constructNodeChildrenMap(tenants, hosts, nodeMap);
this.availableResources = constructAvailableResourcesMap(hosts, nodeChildren);
}
public List<Node> getHosts() {
return hosts;
}
public Optional<HostFailurePath> worstCaseHostLossLeadingToFailure() {
Map<Node, Integer> timesNodeCanBeRemoved = computeMaximalRepeatedRemovals(hosts, nodeChildren, availableResources);
return greedyHeuristicFindFailurePath(timesNodeCanBeRemoved, hosts, nodeChildren, availableResources);
}
protected List<Node> findOvercommittedHosts() {
return findOvercommittedNodes(availableResources);
}
public Optional<HostFailurePath> findHostRemovalFailure(List<Node> hostsToRemove) {
var removal = findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources);
if (removal.isEmpty()) return Optional.empty();
HostFailurePath failurePath = new HostFailurePath();
failurePath.hostsCausingFailure = hostsToRemove;
failurePath.failureReason = removal.get();
return Optional.of(failurePath);
}
private static Node.State[] relevantNodeStates = {
Node.State.active,
Node.State.inactive,
Node.State.dirty,
Node.State.provisioned,
Node.State.ready,
Node.State.reserved
};
private List<Node> getHosts(NodeRepository nodeRepository) {
return nodeRepository.getNodes(NodeType.host, relevantNodeStates);
}
private List<Node> getTenants(NodeRepository nodeRepository, List<Node> hosts) {
var parentNames = hosts.stream().map(Node::hostname).collect(Collectors.toSet());
return nodeRepository.getNodes(NodeType.tenant, relevantNodeStates).stream()
.filter(t -> parentNames.contains(t.parentHostname().orElse("")))
.collect(Collectors.toList());
}
private Optional<HostFailurePath> greedyHeuristicFindFailurePath(Map<Node, Integer> heuristic, List<Node> hosts,
Map<Node, List<Node>> nodeChildren,
Map<Node, AllocationResources> availableResources) {
if (hosts.size() == 0) return Optional.empty();
List<Node> parentRemovalPriorityList = heuristic.entrySet().stream()
.sorted(Comparator.comparingInt(Map.Entry::getValue))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
for (int i = 1; i <= parentRemovalPriorityList.size(); i++) {
List<Node> hostsToRemove = parentRemovalPriorityList.subList(0, i);
var hostRemovalFailure = findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources);
if (hostRemovalFailure.isPresent()) {
HostFailurePath failurePath = new HostFailurePath();
failurePath.hostsCausingFailure = hostsToRemove;
failurePath.failureReason = hostRemovalFailure.get();
return Optional.of(failurePath);
}
}
throw new IllegalStateException("No path to failure found. This should be impossible!");
}
private Map<String, Node> constructHostnameToNodeMap(List<Node> nodes) {
return nodes.stream().collect(Collectors.toMap(Node::hostname, n -> n));
}
private Map<Node, List<Node>> constructNodeChildrenMap(List<Node> tenants, List<Node> hosts, Map<String, Node> hostnameToNode) {
Map<Node, List<Node>> nodeChildren = tenants.stream()
.filter(n -> n.parentHostname().isPresent())
.filter(n -> hostnameToNode.containsKey(n.parentHostname().get()))
.collect(Collectors.groupingBy(
n -> hostnameToNode.get(n.parentHostname().orElseThrow())));
for (var host : hosts) nodeChildren.putIfAbsent(host, List.of());
return nodeChildren;
}
private Map<Node, AllocationResources> constructAvailableResourcesMap(List<Node> hosts, Map<Node, List<Node>> nodeChildren) {
Map<Node, AllocationResources> availableResources = new HashMap<>();
for (var host : hosts) {
NodeResources hostResources = host.flavor().resources();
int occupiedIps = 0;
Set<String> ipPool = host.ipAddressPool().asSet();
for (var child : nodeChildren.get(host)) {
hostResources = hostResources.subtract(child.flavor().resources().withDiskSpeed(NodeResources.DiskSpeed.any));
occupiedIps += child.ipAddresses().stream().filter(ipPool::contains).count();
}
availableResources.put(host, new AllocationResources(hostResources, host.ipAddressPool().asSet().size() - occupiedIps));
}
return availableResources;
}
/**
* Computes a heuristic for each host, with a lower score indicating a higher perceived likelihood that removing
* the host causes an unrecoverable state
*/
private Map<Node, Integer> computeMaximalRepeatedRemovals(List<Node> hosts, Map<Node, List<Node>> nodeChildren,
Map<Node, AllocationResources> availableResources) {
Map<Node, Integer> timesNodeCanBeRemoved = hosts.stream().collect(Collectors.toMap(
Function.identity(),
_x -> Integer.MAX_VALUE
));
for (Node host : hosts) {
List<Node> children = nodeChildren.get(host);
if (children.size() == 0) continue;
Map<Node, AllocationResources> resourceMap = new HashMap<>(availableResources);
Map<Node, List<Allocation>> containedAllocations = collateAllocations(nodeChildren);
int timesHostCanBeRemoved = 0;
Optional<Node> unallocatedNode;
while (timesHostCanBeRemoved < 1000) {
unallocatedNode = tryAllocateNodes(nodeChildren.get(host), hosts, resourceMap, containedAllocations);
if (unallocatedNode.isEmpty()) {
timesHostCanBeRemoved++;
} else break;
}
timesNodeCanBeRemoved.put(host, timesHostCanBeRemoved);
}
return timesNodeCanBeRemoved;
}
private List<Node> findOvercommittedNodes(Map<Node, AllocationResources> availableResources) {
List<Node> overcommittedNodes = new ArrayList<>();
for (var entry : availableResources.entrySet()) {
var resources = entry.getValue().nodeResources;
if (resources.vcpu() < 0 || resources.memoryGb() < 0 || resources.diskGb() < 0) {
overcommittedNodes.add(entry.getKey());
}
}
return overcommittedNodes;
}
private Map<Node, List<Allocation>> collateAllocations(Map<Node, List<Node>> nodeChildren) {
return nodeChildren.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
e -> e.getValue().stream()
.map(Node::allocation).flatMap(Optional::stream)
.collect(Collectors.toList())
));
}
/**
* Tests whether it's possible to remove the provided hosts.
* Does not mutate any input variable.
* @return Empty optional if removal is possible, information on what caused the failure otherwise
*/
private Optional<HostRemovalFailure> findHostRemovalFailure(List<Node> hostsToRemove, List<Node> allHosts,
Map<Node, List<Node>> nodechildren,
Map<Node, AllocationResources> availableResources) {
var containedAllocations = collateAllocations(nodechildren);
var resourceMap = new HashMap<>(availableResources);
List<Node> validAllocationTargets = allHosts.stream()
.filter(h -> !hostsToRemove.contains(h))
.collect(Collectors.toList());
if (validAllocationTargets.size() == 0) {
return Optional.of(HostRemovalFailure.none());
}
allocationHistory = new AllocationHistory();
for (var host : hostsToRemove) {
Optional<Node> unallocatedNode = tryAllocateNodes(nodechildren.get(host),
validAllocationTargets, resourceMap, containedAllocations, true);
if (unallocatedNode.isPresent()) {
AllocationFailureReasonList failures = collateAllocationFailures(unallocatedNode.get(),
validAllocationTargets, resourceMap, containedAllocations);
return Optional.of(HostRemovalFailure.create(host, unallocatedNode.get(), failures));
}
}
return Optional.empty();
}
/**
* Attempts to allocate the listed nodes to a new host, mutating availableResources and containedAllocations,
* optionally returning the first node to fail, if one does.
* */
private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
return tryAllocateNodes(nodes, hosts, availableResources, containedAllocations, false);
}
private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations, boolean withHistory) {
for (var node : nodes) {
var newParent = tryAllocateNode(node, hosts, availableResources, containedAllocations);
if (newParent.isEmpty()) {
if (withHistory) allocationHistory.addEntry(node, null, 0);
return Optional.of(node);
}
if (withHistory) {
long eligibleParents =
hosts.stream().filter(h ->
!violatesParentHostPolicy(node, h, containedAllocations)
&& availableResources.get(h).satisfies(AllocationResources.from(node.flavor().resources()))).count();
allocationHistory.addEntry(node, newParent.get(), eligibleParents + 1);
}
}
return Optional.empty();
}
/**
* @return The parent to which the node was allocated, if it was successfully allocated.
*/
private Optional<Node> tryAllocateNode(Node node, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
AllocationResources requiredNodeResources = AllocationResources.from(node.flavor().resources());
for (var host : hosts) {
var availableHostResources = availableResources.get(host);
if (violatesParentHostPolicy(node, host, containedAllocations)) {
continue;
}
if (availableHostResources.satisfies(requiredNodeResources)) {
availableResources.put(host, availableHostResources.subtract(requiredNodeResources));
if (node.allocation().isPresent()) {
containedAllocations.get(host).add(node.allocation().get());
}
return Optional.of(host);
}
}
return Optional.empty();
}
private static boolean violatesParentHostPolicy(Node node, Node host, Map<Node, List<Allocation>> containedAllocations) {
if (node.allocation().isEmpty()) return false;
Allocation nodeAllocation = node.allocation().get();
for (var allocation : containedAllocations.get(host)) {
if (allocation.membership().cluster().equalsIgnoringGroupAndVespaVersion(nodeAllocation.membership().cluster())
&& allocation.owner().equals(nodeAllocation.owner())) {
return true;
}
}
return false;
}
private AllocationFailureReasonList collateAllocationFailures(Node node, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
List<AllocationFailureReason> allocationFailureReasons = new ArrayList<>();
for (var host : hosts) {
AllocationFailureReason reason = new AllocationFailureReason(host);
var availableHostResources = availableResources.get(host);
reason.violatesParentHostPolicy = violatesParentHostPolicy(node, host, containedAllocations);
NodeResources l = availableHostResources.nodeResources;
NodeResources r = node.flavor().resources();
if (l.vcpu() < r.vcpu()) { reason.insufficientVcpu = true; }
if (l.memoryGb() < r.memoryGb()) { reason.insufficientMemoryGb = true; }
if (l.diskGb() < r.diskGb()) { reason.insufficientDiskGb = true; }
if (r.diskSpeed() != NodeResources.DiskSpeed.any && r.diskSpeed() != l.diskSpeed())
{ reason.incompatibleDiskSpeed = true; }
if (availableHostResources.availableIPs < 1) { reason.insufficientAvailableIPs = true; }
allocationFailureReasons.add(reason);
}
return new AllocationFailureReasonList(allocationFailureReasons);
}
/**
* Contains the list of hosts that, upon being removed, caused an unrecoverable state,
* as well as the specific host and tenant which caused it.
*/
public static class HostFailurePath {
public List<Node> hostsCausingFailure;
public HostRemovalFailure failureReason;
}
/**
 * Data class used for detailing why removing the given tenant from the given host was unsuccessful.
 * A failure might not be caused by failing to allocate a specific tenant, in which case the
 * host/tenant fields are empty (see {@link #none()}).
 */
public static class HostRemovalFailure {
    public Optional<Node> host;
    public Optional<Node> tenant;
    public AllocationFailureReasonList allocationFailures;

    /** A failure with no specific host/tenant cause, carrying an empty failure list. */
    public static HostRemovalFailure none() {
        return new HostRemovalFailure(
                Optional.empty(),
                Optional.empty(),
                new AllocationFailureReasonList(List.of()));
    }

    /** A failure caused by being unable to re-allocate {@code tenant} away from {@code host}. */
    public static HostRemovalFailure create(Node host, Node tenant, AllocationFailureReasonList failureReasons) {
        return new HostRemovalFailure(
                Optional.of(host),
                Optional.of(tenant),
                failureReasons);
    }

    private HostRemovalFailure(Optional<Node> host, Optional<Node> tenant, AllocationFailureReasonList allocationFailures) {
        this.host = host;
        this.tenant = tenant;
        this.allocationFailures = allocationFailures;
    }

    @Override
    public String toString() {
        // Grammar fix: "candidates exists" -> "candidates exist".
        if (host.isEmpty() || tenant.isEmpty()) return "No removal candidates exist.";
        return String.format(
                "Failure to remove host %s" +
                "\n\tNo new host found for tenant %s:" +
                "\n\t\tSingular Reasons: %s" +
                "\n\t\tTotal Reasons: %s",
                this.host.get().hostname(),
                this.tenant.get().hostname(),
                this.allocationFailures.singularReasonFailures().toString(),
                this.allocationFailures.toString()
        );
    }
}
/** Describes the resources required by a tenant, or the resources available to a host. */
private static class AllocationResources {
    NodeResources nodeResources;
    int availableIPs;

    /** Wraps node resources as an allocation request that also consumes a single IP address. */
    public static AllocationResources from(NodeResources nodeResources) {
        return new AllocationResources(nodeResources, 1);
    }

    public AllocationResources(NodeResources nodeResources, int availableIPs) {
        this.nodeResources = nodeResources;
        this.availableIPs = availableIPs;
    }

    /** True if these resources can accommodate the given resources. */
    public boolean satisfies(AllocationResources other) {
        return this.nodeResources.satisfies(other.nodeResources)
                && this.availableIPs >= other.availableIPs;
    }

    /** Returns what remains of these resources after subtracting the given resources. */
    public AllocationResources subtract(AllocationResources other) {
        NodeResources remainingNodeResources = this.nodeResources.subtract(other.nodeResources);
        int remainingIPs = this.availableIPs - other.availableIPs;
        return new AllocationResources(remainingNodeResources, remainingIPs);
    }
}
/**
 * Keeps track of the reason(s) why a host rejected an allocation.
 * The set-flag enumeration is centralized in {@link #activeReasons()} so that
 * {@link #numberOfReasons()} and {@link #toString()} cannot drift apart.
 */
private static class AllocationFailureReason {
    Node host;
    public AllocationFailureReason (Node host) {
        this.host = host;
    }
    public boolean insufficientVcpu = false;
    public boolean insufficientMemoryGb = false;
    public boolean insufficientDiskGb = false;
    public boolean incompatibleDiskSpeed = false;
    public boolean insufficientAvailableIPs = false;
    public boolean violatesParentHostPolicy = false;

    /** Names of all reasons currently set, in a fixed order — the single source of truth. */
    private List<String> activeReasons() {
        List<String> reasons = new ArrayList<>();
        if (insufficientVcpu) reasons.add("insufficientVcpu");
        if (insufficientMemoryGb) reasons.add("insufficientMemoryGb");
        if (insufficientDiskGb) reasons.add("insufficientDiskGb");
        if (incompatibleDiskSpeed) reasons.add("incompatibleDiskSpeed");
        if (insufficientAvailableIPs) reasons.add("insufficientAvailableIPs");
        if (violatesParentHostPolicy) reasons.add("violatesParentHostPolicy");
        return reasons;
    }

    /** Number of distinct reasons this host rejected the allocation. */
    public int numberOfReasons() {
        return activeReasons().size();
    }

    @Override
    public String toString() {
        return String.format("[%s]", String.join(", ", activeReasons()));
    }
}
/** Provides convenient methods for tallying failures. */
public static class AllocationFailureReasonList {
    private List<AllocationFailureReason> allocationFailureReasons;

    public AllocationFailureReasonList(List<AllocationFailureReason> allocationFailureReasons) {
        this.allocationFailureReasons = allocationFailureReasons;
    }

    /** Counts the failures for which the given flag predicate holds. */
    private long tally(java.util.function.Predicate<AllocationFailureReason> flag) {
        return allocationFailureReasons.stream().filter(flag).count();
    }

    public long insufficientVcpu() { return tally(reason -> reason.insufficientVcpu); }
    public long insufficientMemoryGb() { return tally(reason -> reason.insufficientMemoryGb); }
    public long insufficientDiskGb() { return tally(reason -> reason.insufficientDiskGb); }
    public long incompatibleDiskSpeed() { return tally(reason -> reason.incompatibleDiskSpeed); }
    public long insufficientAvailableIps() { return tally(reason -> reason.insufficientAvailableIPs); }
    public long violatesParentHostPolicy() { return tally(reason -> reason.violatesParentHostPolicy); }

    /** The failures that were rejected for exactly one reason. */
    public AllocationFailureReasonList singularReasonFailures() {
        List<AllocationFailureReason> singular = allocationFailureReasons.stream()
                .filter(reason -> reason.numberOfReasons() == 1)
                .collect(Collectors.toList());
        return new AllocationFailureReasonList(singular);
    }

    /** The failures that were rejected for more than one reason. */
    public AllocationFailureReasonList multipleReasonFailures() {
        List<AllocationFailureReason> multiple = allocationFailureReasons.stream()
                .filter(reason -> reason.numberOfReasons() > 1)
                .collect(Collectors.toList());
        return new AllocationFailureReasonList(multiple);
    }

    public long size() {
        return allocationFailureReasons.size();
    }

    @Override
    public String toString() {
        return String.format("CPU (%3d), Memory (%3d), Disk size (%3d), Disk speed (%3d), IP (%3d), Parent-Host Policy (%3d)",
                insufficientVcpu(), insufficientMemoryGb(), insufficientDiskGb(),
                incompatibleDiskSpeed(), insufficientAvailableIps(), violatesParentHostPolicy());
    }
}
/** Records each attempted tenant relocation performed during a removal simulation. */
public static class AllocationHistory {

    /** One relocation attempt: a tenant, its new parent (null if none found), and the candidate count. */
    public static class Entry {
        public Node tenant;
        public Node newParent;
        public long eligibleParents;

        public Entry(Node tenant, Node newParent, long eligibleParents) {
            this.tenant = tenant;
            this.newParent = newParent;
            this.eligibleParents = eligibleParents;
        }

        @Override
        public String toString() {
            String tenantShortName = tenant.hostname().replaceFirst("\\..+", "");
            String parentShortName = newParent == null ? "x" : newParent.hostname().replaceFirst("\\..+", "");
            return String.format("%-20s %-65s -> %15s [%3d valid]",
                    tenantShortName,
                    tenant.flavor().resources(),
                    parentShortName,
                    this.eligibleParents
            );
        }
    }

    public List<Entry> historyEntries;

    public AllocationHistory() {
        this.historyEntries = new ArrayList<>();
    }

    public void addEntry(Node tenant, Node newParent, long eligibleParents) {
        this.historyEntries.add(new Entry(tenant, newParent, eligibleParents));
    }

    /** Hostnames of the parents that the relocated tenants originally lived on. */
    public Set<String> oldParents() {
        Set<String> parents = new HashSet<>();
        historyEntries.forEach(entry -> entry.tenant.parentHostname().ifPresent(parents::add));
        return parents;
    }

    @Override
    public String toString() {
        StringBuilder out = new StringBuilder();
        String lastParentSeen = "";
        for (Entry entry : historyEntries) {
            // Entries arrive grouped by old parent; print each parent header once.
            String parentName = entry.tenant.parentHostname().orElseThrow();
            if (!parentName.equals(lastParentSeen)) {
                lastParentSeen = parentName;
                out.append(parentName).append("\n");
            }
            out.append(entry).append("\n");
        }
        return out.toString();
    }
}
} |
... Not sure how I managed to not notice that one 👍 | private void hostLossPossibleToSlime(Cursor root, Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) {
var hosts = root.setArray("hostsToRemove");
hostsToRemove.forEach(h -> hosts.addString(h.hostname()));
CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory;
if (failure.isEmpty()) {
root.setBool("removalPossible", true);
} else {
root.setBool("removalPossible", false);
}
var arr = root.setArray("history");
for (var entry : history.historyEntries) {
var object = arr.addObject();
object.setString("tenant", entry.tenant.hostname());
if (entry.newParent != null) {
object.setString("newParent", entry.newParent.hostname());
}
object.setLong("eligibleParents", entry.eligibleParents);
}
} | if (failure.isEmpty()) { | private void hostLossPossibleToSlime(Cursor root, Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) {
var hosts = root.setArray("hostsToRemove");
hostsToRemove.forEach(h -> hosts.addString(h.hostname()));
CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory;
root.setBool("removalPossible", failure.isEmpty());
var arr = root.setArray("history");
for (var entry : history.historyEntries) {
var object = arr.addObject();
object.setString("tenant", entry.tenant.hostname());
if (entry.newParent != null) {
object.setString("newParent", entry.newParent.hostname());
}
object.setLong("eligibleParents", entry.eligibleParents);
}
} | class HostCapacityResponse extends HttpResponse {
private final StringBuilder text;
private final Slime slime;
private final CapacityChecker capacityChecker;
private final boolean json;
public HostCapacityResponse(NodeRepository nodeRepository, HttpRequest request) {
super(200);
capacityChecker = new CapacityChecker(nodeRepository);
json = request.getBooleanProperty("json");
String hostsJson = request.getProperty("hosts");
text = new StringBuilder();
slime = new Slime();
Cursor root = slime.setObject();
if (hostsJson != null) {
ObjectMapper om = new ObjectMapper();
String[] hostsArray;
try {
hostsArray = om.readValue(hostsJson, String[].class);
} catch (Exception e) {
throw new IllegalArgumentException(e.getMessage());
}
List<String> hostNames = Arrays.asList(hostsArray);
List<Node> hosts;
try {
hosts = capacityChecker.nodesFromHostnames(hostNames);
} catch (IllegalArgumentException e) {
throw new NotFoundException(e.getMessage());
}
var failure = capacityChecker.findHostRemovalFailure(hosts);
if (failure.isPresent() && failure.get().failureReason.failureReasons.size() == 0) {
root.setBool("removalPossible", false);
error(root, "Removing all hosts is trivially impossible.");
} else {
if (json) hostLossPossibleToSlime(root, failure, hosts);
else hostLossPossibleToText(failure, hosts);
}
} else {
var failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
if (failurePath.isPresent()) {
if (json) zoneFailurePathToSlime(root, failurePath.get());
else zoneFailurePathToText(failurePath.get());
} else {
error(root, "Node repository contained no hosts.");
}
}
}
private void error(Cursor root, String errorMessage) {
if (json) root.setString("error", errorMessage);
else text.append(errorMessage);
}
private void hostLossPossibleToText(Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) {
text.append(String.format("Attempting to remove %d hosts: ", hostsToRemove.size()));
CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory;
if (failure.isEmpty()) {
text.append("OK\n\n");
text.append(history);
if (history.oldParents().size() != hostsToRemove.size()) {
long emptyHostCount = hostsToRemove.size() - history.oldParents().size();
text.append(String.format("\nTrivially removed %d empty host%s.", emptyHostCount, emptyHostCount > 1 ? "s" : ""));
}
} else {
text.append("FAILURE\n\n");
text.append(history).append("\n");
text.append(failure.get().failureReason).append("\n\n");
}
}
private void zoneFailurePathToText(CapacityChecker.HostFailurePath failurePath) {
text.append(String.format("Found %d hosts. Failure upon trying to remove %d hosts:\n\n",
capacityChecker.getHosts().size(),
failurePath.hostsCausingFailure.size()));
text.append(capacityChecker.allocationHistory).append("\n");
text.append(failurePath.failureReason);
}
public void zoneFailurePathToSlime(Cursor object, CapacityChecker.HostFailurePath failurePath) {
object.setLong("totalHosts", capacityChecker.getHosts().size());
object.setLong("couldLoseHosts", failurePath.hostsCausingFailure.size());
failurePath.failureReason.host.ifPresent(host ->
object.setString("failedTenantParent", host.hostname())
);
failurePath.failureReason.tenant.ifPresent(tenant -> {
object.setString("failedTenant", tenant.hostname());
object.setString("failedTenantResources", tenant.flavor().resources().toString());
tenant.allocation().ifPresent(allocation ->
object.setString("failedTenantAllocation", allocation.toString())
);
var explanation = object.setObject("hostCandidateRejectionReasons");
allocationFailureReasonListToSlime(explanation.setObject("singularReasonFailures"),
failurePath.failureReason.failureReasons.singularReasonFailures());
allocationFailureReasonListToSlime(explanation.setObject("totalFailures"),
failurePath.failureReason.failureReasons);
});
var details = object.setObject("details");
hostLossPossibleToSlime(details, Optional.of(failurePath), failurePath.hostsCausingFailure);
}
private void allocationFailureReasonListToSlime(Cursor root, CapacityChecker.AllocationFailureReasonList allocationFailureReasonList) {
root.setLong("insufficientVcpu", allocationFailureReasonList.insufficientVcpu());
root.setLong("insufficientMemoryGb", allocationFailureReasonList.insufficientMemoryGb());
root.setLong("insufficientDiskGb", allocationFailureReasonList.insufficientDiskGb());
root.setLong("incompatibleDiskSpeed", allocationFailureReasonList.incompatibleDiskSpeed());
root.setLong("insufficientAvailableIps", allocationFailureReasonList.insufficientAvailableIps());
root.setLong("violatesParentHostPolicy", allocationFailureReasonList.violatesParentHostPolicy());
}
@Override
public void render(OutputStream stream) throws IOException {
if (json) new JsonFormat(true).encode(stream, slime);
else stream.write(text.toString().getBytes());
}
@Override
public String getContentType() {
return json ? "application/json" : "text/plain";
}
} | class HostCapacityResponse extends HttpResponse {
private final StringBuilder text;
private final Slime slime;
private final CapacityChecker capacityChecker;
private final boolean json;
public HostCapacityResponse(NodeRepository nodeRepository, HttpRequest request) {
super(200);
capacityChecker = new CapacityChecker(nodeRepository);
json = request.getBooleanProperty("json");
String hostsJson = request.getProperty("hosts");
text = new StringBuilder();
slime = new Slime();
Cursor root = slime.setObject();
if (hostsJson != null) {
List<Node> hosts = parseHostList(hostsJson);
hostRemovalResponse(root, hosts);
} else {
zoneFailureReponse(root);
}
}
private List<Node> parseHostList(String hosts) {
ObjectMapper om = new ObjectMapper();
String[] hostsArray;
try {
hostsArray = om.readValue(hosts, String[].class);
} catch (Exception e) {
throw new IllegalArgumentException(e.getMessage());
}
List<String> hostNames = Arrays.asList(hostsArray);
try {
return capacityChecker.nodesFromHostnames(hostNames);
} catch (IllegalArgumentException e) {
throw new NotFoundException(e.getMessage());
}
}
private void hostRemovalResponse(Cursor root, List<Node> hosts) {
var failure = capacityChecker.findHostRemovalFailure(hosts);
if (failure.isPresent() && failure.get().failureReason.allocationFailures.size() == 0) {
root.setBool("removalPossible", false);
error(root, "Removing all hosts is trivially impossible.");
} else {
if (json) hostLossPossibleToSlime(root, failure, hosts);
else hostLossPossibleToText(failure, hosts);
}
}
private void zoneFailureReponse(Cursor root) {
var failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
if (failurePath.isPresent()) {
if (json) zoneFailurePathToSlime(root, failurePath.get());
else zoneFailurePathToText(failurePath.get());
} else {
error(root, "Node repository contained no hosts.");
}
}
private void error(Cursor root, String errorMessage) {
if (json) root.setString("error", errorMessage);
else text.append(errorMessage);
}
private void hostLossPossibleToText(Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) {
text.append(String.format("Attempting to remove %d hosts: ", hostsToRemove.size()));
CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory;
if (failure.isEmpty()) {
text.append("OK\n\n");
text.append(history);
if (history.oldParents().size() != hostsToRemove.size()) {
long emptyHostCount = hostsToRemove.size() - history.oldParents().size();
text.append(String.format("\nTrivially removed %d empty host%s.", emptyHostCount, emptyHostCount > 1 ? "s" : ""));
}
} else {
text.append("FAILURE\n\n");
text.append(history).append("\n");
text.append(failure.get().failureReason).append("\n\n");
}
}
private void zoneFailurePathToText(CapacityChecker.HostFailurePath failurePath) {
text.append(String.format("Found %d hosts. Failure upon trying to remove %d hosts:\n\n",
capacityChecker.getHosts().size(),
failurePath.hostsCausingFailure.size()));
text.append(capacityChecker.allocationHistory).append("\n");
text.append(failurePath.failureReason);
}
private void zoneFailurePathToSlime(Cursor object, CapacityChecker.HostFailurePath failurePath) {
object.setLong("totalHosts", capacityChecker.getHosts().size());
object.setLong("couldLoseHosts", failurePath.hostsCausingFailure.size());
failurePath.failureReason.host.ifPresent(host ->
object.setString("failedTenantParent", host.hostname())
);
failurePath.failureReason.tenant.ifPresent(tenant -> {
object.setString("failedTenant", tenant.hostname());
object.setString("failedTenantResources", tenant.flavor().resources().toString());
tenant.allocation().ifPresent(allocation ->
object.setString("failedTenantAllocation", allocation.toString())
);
var explanation = object.setObject("hostCandidateRejectionReasons");
allocationFailureReasonListToSlime(explanation.setObject("singularReasonFailures"),
failurePath.failureReason.allocationFailures.singularReasonFailures());
allocationFailureReasonListToSlime(explanation.setObject("totalFailures"),
failurePath.failureReason.allocationFailures);
});
var details = object.setObject("details");
hostLossPossibleToSlime(details, Optional.of(failurePath), failurePath.hostsCausingFailure);
}
private void allocationFailureReasonListToSlime(Cursor root, CapacityChecker.AllocationFailureReasonList allocationFailureReasonList) {
root.setLong("insufficientVcpu", allocationFailureReasonList.insufficientVcpu());
root.setLong("insufficientMemoryGb", allocationFailureReasonList.insufficientMemoryGb());
root.setLong("insufficientDiskGb", allocationFailureReasonList.insufficientDiskGb());
root.setLong("incompatibleDiskSpeed", allocationFailureReasonList.incompatibleDiskSpeed());
root.setLong("insufficientAvailableIps", allocationFailureReasonList.insufficientAvailableIps());
root.setLong("violatesParentHostPolicy", allocationFailureReasonList.violatesParentHostPolicy());
}
@Override
public void render(OutputStream stream) throws IOException {
if (json) new JsonFormat(true).encode(stream, slime);
else stream.write(text.toString().getBytes());
}
@Override
public String getContentType() {
return json ? "application/json" : "text/plain";
}
} |
```Java if (failure.isPresent() && failure.get().failureReason.allocationFailureReasons.size() == 0) { ``` More in-depth than this? Alternatively; ```Java if (failure.isPresent() && failure.get().failureReason.allocationFailures.size() == 0) { ``` | public HostCapacityResponse(NodeRepository nodeRepository, HttpRequest request) {
super(200);
capacityChecker = new CapacityChecker(nodeRepository);
json = request.getBooleanProperty("json");
String hostsJson = request.getProperty("hosts");
text = new StringBuilder();
slime = new Slime();
Cursor root = slime.setObject();
if (hostsJson != null) {
ObjectMapper om = new ObjectMapper();
String[] hostsArray;
try {
hostsArray = om.readValue(hostsJson, String[].class);
} catch (Exception e) {
throw new IllegalArgumentException(e.getMessage());
}
List<String> hostNames = Arrays.asList(hostsArray);
List<Node> hosts;
try {
hosts = capacityChecker.nodesFromHostnames(hostNames);
} catch (IllegalArgumentException e) {
throw new NotFoundException(e.getMessage());
}
var failure = capacityChecker.findHostRemovalFailure(hosts);
if (failure.isPresent() && failure.get().failureReason.failureReasons.size() == 0) {
root.setBool("removalPossible", false);
error(root, "Removing all hosts is trivially impossible.");
} else {
if (json) hostLossPossibleToSlime(root, failure, hosts);
else hostLossPossibleToText(failure, hosts);
}
} else {
var failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
if (failurePath.isPresent()) {
if (json) zoneFailurePathToSlime(root, failurePath.get());
else zoneFailurePathToText(failurePath.get());
} else {
error(root, "Node repository contained no hosts.");
}
}
} | if (failure.isPresent() && failure.get().failureReason.failureReasons.size() == 0) { | public HostCapacityResponse(NodeRepository nodeRepository, HttpRequest request) {
super(200);
capacityChecker = new CapacityChecker(nodeRepository);
json = request.getBooleanProperty("json");
String hostsJson = request.getProperty("hosts");
text = new StringBuilder();
slime = new Slime();
Cursor root = slime.setObject();
if (hostsJson != null) {
List<Node> hosts = parseHostList(hostsJson);
hostRemovalResponse(root, hosts);
} else {
zoneFailureReponse(root);
}
} | class HostCapacityResponse extends HttpResponse {
private final StringBuilder text;
private final Slime slime;
private final CapacityChecker capacityChecker;
private final boolean json;
private void error(Cursor root, String errorMessage) {
if (json) root.setString("error", errorMessage);
else text.append(errorMessage);
}
private void hostLossPossibleToText(Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) {
text.append(String.format("Attempting to remove %d hosts: ", hostsToRemove.size()));
CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory;
if (failure.isEmpty()) {
text.append("OK\n\n");
text.append(history);
if (history.oldParents().size() != hostsToRemove.size()) {
long emptyHostCount = hostsToRemove.size() - history.oldParents().size();
text.append(String.format("\nTrivially removed %d empty host%s.", emptyHostCount, emptyHostCount > 1 ? "s" : ""));
}
} else {
text.append("FAILURE\n\n");
text.append(history).append("\n");
text.append(failure.get().failureReason).append("\n\n");
}
}
private void zoneFailurePathToText(CapacityChecker.HostFailurePath failurePath) {
text.append(String.format("Found %d hosts. Failure upon trying to remove %d hosts:\n\n",
capacityChecker.getHosts().size(),
failurePath.hostsCausingFailure.size()));
text.append(capacityChecker.allocationHistory).append("\n");
text.append(failurePath.failureReason);
}
private void hostLossPossibleToSlime(Cursor root, Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) {
var hosts = root.setArray("hostsToRemove");
hostsToRemove.forEach(h -> hosts.addString(h.hostname()));
CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory;
if (failure.isEmpty()) {
root.setBool("removalPossible", true);
} else {
root.setBool("removalPossible", false);
}
var arr = root.setArray("history");
for (var entry : history.historyEntries) {
var object = arr.addObject();
object.setString("tenant", entry.tenant.hostname());
if (entry.newParent != null) {
object.setString("newParent", entry.newParent.hostname());
}
object.setLong("eligibleParents", entry.eligibleParents);
}
}
public void zoneFailurePathToSlime(Cursor object, CapacityChecker.HostFailurePath failurePath) {
object.setLong("totalHosts", capacityChecker.getHosts().size());
object.setLong("couldLoseHosts", failurePath.hostsCausingFailure.size());
failurePath.failureReason.host.ifPresent(host ->
object.setString("failedTenantParent", host.hostname())
);
failurePath.failureReason.tenant.ifPresent(tenant -> {
object.setString("failedTenant", tenant.hostname());
object.setString("failedTenantResources", tenant.flavor().resources().toString());
tenant.allocation().ifPresent(allocation ->
object.setString("failedTenantAllocation", allocation.toString())
);
var explanation = object.setObject("hostCandidateRejectionReasons");
allocationFailureReasonListToSlime(explanation.setObject("singularReasonFailures"),
failurePath.failureReason.failureReasons.singularReasonFailures());
allocationFailureReasonListToSlime(explanation.setObject("totalFailures"),
failurePath.failureReason.failureReasons);
});
var details = object.setObject("details");
hostLossPossibleToSlime(details, Optional.of(failurePath), failurePath.hostsCausingFailure);
}
private void allocationFailureReasonListToSlime(Cursor root, CapacityChecker.AllocationFailureReasonList allocationFailureReasonList) {
root.setLong("insufficientVcpu", allocationFailureReasonList.insufficientVcpu());
root.setLong("insufficientMemoryGb", allocationFailureReasonList.insufficientMemoryGb());
root.setLong("insufficientDiskGb", allocationFailureReasonList.insufficientDiskGb());
root.setLong("incompatibleDiskSpeed", allocationFailureReasonList.incompatibleDiskSpeed());
root.setLong("insufficientAvailableIps", allocationFailureReasonList.insufficientAvailableIps());
root.setLong("violatesParentHostPolicy", allocationFailureReasonList.violatesParentHostPolicy());
}
@Override
public void render(OutputStream stream) throws IOException {
if (json) new JsonFormat(true).encode(stream, slime);
else stream.write(text.toString().getBytes());
}
@Override
public String getContentType() {
return json ? "application/json" : "text/plain";
}
} | class HostCapacityResponse extends HttpResponse {
private final StringBuilder text;
private final Slime slime;
private final CapacityChecker capacityChecker;
private final boolean json;
private List<Node> parseHostList(String hosts) {
ObjectMapper om = new ObjectMapper();
String[] hostsArray;
try {
hostsArray = om.readValue(hosts, String[].class);
} catch (Exception e) {
throw new IllegalArgumentException(e.getMessage());
}
List<String> hostNames = Arrays.asList(hostsArray);
try {
return capacityChecker.nodesFromHostnames(hostNames);
} catch (IllegalArgumentException e) {
throw new NotFoundException(e.getMessage());
}
}
private void hostRemovalResponse(Cursor root, List<Node> hosts) {
var failure = capacityChecker.findHostRemovalFailure(hosts);
if (failure.isPresent() && failure.get().failureReason.allocationFailures.size() == 0) {
root.setBool("removalPossible", false);
error(root, "Removing all hosts is trivially impossible.");
} else {
if (json) hostLossPossibleToSlime(root, failure, hosts);
else hostLossPossibleToText(failure, hosts);
}
}
private void zoneFailureReponse(Cursor root) {
var failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
if (failurePath.isPresent()) {
if (json) zoneFailurePathToSlime(root, failurePath.get());
else zoneFailurePathToText(failurePath.get());
} else {
error(root, "Node repository contained no hosts.");
}
}
private void error(Cursor root, String errorMessage) {
if (json) root.setString("error", errorMessage);
else text.append(errorMessage);
}
private void hostLossPossibleToText(Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) {
text.append(String.format("Attempting to remove %d hosts: ", hostsToRemove.size()));
CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory;
if (failure.isEmpty()) {
text.append("OK\n\n");
text.append(history);
if (history.oldParents().size() != hostsToRemove.size()) {
long emptyHostCount = hostsToRemove.size() - history.oldParents().size();
text.append(String.format("\nTrivially removed %d empty host%s.", emptyHostCount, emptyHostCount > 1 ? "s" : ""));
}
} else {
text.append("FAILURE\n\n");
text.append(history).append("\n");
text.append(failure.get().failureReason).append("\n\n");
}
}
private void zoneFailurePathToText(CapacityChecker.HostFailurePath failurePath) {
text.append(String.format("Found %d hosts. Failure upon trying to remove %d hosts:\n\n",
capacityChecker.getHosts().size(),
failurePath.hostsCausingFailure.size()));
text.append(capacityChecker.allocationHistory).append("\n");
text.append(failurePath.failureReason);
}
private void hostLossPossibleToSlime(Cursor root, Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) {
var hosts = root.setArray("hostsToRemove");
hostsToRemove.forEach(h -> hosts.addString(h.hostname()));
CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory;
root.setBool("removalPossible", failure.isEmpty());
var arr = root.setArray("history");
for (var entry : history.historyEntries) {
var object = arr.addObject();
object.setString("tenant", entry.tenant.hostname());
if (entry.newParent != null) {
object.setString("newParent", entry.newParent.hostname());
}
object.setLong("eligibleParents", entry.eligibleParents);
}
}
private void zoneFailurePathToSlime(Cursor object, CapacityChecker.HostFailurePath failurePath) {
object.setLong("totalHosts", capacityChecker.getHosts().size());
object.setLong("couldLoseHosts", failurePath.hostsCausingFailure.size());
failurePath.failureReason.host.ifPresent(host ->
object.setString("failedTenantParent", host.hostname())
);
failurePath.failureReason.tenant.ifPresent(tenant -> {
object.setString("failedTenant", tenant.hostname());
object.setString("failedTenantResources", tenant.flavor().resources().toString());
tenant.allocation().ifPresent(allocation ->
object.setString("failedTenantAllocation", allocation.toString())
);
var explanation = object.setObject("hostCandidateRejectionReasons");
allocationFailureReasonListToSlime(explanation.setObject("singularReasonFailures"),
failurePath.failureReason.allocationFailures.singularReasonFailures());
allocationFailureReasonListToSlime(explanation.setObject("totalFailures"),
failurePath.failureReason.allocationFailures);
});
var details = object.setObject("details");
hostLossPossibleToSlime(details, Optional.of(failurePath), failurePath.hostsCausingFailure);
}
private void allocationFailureReasonListToSlime(Cursor root, CapacityChecker.AllocationFailureReasonList allocationFailureReasonList) {
root.setLong("insufficientVcpu", allocationFailureReasonList.insufficientVcpu());
root.setLong("insufficientMemoryGb", allocationFailureReasonList.insufficientMemoryGb());
root.setLong("insufficientDiskGb", allocationFailureReasonList.insufficientDiskGb());
root.setLong("incompatibleDiskSpeed", allocationFailureReasonList.incompatibleDiskSpeed());
root.setLong("insufficientAvailableIps", allocationFailureReasonList.insufficientAvailableIps());
root.setLong("violatesParentHostPolicy", allocationFailureReasonList.violatesParentHostPolicy());
}
@Override
public void render(OutputStream stream) throws IOException {
if (json) new JsonFormat(true).encode(stream, slime);
else stream.write(text.toString().getBytes());
}
@Override
public String getContentType() {
return json ? "application/json" : "text/plain";
}
} |
FYI, I know about many Maven modules in the Vespa repo that are bundles but do not include jdisc_core. | public void execute() throws MojoExecutionException {
try {
Artifacts.ArtifactSet artifactSet = Artifacts.getArtifacts(project);
warnOnUnsupportedArtifacts(artifactSet.getNonJarArtifacts());
AnalyzeBundle.PublicPackages publicPackagesFromProvidedJars = publicPackagesAggregated(
artifactSet.getJarArtifactsProvided().stream().map(Artifact::getFile).collect(Collectors.toList()));
PackageTally projectPackages = getProjectClassesTally();
PackageTally compileJarsPackages = definedPackages(artifactSet.getJarArtifactsToInclude());
PackageTally includedPackages = projectPackages.combine(compileJarsPackages);
warnIfPackagesDefinedOverlapsGlobalPackages(includedPackages.definedPackages(), publicPackagesFromProvidedJars.globals);
if (getLog().isDebugEnabled()) {
getLog().debug("Referenced packages = " + includedPackages.referencedPackages());
getLog().debug("Defined packages = " + includedPackages.definedPackages());
getLog().debug("Exported packages of dependencies = " + publicPackagesFromProvidedJars.exports.stream()
.map(e -> "(" + e.getPackageNames().toString() + ", " + e.version().orElse("")).collect(Collectors.joining(", ")));
}
if (hasJdiscCoreProvided(artifactSet.getJarArtifactsProvided())) {
logMissingPackages(publicPackagesFromProvidedJars, projectPackages, compileJarsPackages, includedPackages);
} else {
getLog().warn("This project does not have jdisc_core as provided dependency, so the " +
"generated 'Import-Package' OSGi header may be missing important packages.");
}
Map<String, Import> calculatedImports = calculateImports(includedPackages.referencedPackages(),
includedPackages.definedPackages(),
exportsByPackageName(publicPackagesFromProvidedJars.exports));
Map<String, Optional<String>> manualImports = emptyToNone(importPackage).map(GenerateOsgiManifestMojo::getManualImports)
.orElseGet(HashMap::new);
for (String packageName : manualImports.keySet()) {
calculatedImports.remove(packageName);
}
createManifestFile(new File(project.getBuild().getOutputDirectory()), manifestContent(project,
artifactSet.getJarArtifactsToInclude(), manualImports, calculatedImports.values(), includedPackages));
} catch (Exception e) {
throw new MojoExecutionException("Failed generating osgi manifest", e);
}
} | getLog().warn("This project does not have jdisc_core as provided dependency, so the " + | public void execute() throws MojoExecutionException {
try {
Artifacts.ArtifactSet artifactSet = Artifacts.getArtifacts(project);
warnOnUnsupportedArtifacts(artifactSet.getNonJarArtifacts());
AnalyzeBundle.PublicPackages publicPackagesFromProvidedJars = publicPackagesAggregated(
artifactSet.getJarArtifactsProvided().stream().map(Artifact::getFile).collect(Collectors.toList()));
Set<String> exportedPackagesFromProvidedDeps = publicPackagesFromProvidedJars.exportedPackageNames();
PackageTally projectPackages = getProjectClassesTally();
PackageTally compileJarsPackages = definedPackages(artifactSet.getJarArtifactsToInclude());
PackageTally includedPackages = projectPackages.combine(compileJarsPackages);
warnIfPackagesDefinedOverlapsGlobalPackages(includedPackages.definedPackages(), publicPackagesFromProvidedJars.globals);
logDebugPackageSets(publicPackagesFromProvidedJars, includedPackages);
if (hasJdiscCoreProvided(artifactSet.getJarArtifactsProvided())) {
logMissingPackages(exportedPackagesFromProvidedDeps, projectPackages, compileJarsPackages, includedPackages);
} else {
getLog().warn("This project does not have jdisc_core as provided dependency, so the " +
"generated 'Import-Package' OSGi header may be missing important packages.");
}
logOverlappingPackages(projectPackages, exportedPackagesFromProvidedDeps);
logUnnecessaryPackages(compileJarsPackages, exportedPackagesFromProvidedDeps);
Map<String, Import> calculatedImports = calculateImports(includedPackages.referencedPackages(),
includedPackages.definedPackages(),
exportsByPackageName(publicPackagesFromProvidedJars.exports));
Map<String, Optional<String>> manualImports = emptyToNone(importPackage).map(GenerateOsgiManifestMojo::getManualImports)
.orElseGet(HashMap::new);
for (String packageName : manualImports.keySet()) {
calculatedImports.remove(packageName);
}
createManifestFile(new File(project.getBuild().getOutputDirectory()), manifestContent(project,
artifactSet.getJarArtifactsToInclude(), manualImports, calculatedImports.values(), includedPackages));
} catch (Exception e) {
throw new MojoExecutionException("Failed generating osgi manifest", e);
}
} | class GenerateOsgiManifestMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}")
private MavenProject project = null;
@Parameter
private String discApplicationClass = null;
@Parameter
private String discPreInstallBundle = null;
@Parameter(alias = "Bundle-Version", defaultValue = "${project.version}")
private String bundleVersion = null;
@Parameter(alias = "Bundle-SymbolicName", defaultValue = "${project.artifactId}")
private String bundleSymbolicName = null;
@Parameter(alias = "Bundle-Activator")
private String bundleActivator = null;
@Parameter(alias = "X-JDisc-Privileged-Activator")
private String jdiscPrivilegedActivator = null;
@Parameter(alias = "X-Config-Models")
private String configModels = null;
@Parameter(alias = "Import-Package")
private String importPackage = null;
@Parameter(alias = "WebInfUrl")
private String webInfUrl = null;
@Parameter(alias = "Main-Class")
private String mainClass = null;
@Parameter(alias = "X-Jersey-Binding")
private String jerseyBinding = null;
private boolean hasJdiscCoreProvided(List<Artifact> providedArtifacts) {
return providedArtifacts.stream().anyMatch(artifact -> artifact.getArtifactId().equals("jdisc_core"));
}
private void logMissingPackages(AnalyzeBundle.PublicPackages publicPackagesFromProvidedJars, PackageTally projectPackages, PackageTally compileJarPackages, PackageTally includedPackages) {
Set<String> exportedPackagesFromProvidedDeps = publicPackagesFromProvidedJars.exports
.stream()
.map(Export::getPackageNames)
.flatMap(Collection::stream)
.collect(Collectors.toSet());
Set<String> definedAndExportedPackages = Sets.union(includedPackages.definedPackages(), exportedPackagesFromProvidedDeps);
Set<String> missingProjectPackages = missingPackages(projectPackages, definedAndExportedPackages);
if (! missingProjectPackages.isEmpty()) {
getLog().warn("Packages unavailable runtime are referenced from project classes " +
"(annotations can usually be ignored): " + missingProjectPackages);
}
Set<String> missingCompilePackages = missingPackages(compileJarPackages, definedAndExportedPackages);
if (! missingCompilePackages.isEmpty()) {
getLog().info("Packages unavailable runtime are referenced from compile scoped jars " +
"(annotations can usually be ignored): " + missingCompilePackages);
}
}
private static Set<String> missingPackages(PackageTally projectPackages, Set<String> definedAndExportedPackages) {
return Sets.difference(projectPackages.referencedPackages(), definedAndExportedPackages).stream()
.filter(pkg -> !pkg.startsWith("java."))
.collect(Collectors.toSet());
}
private static void warnIfPackagesDefinedOverlapsGlobalPackages(Set<String> internalPackages, List<String> globalPackages)
throws MojoExecutionException {
Set<String> overlap = Sets.intersection(internalPackages, new HashSet<>(globalPackages));
if (! overlap.isEmpty()) {
throw new MojoExecutionException(
"The following packages are both global and included in the bundle:\n " + String.join("\n ", overlap));
}
}
private Collection<String> osgiExportPackages(Map<String, ExportPackageAnnotation> exportedPackages) {
return exportedPackages.entrySet().stream().map(entry -> entry.getKey() + ";version=" + entry.getValue().osgiVersion())
.collect(Collectors.toList());
}
private static String trimWhitespace(Optional<String> lines) {
return Stream.of(lines.orElse("").split(",")).map(String::trim).collect(Collectors.joining(","));
}
private Map<String, String> manifestContent(MavenProject project, Collection<Artifact> jarArtifactsToInclude,
Map<String, Optional<String>> manualImports, Collection<Import> imports, PackageTally pluginPackageTally) {
Map<String, String> ret = new HashMap<>();
String importPackage = Stream.concat(manualImports.entrySet().stream().map(e -> asOsgiImport(e.getKey(), e.getValue())),
imports.stream().map(Import::asOsgiImport)).sorted().collect(Collectors.joining(","));
String exportPackage = osgiExportPackages(pluginPackageTally.exportedPackages()).stream().sorted().collect(Collectors.joining(","));
for (Pair<String, String> element : Arrays.asList(
Pair.of("Created-By", "vespa container maven plugin"),
Pair.of("Bundle-ManifestVersion", "2"),
Pair.of("Bundle-Name", project.getName()),
Pair.of("Bundle-SymbolicName", bundleSymbolicName),
Pair.of("Bundle-Version", asBundleVersion(bundleVersion)),
Pair.of("Bundle-Vendor", "Yahoo!"),
Pair.of("Bundle-ClassPath", bundleClassPath(jarArtifactsToInclude)),
Pair.of("Bundle-Activator", bundleActivator),
Pair.of("X-JDisc-Privileged-Activator", jdiscPrivilegedActivator),
Pair.of("Main-Class", mainClass),
Pair.of("X-JDisc-Application", discApplicationClass),
Pair.of("X-JDisc-Preinstall-Bundle", trimWhitespace(Optional.ofNullable(discPreInstallBundle))),
Pair.of("X-Config-Models", configModels),
Pair.of("X-Jersey-Binding", jerseyBinding),
Pair.of("WebInfUrl", webInfUrl),
Pair.of("Import-Package", importPackage),
Pair.of("Export-Package", exportPackage))) {
if (element.getValue() != null && ! element.getValue().isEmpty()) {
ret.put(element.getKey(), element.getValue());
}
}
return ret;
}
private static String asOsgiImport(String packageName, Optional<String> version) {
return version.map(s -> packageName + ";version=" + quote(s)).orElse(packageName);
}
private static String quote(String s) {
return "\"" + s + "\"";
}
private static void createManifestFile(File outputDirectory, Map<String, String> manifestContent) {
Manifest manifest = toManifest(manifestContent);
withFileOutputStream(new File(outputDirectory, JarFile.MANIFEST_NAME), outputStream -> {
manifest.write(outputStream);
return null;
});
}
private static Manifest toManifest(Map<String, String> manifestContent) {
Manifest manifest = new Manifest();
Attributes mainAttributes = manifest.getMainAttributes();
mainAttributes.put(Attributes.Name.MANIFEST_VERSION, "1.0");
manifestContent.forEach(mainAttributes::putValue);
return manifest;
}
private static String bundleClassPath(Collection<Artifact> artifactsToInclude) {
return Stream.concat(Stream.of("."), artifactsToInclude.stream().map(GenerateOsgiManifestMojo::dependencyPath))
.collect(Collectors.joining(","));
}
private static String dependencyPath(Artifact artifact) {
return "dependencies/" + artifact.getFile().getName();
}
private static String asBundleVersion(String projectVersion) {
if (projectVersion == null) {
throw new IllegalArgumentException("Missing project version.");
}
String[] parts = projectVersion.split("-", 2);
List<String> numericPart = Stream.of(parts[0].split("\\.")).map(s -> Strings.replaceEmptyString(s, "0")).limit(3)
.collect(Collectors.toList());
while (numericPart.size() < 3) {
numericPart.add("0");
}
return String.join(".", numericPart);
}
private void warnOnUnsupportedArtifacts(Collection<Artifact> nonJarArtifacts) {
List<Artifact> unsupportedArtifacts = nonJarArtifacts.stream().filter(a -> ! a.getType().equals("pom"))
.collect(Collectors.toList());
unsupportedArtifacts.forEach(artifact -> getLog()
.warn(String.format("Unsupported artifact '%s': Type '%s' is not supported. Please file a feature request.",
artifact.getId(), artifact.getType())));
}
private PackageTally getProjectClassesTally() {
File outputDirectory = new File(project.getBuild().getOutputDirectory());
List<ClassFileMetaData> analyzedClasses = allDescendantFiles(outputDirectory).filter(file -> file.getName().endsWith(".class"))
.map(Analyze::analyzeClass).collect(Collectors.toList());
return PackageTally.fromAnalyzedClassFiles(analyzedClasses);
}
private static PackageTally definedPackages(Collection<Artifact> jarArtifacts) {
return PackageTally.combine(jarArtifacts.stream().map(ja -> withJarFile(ja.getFile(), GenerateOsgiManifestMojo::definedPackages))
.collect(Collectors.toList()));
}
private static PackageTally definedPackages(JarFile jarFile) throws MojoExecutionException {
List<ClassFileMetaData> analyzedClasses = new ArrayList<>();
for (Enumeration<JarEntry> entries = jarFile.entries(); entries.hasMoreElements();) {
JarEntry entry = entries.nextElement();
if (! entry.isDirectory() && entry.getName().endsWith(".class")) {
analyzedClasses.add(analyzeClass(jarFile, entry));
}
}
return PackageTally.fromAnalyzedClassFiles(analyzedClasses);
}
private static ClassFileMetaData analyzeClass(JarFile jarFile, JarEntry entry) throws MojoExecutionException {
try {
return withInputStream(jarFile, entry, Analyze::analyzeClass);
} catch (Exception e) {
throw new MojoExecutionException(
String.format("While analyzing the class '%s' in jar file '%s'", entry.getName(), jarFile.getName()), e);
}
}
private static Map<String, Optional<String>> getManualImports(String importPackage) {
try {
Map<String, Optional<String>> ret = new HashMap<>();
List<Export> imports = parseImportPackages(importPackage);
for (Export imp : imports) {
Optional<String> version = getVersionThrowOthers(imp.getParameters());
imp.getPackageNames().forEach(pn -> ret.put(pn, version));
}
return ret;
} catch (Exception e) {
throw new RuntimeException("Error in Import-Package:" + importPackage, e);
}
}
private static Optional<String> getVersionThrowOthers(List<ExportPackages.Parameter> parameters) {
if (parameters.size() == 1 && "version".equals(parameters.get(0).getName())) {
return Optional.of(parameters.get(0).getValue());
} else if (parameters.size() == 0) {
return Optional.empty();
} else {
List<String> paramNames = parameters.stream().map(ExportPackages.Parameter::getName).collect(Collectors.toList());
throw new RuntimeException("A single, optional version parameter expected, but got " + paramNames);
}
}
private static List<Export> parseImportPackages(String importPackages) {
return ExportPackageParser.parseExports(importPackages);
}
private static Optional<String> emptyToNone(String str) {
return Optional.ofNullable(str).map(String::trim).filter(s -> ! s.isEmpty());
}
} | class GenerateOsgiManifestMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}")
private MavenProject project = null;
@Parameter
private String discApplicationClass = null;
@Parameter
private String discPreInstallBundle = null;
@Parameter(alias = "Bundle-Version", defaultValue = "${project.version}")
private String bundleVersion = null;
@Parameter(alias = "Bundle-SymbolicName", defaultValue = "${project.artifactId}")
private String bundleSymbolicName = null;
@Parameter(alias = "Bundle-Activator")
private String bundleActivator = null;
@Parameter(alias = "X-JDisc-Privileged-Activator")
private String jdiscPrivilegedActivator = null;
@Parameter(alias = "X-Config-Models")
private String configModels = null;
@Parameter(alias = "Import-Package")
private String importPackage = null;
@Parameter(alias = "WebInfUrl")
private String webInfUrl = null;
@Parameter(alias = "Main-Class")
private String mainClass = null;
@Parameter(alias = "X-Jersey-Binding")
private String jerseyBinding = null;
private void logDebugPackageSets(AnalyzeBundle.PublicPackages publicPackagesFromProvidedJars, PackageTally includedPackages) {
if (getLog().isDebugEnabled()) {
getLog().debug("Referenced packages = " + includedPackages.referencedPackages());
getLog().debug("Defined packages = " + includedPackages.definedPackages());
getLog().debug("Exported packages of dependencies = " + publicPackagesFromProvidedJars.exports.stream()
.map(e -> "(" + e.getPackageNames().toString() + ", " + e.version().orElse("")).collect(Collectors.joining(", ")));
}
}
private boolean hasJdiscCoreProvided(List<Artifact> providedArtifacts) {
return providedArtifacts.stream().anyMatch(artifact -> artifact.getArtifactId().equals("jdisc_core"));
}
private void logMissingPackages(Set<String> exportedPackagesFromProvidedJars,
PackageTally projectPackages,
PackageTally compileJarPackages,
PackageTally includedPackages) {
Set<String> definedAndExportedPackages = Sets.union(includedPackages.definedPackages(), exportedPackagesFromProvidedJars);
Set<String> missingProjectPackages = projectPackages.referencedPackagesMissingFrom(definedAndExportedPackages);
if (! missingProjectPackages.isEmpty()) {
getLog().warn("Packages unavailable runtime are referenced from project classes " +
"(annotations can usually be ignored): " + missingProjectPackages);
}
Set<String> missingCompilePackages = compileJarPackages.referencedPackagesMissingFrom(definedAndExportedPackages);
if (! missingCompilePackages.isEmpty()) {
getLog().info("Packages unavailable runtime are referenced from compile scoped jars " +
"(annotations can usually be ignored): " + missingCompilePackages);
}
}
private void logOverlappingPackages(PackageTally projectPackages,
Set<String> exportedPackagesFromProvidedDeps) {
Set<String> overlappingProjectPackages = Sets.intersection(projectPackages.definedPackages(), exportedPackagesFromProvidedDeps);
if (! overlappingProjectPackages.isEmpty()) {
getLog().warn("Project classes use the following packages that are already defined in provided scoped dependencies: "
+ overlappingProjectPackages);
}
}
/*
* This mostly detects packages re-exported via composite bundles like jdisc_core and container-disc.
* An artifact can only be represented once, either in compile or provided scope. So if the project
* adds an artifact in compile scope that we deploy as a pre-installed bundle, we won't see the same
* artifact as provided via container-dev and hence can't detect the duplicate packages.
*/
private void logUnnecessaryPackages(PackageTally compileJarsPackages,
Set<String> exportedPackagesFromProvidedDeps) {
Set<String> unnecessaryPackages = Sets.intersection(compileJarsPackages.definedPackages(), exportedPackagesFromProvidedDeps);
if (! unnecessaryPackages.isEmpty()) {
getLog().info("Compile scoped jars contain the following packages that are most likely " +
"available from jdisc runtime: " + unnecessaryPackages);
}
}
private static void warnIfPackagesDefinedOverlapsGlobalPackages(Set<String> internalPackages, List<String> globalPackages)
throws MojoExecutionException {
Set<String> overlap = Sets.intersection(internalPackages, new HashSet<>(globalPackages));
if (! overlap.isEmpty()) {
throw new MojoExecutionException(
"The following packages are both global and included in the bundle:\n " + String.join("\n ", overlap));
}
}
private Collection<String> osgiExportPackages(Map<String, ExportPackageAnnotation> exportedPackages) {
return exportedPackages.entrySet().stream().map(entry -> entry.getKey() + ";version=" + entry.getValue().osgiVersion())
.collect(Collectors.toList());
}
private static String trimWhitespace(Optional<String> lines) {
return Stream.of(lines.orElse("").split(",")).map(String::trim).collect(Collectors.joining(","));
}
private Map<String, String> manifestContent(MavenProject project, Collection<Artifact> jarArtifactsToInclude,
Map<String, Optional<String>> manualImports, Collection<Import> imports, PackageTally pluginPackageTally) {
Map<String, String> ret = new HashMap<>();
String importPackage = Stream.concat(manualImports.entrySet().stream().map(e -> asOsgiImport(e.getKey(), e.getValue())),
imports.stream().map(Import::asOsgiImport)).sorted().collect(Collectors.joining(","));
String exportPackage = osgiExportPackages(pluginPackageTally.exportedPackages()).stream().sorted().collect(Collectors.joining(","));
for (Pair<String, String> element : Arrays.asList(
Pair.of("Created-By", "vespa container maven plugin"),
Pair.of("Bundle-ManifestVersion", "2"),
Pair.of("Bundle-Name", project.getName()),
Pair.of("Bundle-SymbolicName", bundleSymbolicName),
Pair.of("Bundle-Version", asBundleVersion(bundleVersion)),
Pair.of("Bundle-Vendor", "Yahoo!"),
Pair.of("Bundle-ClassPath", bundleClassPath(jarArtifactsToInclude)),
Pair.of("Bundle-Activator", bundleActivator),
Pair.of("X-JDisc-Privileged-Activator", jdiscPrivilegedActivator),
Pair.of("Main-Class", mainClass),
Pair.of("X-JDisc-Application", discApplicationClass),
Pair.of("X-JDisc-Preinstall-Bundle", trimWhitespace(Optional.ofNullable(discPreInstallBundle))),
Pair.of("X-Config-Models", configModels),
Pair.of("X-Jersey-Binding", jerseyBinding),
Pair.of("WebInfUrl", webInfUrl),
Pair.of("Import-Package", importPackage),
Pair.of("Export-Package", exportPackage))) {
if (element.getValue() != null && ! element.getValue().isEmpty()) {
ret.put(element.getKey(), element.getValue());
}
}
return ret;
}
private static String asOsgiImport(String packageName, Optional<String> version) {
return version.map(s -> packageName + ";version=" + quote(s)).orElse(packageName);
}
private static String quote(String s) {
return "\"" + s + "\"";
}
private static void createManifestFile(File outputDirectory, Map<String, String> manifestContent) {
Manifest manifest = toManifest(manifestContent);
withFileOutputStream(new File(outputDirectory, JarFile.MANIFEST_NAME), outputStream -> {
manifest.write(outputStream);
return null;
});
}
private static Manifest toManifest(Map<String, String> manifestContent) {
Manifest manifest = new Manifest();
Attributes mainAttributes = manifest.getMainAttributes();
mainAttributes.put(Attributes.Name.MANIFEST_VERSION, "1.0");
manifestContent.forEach(mainAttributes::putValue);
return manifest;
}
private static String bundleClassPath(Collection<Artifact> artifactsToInclude) {
return Stream.concat(Stream.of("."), artifactsToInclude.stream().map(GenerateOsgiManifestMojo::dependencyPath))
.collect(Collectors.joining(","));
}
private static String dependencyPath(Artifact artifact) {
return "dependencies/" + artifact.getFile().getName();
}
private static String asBundleVersion(String projectVersion) {
if (projectVersion == null) {
throw new IllegalArgumentException("Missing project version.");
}
String[] parts = projectVersion.split("-", 2);
List<String> numericPart = Stream.of(parts[0].split("\\.")).map(s -> Strings.replaceEmptyString(s, "0")).limit(3)
.collect(Collectors.toList());
while (numericPart.size() < 3) {
numericPart.add("0");
}
return String.join(".", numericPart);
}
private void warnOnUnsupportedArtifacts(Collection<Artifact> nonJarArtifacts) {
List<Artifact> unsupportedArtifacts = nonJarArtifacts.stream().filter(a -> ! a.getType().equals("pom"))
.collect(Collectors.toList());
unsupportedArtifacts.forEach(artifact -> getLog()
.warn(String.format("Unsupported artifact '%s': Type '%s' is not supported. Please file a feature request.",
artifact.getId(), artifact.getType())));
}
private PackageTally getProjectClassesTally() {
File outputDirectory = new File(project.getBuild().getOutputDirectory());
List<ClassFileMetaData> analyzedClasses = allDescendantFiles(outputDirectory).filter(file -> file.getName().endsWith(".class"))
.map(Analyze::analyzeClass).collect(Collectors.toList());
return PackageTally.fromAnalyzedClassFiles(analyzedClasses);
}
private static PackageTally definedPackages(Collection<Artifact> jarArtifacts) {
return PackageTally.combine(jarArtifacts.stream().map(ja -> withJarFile(ja.getFile(), GenerateOsgiManifestMojo::definedPackages))
.collect(Collectors.toList()));
}
private static PackageTally definedPackages(JarFile jarFile) throws MojoExecutionException {
List<ClassFileMetaData> analyzedClasses = new ArrayList<>();
for (Enumeration<JarEntry> entries = jarFile.entries(); entries.hasMoreElements();) {
JarEntry entry = entries.nextElement();
if (! entry.isDirectory() && entry.getName().endsWith(".class")) {
analyzedClasses.add(analyzeClass(jarFile, entry));
}
}
return PackageTally.fromAnalyzedClassFiles(analyzedClasses);
}
private static ClassFileMetaData analyzeClass(JarFile jarFile, JarEntry entry) throws MojoExecutionException {
try {
return withInputStream(jarFile, entry, Analyze::analyzeClass);
} catch (Exception e) {
throw new MojoExecutionException(
String.format("While analyzing the class '%s' in jar file '%s'", entry.getName(), jarFile.getName()), e);
}
}
private static Map<String, Optional<String>> getManualImports(String importPackage) {
try {
Map<String, Optional<String>> ret = new HashMap<>();
List<Export> imports = parseImportPackages(importPackage);
for (Export imp : imports) {
Optional<String> version = getVersionThrowOthers(imp.getParameters());
imp.getPackageNames().forEach(pn -> ret.put(pn, version));
}
return ret;
} catch (Exception e) {
throw new RuntimeException("Error in Import-Package:" + importPackage, e);
}
}
private static Optional<String> getVersionThrowOthers(List<ExportPackages.Parameter> parameters) {
if (parameters.size() == 1 && "version".equals(parameters.get(0).getName())) {
return Optional.of(parameters.get(0).getValue());
} else if (parameters.size() == 0) {
return Optional.empty();
} else {
List<String> paramNames = parameters.stream().map(ExportPackages.Parameter::getName).collect(Collectors.toList());
throw new RuntimeException("A single, optional version parameter expected, but got " + paramNames);
}
}
private static List<Export> parseImportPackages(String importPackages) {
return ExportPackageParser.parseExports(importPackages);
}
private static Optional<String> emptyToNone(String str) {
return Optional.ofNullable(str).map(String::trim).filter(s -> ! s.isEmpty());
}
} |
Almost all internal bundles depend on `container-dev` which pulls in `jdisc_core`. AFAIK, only yolean and security-utils do not, although there could be a few more. For bundles in general (and user bundles in particular) they must have jdisc_core to get necessary Import-Packages. | public void execute() throws MojoExecutionException {
try {
Artifacts.ArtifactSet artifactSet = Artifacts.getArtifacts(project);
warnOnUnsupportedArtifacts(artifactSet.getNonJarArtifacts());
AnalyzeBundle.PublicPackages publicPackagesFromProvidedJars = publicPackagesAggregated(
artifactSet.getJarArtifactsProvided().stream().map(Artifact::getFile).collect(Collectors.toList()));
PackageTally projectPackages = getProjectClassesTally();
PackageTally compileJarsPackages = definedPackages(artifactSet.getJarArtifactsToInclude());
PackageTally includedPackages = projectPackages.combine(compileJarsPackages);
warnIfPackagesDefinedOverlapsGlobalPackages(includedPackages.definedPackages(), publicPackagesFromProvidedJars.globals);
if (getLog().isDebugEnabled()) {
getLog().debug("Referenced packages = " + includedPackages.referencedPackages());
getLog().debug("Defined packages = " + includedPackages.definedPackages());
getLog().debug("Exported packages of dependencies = " + publicPackagesFromProvidedJars.exports.stream()
.map(e -> "(" + e.getPackageNames().toString() + ", " + e.version().orElse("")).collect(Collectors.joining(", ")));
}
if (hasJdiscCoreProvided(artifactSet.getJarArtifactsProvided())) {
logMissingPackages(publicPackagesFromProvidedJars, projectPackages, compileJarsPackages, includedPackages);
} else {
getLog().warn("This project does not have jdisc_core as provided dependency, so the " +
"generated 'Import-Package' OSGi header may be missing important packages.");
}
Map<String, Import> calculatedImports = calculateImports(includedPackages.referencedPackages(),
includedPackages.definedPackages(),
exportsByPackageName(publicPackagesFromProvidedJars.exports));
Map<String, Optional<String>> manualImports = emptyToNone(importPackage).map(GenerateOsgiManifestMojo::getManualImports)
.orElseGet(HashMap::new);
for (String packageName : manualImports.keySet()) {
calculatedImports.remove(packageName);
}
createManifestFile(new File(project.getBuild().getOutputDirectory()), manifestContent(project,
artifactSet.getJarArtifactsToInclude(), manualImports, calculatedImports.values(), includedPackages));
} catch (Exception e) {
throw new MojoExecutionException("Failed generating osgi manifest", e);
}
} | getLog().warn("This project does not have jdisc_core as provided dependency, so the " + | public void execute() throws MojoExecutionException {
try {
Artifacts.ArtifactSet artifactSet = Artifacts.getArtifacts(project);
warnOnUnsupportedArtifacts(artifactSet.getNonJarArtifacts());
AnalyzeBundle.PublicPackages publicPackagesFromProvidedJars = publicPackagesAggregated(
artifactSet.getJarArtifactsProvided().stream().map(Artifact::getFile).collect(Collectors.toList()));
Set<String> exportedPackagesFromProvidedDeps = publicPackagesFromProvidedJars.exportedPackageNames();
PackageTally projectPackages = getProjectClassesTally();
PackageTally compileJarsPackages = definedPackages(artifactSet.getJarArtifactsToInclude());
PackageTally includedPackages = projectPackages.combine(compileJarsPackages);
warnIfPackagesDefinedOverlapsGlobalPackages(includedPackages.definedPackages(), publicPackagesFromProvidedJars.globals);
logDebugPackageSets(publicPackagesFromProvidedJars, includedPackages);
if (hasJdiscCoreProvided(artifactSet.getJarArtifactsProvided())) {
logMissingPackages(exportedPackagesFromProvidedDeps, projectPackages, compileJarsPackages, includedPackages);
} else {
getLog().warn("This project does not have jdisc_core as provided dependency, so the " +
"generated 'Import-Package' OSGi header may be missing important packages.");
}
logOverlappingPackages(projectPackages, exportedPackagesFromProvidedDeps);
logUnnecessaryPackages(compileJarsPackages, exportedPackagesFromProvidedDeps);
Map<String, Import> calculatedImports = calculateImports(includedPackages.referencedPackages(),
includedPackages.definedPackages(),
exportsByPackageName(publicPackagesFromProvidedJars.exports));
Map<String, Optional<String>> manualImports = emptyToNone(importPackage).map(GenerateOsgiManifestMojo::getManualImports)
.orElseGet(HashMap::new);
for (String packageName : manualImports.keySet()) {
calculatedImports.remove(packageName);
}
createManifestFile(new File(project.getBuild().getOutputDirectory()), manifestContent(project,
artifactSet.getJarArtifactsToInclude(), manualImports, calculatedImports.values(), includedPackages));
} catch (Exception e) {
throw new MojoExecutionException("Failed generating osgi manifest", e);
}
} | class GenerateOsgiManifestMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}")
private MavenProject project = null;
@Parameter
private String discApplicationClass = null;
@Parameter
private String discPreInstallBundle = null;
@Parameter(alias = "Bundle-Version", defaultValue = "${project.version}")
private String bundleVersion = null;
@Parameter(alias = "Bundle-SymbolicName", defaultValue = "${project.artifactId}")
private String bundleSymbolicName = null;
@Parameter(alias = "Bundle-Activator")
private String bundleActivator = null;
@Parameter(alias = "X-JDisc-Privileged-Activator")
private String jdiscPrivilegedActivator = null;
@Parameter(alias = "X-Config-Models")
private String configModels = null;
@Parameter(alias = "Import-Package")
private String importPackage = null;
@Parameter(alias = "WebInfUrl")
private String webInfUrl = null;
@Parameter(alias = "Main-Class")
private String mainClass = null;
@Parameter(alias = "X-Jersey-Binding")
private String jerseyBinding = null;
private boolean hasJdiscCoreProvided(List<Artifact> providedArtifacts) {
return providedArtifacts.stream().anyMatch(artifact -> artifact.getArtifactId().equals("jdisc_core"));
}
private void logMissingPackages(AnalyzeBundle.PublicPackages publicPackagesFromProvidedJars, PackageTally projectPackages, PackageTally compileJarPackages, PackageTally includedPackages) {
Set<String> exportedPackagesFromProvidedDeps = publicPackagesFromProvidedJars.exports
.stream()
.map(Export::getPackageNames)
.flatMap(Collection::stream)
.collect(Collectors.toSet());
Set<String> definedAndExportedPackages = Sets.union(includedPackages.definedPackages(), exportedPackagesFromProvidedDeps);
Set<String> missingProjectPackages = missingPackages(projectPackages, definedAndExportedPackages);
if (! missingProjectPackages.isEmpty()) {
getLog().warn("Packages unavailable runtime are referenced from project classes " +
"(annotations can usually be ignored): " + missingProjectPackages);
}
Set<String> missingCompilePackages = missingPackages(compileJarPackages, definedAndExportedPackages);
if (! missingCompilePackages.isEmpty()) {
getLog().info("Packages unavailable runtime are referenced from compile scoped jars " +
"(annotations can usually be ignored): " + missingCompilePackages);
}
}
private static Set<String> missingPackages(PackageTally projectPackages, Set<String> definedAndExportedPackages) {
return Sets.difference(projectPackages.referencedPackages(), definedAndExportedPackages).stream()
.filter(pkg -> !pkg.startsWith("java."))
.collect(Collectors.toSet());
}
private static void warnIfPackagesDefinedOverlapsGlobalPackages(Set<String> internalPackages, List<String> globalPackages)
throws MojoExecutionException {
Set<String> overlap = Sets.intersection(internalPackages, new HashSet<>(globalPackages));
if (! overlap.isEmpty()) {
throw new MojoExecutionException(
"The following packages are both global and included in the bundle:\n " + String.join("\n ", overlap));
}
}
private Collection<String> osgiExportPackages(Map<String, ExportPackageAnnotation> exportedPackages) {
return exportedPackages.entrySet().stream().map(entry -> entry.getKey() + ";version=" + entry.getValue().osgiVersion())
.collect(Collectors.toList());
}
private static String trimWhitespace(Optional<String> lines) {
return Stream.of(lines.orElse("").split(",")).map(String::trim).collect(Collectors.joining(","));
}
private Map<String, String> manifestContent(MavenProject project, Collection<Artifact> jarArtifactsToInclude,
Map<String, Optional<String>> manualImports, Collection<Import> imports, PackageTally pluginPackageTally) {
Map<String, String> ret = new HashMap<>();
String importPackage = Stream.concat(manualImports.entrySet().stream().map(e -> asOsgiImport(e.getKey(), e.getValue())),
imports.stream().map(Import::asOsgiImport)).sorted().collect(Collectors.joining(","));
String exportPackage = osgiExportPackages(pluginPackageTally.exportedPackages()).stream().sorted().collect(Collectors.joining(","));
for (Pair<String, String> element : Arrays.asList(
Pair.of("Created-By", "vespa container maven plugin"),
Pair.of("Bundle-ManifestVersion", "2"),
Pair.of("Bundle-Name", project.getName()),
Pair.of("Bundle-SymbolicName", bundleSymbolicName),
Pair.of("Bundle-Version", asBundleVersion(bundleVersion)),
Pair.of("Bundle-Vendor", "Yahoo!"),
Pair.of("Bundle-ClassPath", bundleClassPath(jarArtifactsToInclude)),
Pair.of("Bundle-Activator", bundleActivator),
Pair.of("X-JDisc-Privileged-Activator", jdiscPrivilegedActivator),
Pair.of("Main-Class", mainClass),
Pair.of("X-JDisc-Application", discApplicationClass),
Pair.of("X-JDisc-Preinstall-Bundle", trimWhitespace(Optional.ofNullable(discPreInstallBundle))),
Pair.of("X-Config-Models", configModels),
Pair.of("X-Jersey-Binding", jerseyBinding),
Pair.of("WebInfUrl", webInfUrl),
Pair.of("Import-Package", importPackage),
Pair.of("Export-Package", exportPackage))) {
if (element.getValue() != null && ! element.getValue().isEmpty()) {
ret.put(element.getKey(), element.getValue());
}
}
return ret;
}
private static String asOsgiImport(String packageName, Optional<String> version) {
return version.map(s -> packageName + ";version=" + quote(s)).orElse(packageName);
}
private static String quote(String s) {
return "\"" + s + "\"";
}
private static void createManifestFile(File outputDirectory, Map<String, String> manifestContent) {
Manifest manifest = toManifest(manifestContent);
withFileOutputStream(new File(outputDirectory, JarFile.MANIFEST_NAME), outputStream -> {
manifest.write(outputStream);
return null;
});
}
private static Manifest toManifest(Map<String, String> manifestContent) {
Manifest manifest = new Manifest();
Attributes mainAttributes = manifest.getMainAttributes();
mainAttributes.put(Attributes.Name.MANIFEST_VERSION, "1.0");
manifestContent.forEach(mainAttributes::putValue);
return manifest;
}
private static String bundleClassPath(Collection<Artifact> artifactsToInclude) {
return Stream.concat(Stream.of("."), artifactsToInclude.stream().map(GenerateOsgiManifestMojo::dependencyPath))
.collect(Collectors.joining(","));
}
private static String dependencyPath(Artifact artifact) {
return "dependencies/" + artifact.getFile().getName();
}
// Converts a Maven version (e.g. "7.1-SNAPSHOT" or "7") to a valid OSGi bundle
// version on the form major.minor.micro (e.g. "7.1.0").
// Throws IllegalArgumentException if projectVersion is null.
private static String asBundleVersion(String projectVersion) {
    if (projectVersion == null) {
        throw new IllegalArgumentException("Missing project version.");
    }
    // Strip any qualifier after the first '-' (e.g. "-SNAPSHOT").
    String[] parts = projectVersion.split("-", 2);
    // Keep at most three numeric segments, mapping empty segments to "0".
    // Collect into an explicit ArrayList: Collectors.toList() makes no
    // mutability guarantee, and we append padding below.
    List<String> numericPart = Stream.of(parts[0].split("\\."))
            .map(s -> Strings.replaceEmptyString(s, "0"))
            .limit(3)
            .collect(Collectors.toCollection(ArrayList::new));
    // Pad with zeros so the result always has exactly three segments.
    while (numericPart.size() < 3) {
        numericPart.add("0");
    }
    return String.join(".", numericPart);
}
// Logs a warning for each non-jar artifact of an unsupported packaging type.
// "pom" artifacts are expected (dependency aggregators) and silently ignored.
private void warnOnUnsupportedArtifacts(Collection<Artifact> nonJarArtifacts) {
List<Artifact> unsupportedArtifacts = nonJarArtifacts.stream().filter(a -> ! a.getType().equals("pom"))
.collect(Collectors.toList());
unsupportedArtifacts.forEach(artifact -> getLog()
.warn(String.format("Unsupported artifact '%s': Type '%s' is not supported. Please file a feature request.",
artifact.getId(), artifact.getType())));
}
// Analyzes every .class file under the project's build output directory and
// returns the tally of packages they define and reference.
private PackageTally getProjectClassesTally() {
File outputDirectory = new File(project.getBuild().getOutputDirectory());
List<ClassFileMetaData> analyzedClasses = allDescendantFiles(outputDirectory).filter(file -> file.getName().endsWith(".class"))
.map(Analyze::analyzeClass).collect(Collectors.toList());
return PackageTally.fromAnalyzedClassFiles(analyzedClasses);
}
// Aggregates the packages defined across all the given jar artifacts into one tally.
private static PackageTally definedPackages(Collection<Artifact> jarArtifacts) {
return PackageTally.combine(jarArtifacts.stream().map(ja -> withJarFile(ja.getFile(), GenerateOsgiManifestMojo::definedPackages))
.collect(Collectors.toList()));
}
private static PackageTally definedPackages(JarFile jarFile) throws MojoExecutionException {
    // Analyze every class file in the jar and aggregate the packages they define.
    List<ClassFileMetaData> classData = new ArrayList<>();
    Enumeration<JarEntry> entries = jarFile.entries();
    while (entries.hasMoreElements()) {
        JarEntry entry = entries.nextElement();
        boolean isClassFile = ! entry.isDirectory() && entry.getName().endsWith(".class");
        if (isClassFile) {
            classData.add(analyzeClass(jarFile, entry));
        }
    }
    return PackageTally.fromAnalyzedClassFiles(classData);
}
// Analyzes a single class-file jar entry, wrapping any failure with the entry
// and jar names so build errors point at the offending file.
private static ClassFileMetaData analyzeClass(JarFile jarFile, JarEntry entry) throws MojoExecutionException {
try {
return withInputStream(jarFile, entry, Analyze::analyzeClass);
} catch (Exception e) {
throw new MojoExecutionException(
String.format("While analyzing the class '%s' in jar file '%s'", entry.getName(), jarFile.getName()), e);
}
}
// Parses the manually configured Import-Package string into a map from
// package name to its (optional) version constraint.
private static Map<String, Optional<String>> getManualImports(String importPackage) {
try {
Map<String, Optional<String>> ret = new HashMap<>();
// Import-Package shares its grammar with Export-Package, so the export parser is reused.
List<Export> imports = parseImportPackages(importPackage);
for (Export imp : imports) {
// Only a single optional 'version' parameter is allowed per clause.
Optional<String> version = getVersionThrowOthers(imp.getParameters());
imp.getPackageNames().forEach(pn -> ret.put(pn, version));
}
return ret;
} catch (Exception e) {
throw new RuntimeException("Error in Import-Package:" + importPackage, e);
}
}
// Extracts the version from an import clause's parameter list. Accepts either
// no parameters (empty result) or exactly one "version" parameter; any other
// parameter set is a configuration error and throws RuntimeException.
private static Optional<String> getVersionThrowOthers(List<ExportPackages.Parameter> parameters) {
    if (parameters.isEmpty()) {
        return Optional.empty();
    }
    if (parameters.size() == 1 && "version".equals(parameters.get(0).getName())) {
        return Optional.of(parameters.get(0).getValue());
    }
    List<String> paramNames = parameters.stream().map(ExportPackages.Parameter::getName).collect(Collectors.toList());
    throw new RuntimeException("A single, optional version parameter expected, but got " + paramNames);
}
// Import-Package uses the same grammar as Export-Package, so reuse that parser.
private static List<Export> parseImportPackages(String importPackages) {
return ExportPackageParser.parseExports(importPackages);
}
private static Optional<String> emptyToNone(String str) {
    // Normalize a possibly-null configuration value: trim whitespace and
    // treat null or blank input as "not configured".
    if (str == null) {
        return Optional.empty();
    }
    String trimmed = str.trim();
    return trimmed.isEmpty() ? Optional.empty() : Optional.of(trimmed);
}
} | class GenerateOsgiManifestMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}")
private MavenProject project = null;
@Parameter
private String discApplicationClass = null;
@Parameter
private String discPreInstallBundle = null;
@Parameter(alias = "Bundle-Version", defaultValue = "${project.version}")
private String bundleVersion = null;
@Parameter(alias = "Bundle-SymbolicName", defaultValue = "${project.artifactId}")
private String bundleSymbolicName = null;
@Parameter(alias = "Bundle-Activator")
private String bundleActivator = null;
@Parameter(alias = "X-JDisc-Privileged-Activator")
private String jdiscPrivilegedActivator = null;
@Parameter(alias = "X-Config-Models")
private String configModels = null;
@Parameter(alias = "Import-Package")
private String importPackage = null;
@Parameter(alias = "WebInfUrl")
private String webInfUrl = null;
@Parameter(alias = "Main-Class")
private String mainClass = null;
@Parameter(alias = "X-Jersey-Binding")
private String jerseyBinding = null;
// Dumps the package sets used in import/export computation, at debug level only.
private void logDebugPackageSets(AnalyzeBundle.PublicPackages publicPackagesFromProvidedJars, PackageTally includedPackages) {
    if (getLog().isDebugEnabled()) {
        getLog().debug("Referenced packages = " + includedPackages.referencedPackages());
        getLog().debug("Defined packages = " + includedPackages.definedPackages());
        // Fix: the per-export "(names, version" tuple was missing its closing parenthesis.
        getLog().debug("Exported packages of dependencies = " + publicPackagesFromProvidedJars.exports.stream()
                .map(e -> "(" + e.getPackageNames().toString() + ", " + e.version().orElse("") + ")")
                .collect(Collectors.joining(", ")));
    }
}
// True if jdisc_core is among the provided-scope dependencies, i.e. the
// project builds against the jdisc container runtime.
private boolean hasJdiscCoreProvided(List<Artifact> providedArtifacts) {
return providedArtifacts.stream().anyMatch(artifact -> artifact.getArtifactId().equals("jdisc_core"));
}
// Logs packages referenced at compile time that will NOT be resolvable at
// runtime (neither bundled nor exported by provided dependencies).
// References from project classes get a warning; references from compile
// scoped jars only an info message.
private void logMissingPackages(Set<String> exportedPackagesFromProvidedJars,
PackageTally projectPackages,
PackageTally compileJarPackages,
PackageTally includedPackages) {
// Everything resolvable at runtime: packages inside the bundle plus packages exported by provided jars.
Set<String> definedAndExportedPackages = Sets.union(includedPackages.definedPackages(), exportedPackagesFromProvidedJars);
Set<String> missingProjectPackages = projectPackages.referencedPackagesMissingFrom(definedAndExportedPackages);
if (! missingProjectPackages.isEmpty()) {
getLog().warn("Packages unavailable runtime are referenced from project classes " +
"(annotations can usually be ignored): " + missingProjectPackages);
}
Set<String> missingCompilePackages = compileJarPackages.referencedPackagesMissingFrom(definedAndExportedPackages);
if (! missingCompilePackages.isEmpty()) {
getLog().info("Packages unavailable runtime are referenced from compile scoped jars " +
"(annotations can usually be ignored): " + missingCompilePackages);
}
}
// Warns when project classes define packages that provided-scope dependencies
// already export; such split packages cause resolution surprises in OSGi.
private void logOverlappingPackages(PackageTally projectPackages,
Set<String> exportedPackagesFromProvidedDeps) {
Set<String> overlappingProjectPackages = Sets.intersection(projectPackages.definedPackages(), exportedPackagesFromProvidedDeps);
if (! overlappingProjectPackages.isEmpty()) {
getLog().warn("Project classes use the following packages that are already defined in provided scoped dependencies: "
+ overlappingProjectPackages);
}
}
/*
* This mostly detects packages re-exported via composite bundles like jdisc_core and container-disc.
* An artifact can only be represented once, either in compile or provided scope. So if the project
* adds an artifact in compile scope that we deploy as a pre-installed bundle, we won't see the same
* artifact as provided via container-dev and hence can't detect the duplicate packages.
*/
private void logUnnecessaryPackages(PackageTally compileJarsPackages,
Set<String> exportedPackagesFromProvidedDeps) {
// Packages both embedded via compile scope and exported by provided deps are
// most likely redundant copies of what the jdisc runtime already supplies.
Set<String> unnecessaryPackages = Sets.intersection(compileJarsPackages.definedPackages(), exportedPackagesFromProvidedDeps);
if (! unnecessaryPackages.isEmpty()) {
getLog().info("Compile scoped jars contain the following packages that are most likely " +
"available from jdisc runtime: " + unnecessaryPackages);
}
}
// Fails the build if any bundled (internal) package is also declared global;
// a package must be one or the other, never both.
private static void warnIfPackagesDefinedOverlapsGlobalPackages(Set<String> internalPackages, List<String> globalPackages)
throws MojoExecutionException {
Set<String> overlap = Sets.intersection(internalPackages, new HashSet<>(globalPackages));
if (! overlap.isEmpty()) {
throw new MojoExecutionException(
"The following packages are both global and included in the bundle:\n " + String.join("\n ", overlap));
}
}
// Formats each exported package as an OSGi Export-Package clause,
// e.g. "com.example;version=1.2.3".
private Collection<String> osgiExportPackages(Map<String, ExportPackageAnnotation> exportedPackages) {
return exportedPackages.entrySet().stream().map(entry -> entry.getKey() + ";version=" + entry.getValue().osgiVersion())
.collect(Collectors.toList());
}
private static String trimWhitespace(Optional<String> lines) {
    // Trim whitespace around each comma-separated element, preserving order.
    // An absent value behaves like the empty string.
    String raw = lines.orElse("");
    StringBuilder result = new StringBuilder();
    for (String element : raw.split(",")) {
        if (result.length() > 0) {
            result.append(',');
        }
        result.append(element.trim());
    }
    return result.toString();
}
// Assembles all OSGi manifest headers for the bundle.
// Only headers with a non-null, non-empty value end up in the returned map.
private Map<String, String> manifestContent(MavenProject project, Collection<Artifact> jarArtifactsToInclude,
Map<String, Optional<String>> manualImports, Collection<Import> imports, PackageTally pluginPackageTally) {
Map<String, String> ret = new HashMap<>();
// Manual (plugin-config) and computed imports are merged and sorted for a stable, reproducible header.
String importPackage = Stream.concat(manualImports.entrySet().stream().map(e -> asOsgiImport(e.getKey(), e.getValue())),
imports.stream().map(Import::asOsgiImport)).sorted().collect(Collectors.joining(","));
String exportPackage = osgiExportPackages(pluginPackageTally.exportedPackages()).stream().sorted().collect(Collectors.joining(","));
for (Pair<String, String> element : Arrays.asList(
Pair.of("Created-By", "vespa container maven plugin"),
Pair.of("Bundle-ManifestVersion", "2"),
Pair.of("Bundle-Name", project.getName()),
Pair.of("Bundle-SymbolicName", bundleSymbolicName),
Pair.of("Bundle-Version", asBundleVersion(bundleVersion)),
Pair.of("Bundle-Vendor", "Yahoo!"),
Pair.of("Bundle-ClassPath", bundleClassPath(jarArtifactsToInclude)),
Pair.of("Bundle-Activator", bundleActivator),
Pair.of("X-JDisc-Privileged-Activator", jdiscPrivilegedActivator),
Pair.of("Main-Class", mainClass),
Pair.of("X-JDisc-Application", discApplicationClass),
Pair.of("X-JDisc-Preinstall-Bundle", trimWhitespace(Optional.ofNullable(discPreInstallBundle))),
Pair.of("X-Config-Models", configModels),
Pair.of("X-Jersey-Binding", jerseyBinding),
Pair.of("WebInfUrl", webInfUrl),
Pair.of("Import-Package", importPackage),
Pair.of("Export-Package", exportPackage))) {
// Drop headers with no configured/computed value.
if (element.getValue() != null && ! element.getValue().isEmpty()) {
ret.put(element.getKey(), element.getValue());
}
}
return ret;
}
private static String asOsgiImport(String packageName, Optional<String> version) {
return version.map(s -> packageName + ";version=" + quote(s)).orElse(packageName);
}
private static String quote(String s) {
return "\"" + s + "\"";
}
private static void createManifestFile(File outputDirectory, Map<String, String> manifestContent) {
Manifest manifest = toManifest(manifestContent);
withFileOutputStream(new File(outputDirectory, JarFile.MANIFEST_NAME), outputStream -> {
manifest.write(outputStream);
return null;
});
}
private static Manifest toManifest(Map<String, String> manifestContent) {
Manifest manifest = new Manifest();
Attributes mainAttributes = manifest.getMainAttributes();
mainAttributes.put(Attributes.Name.MANIFEST_VERSION, "1.0");
manifestContent.forEach(mainAttributes::putValue);
return manifest;
}
private static String bundleClassPath(Collection<Artifact> artifactsToInclude) {
return Stream.concat(Stream.of("."), artifactsToInclude.stream().map(GenerateOsgiManifestMojo::dependencyPath))
.collect(Collectors.joining(","));
}
private static String dependencyPath(Artifact artifact) {
return "dependencies/" + artifact.getFile().getName();
}
private static String asBundleVersion(String projectVersion) {
if (projectVersion == null) {
throw new IllegalArgumentException("Missing project version.");
}
String[] parts = projectVersion.split("-", 2);
List<String> numericPart = Stream.of(parts[0].split("\\.")).map(s -> Strings.replaceEmptyString(s, "0")).limit(3)
.collect(Collectors.toList());
while (numericPart.size() < 3) {
numericPart.add("0");
}
return String.join(".", numericPart);
}
private void warnOnUnsupportedArtifacts(Collection<Artifact> nonJarArtifacts) {
List<Artifact> unsupportedArtifacts = nonJarArtifacts.stream().filter(a -> ! a.getType().equals("pom"))
.collect(Collectors.toList());
unsupportedArtifacts.forEach(artifact -> getLog()
.warn(String.format("Unsupported artifact '%s': Type '%s' is not supported. Please file a feature request.",
artifact.getId(), artifact.getType())));
}
private PackageTally getProjectClassesTally() {
File outputDirectory = new File(project.getBuild().getOutputDirectory());
List<ClassFileMetaData> analyzedClasses = allDescendantFiles(outputDirectory).filter(file -> file.getName().endsWith(".class"))
.map(Analyze::analyzeClass).collect(Collectors.toList());
return PackageTally.fromAnalyzedClassFiles(analyzedClasses);
}
private static PackageTally definedPackages(Collection<Artifact> jarArtifacts) {
return PackageTally.combine(jarArtifacts.stream().map(ja -> withJarFile(ja.getFile(), GenerateOsgiManifestMojo::definedPackages))
.collect(Collectors.toList()));
}
private static PackageTally definedPackages(JarFile jarFile) throws MojoExecutionException {
List<ClassFileMetaData> analyzedClasses = new ArrayList<>();
for (Enumeration<JarEntry> entries = jarFile.entries(); entries.hasMoreElements();) {
JarEntry entry = entries.nextElement();
if (! entry.isDirectory() && entry.getName().endsWith(".class")) {
analyzedClasses.add(analyzeClass(jarFile, entry));
}
}
return PackageTally.fromAnalyzedClassFiles(analyzedClasses);
}
private static ClassFileMetaData analyzeClass(JarFile jarFile, JarEntry entry) throws MojoExecutionException {
try {
return withInputStream(jarFile, entry, Analyze::analyzeClass);
} catch (Exception e) {
throw new MojoExecutionException(
String.format("While analyzing the class '%s' in jar file '%s'", entry.getName(), jarFile.getName()), e);
}
}
private static Map<String, Optional<String>> getManualImports(String importPackage) {
try {
Map<String, Optional<String>> ret = new HashMap<>();
List<Export> imports = parseImportPackages(importPackage);
for (Export imp : imports) {
Optional<String> version = getVersionThrowOthers(imp.getParameters());
imp.getPackageNames().forEach(pn -> ret.put(pn, version));
}
return ret;
} catch (Exception e) {
throw new RuntimeException("Error in Import-Package:" + importPackage, e);
}
}
private static Optional<String> getVersionThrowOthers(List<ExportPackages.Parameter> parameters) {
if (parameters.size() == 1 && "version".equals(parameters.get(0).getName())) {
return Optional.of(parameters.get(0).getValue());
} else if (parameters.size() == 0) {
return Optional.empty();
} else {
List<String> paramNames = parameters.stream().map(ExportPackages.Parameter::getName).collect(Collectors.toList());
throw new RuntimeException("A single, optional version parameter expected, but got " + paramNames);
}
}
private static List<Export> parseImportPackages(String importPackages) {
return ExportPackageParser.parseExports(importPackages);
}
private static Optional<String> emptyToNone(String str) {
return Optional.ofNullable(str).map(String::trim).filter(s -> ! s.isEmpty());
}
} |
Remove | public void testMV() throws Exception {
String sql = "select count(distinct k7), count(distinct k8) from duplicate_table_with_null;";
String planFragment = getFragmentPlan(sql);
System.out.println("FIXME : " + planFragment);
Assert.assertTrue(planFragment.contains("OUTPUT EXPRS:16: count(distinct 7: k7) | 17: count(distinct 8: k8)"));
Assert.assertTrue(planFragment.contains("14: mv_bitmap_union_k7"));
Assert.assertTrue(planFragment.contains("15: mv_bitmap_union_k8"));
Assert.assertTrue(planFragment.contains("rollup: bitmap_mv"));
} | System.out.println("FIXME : " + planFragment); | public void testMV() throws Exception {
String sql = "select count(distinct k7), count(distinct k8) from duplicate_table_with_null;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("OUTPUT EXPRS:16: count(distinct 7: k7) | 17: count(distinct 8: k8)"));
Assert.assertTrue(planFragment.contains("14: mv_bitmap_union_k7"));
Assert.assertTrue(planFragment.contains("15: mv_bitmap_union_k8"));
Assert.assertTrue(planFragment.contains("rollup: bitmap_mv"));
} | class PlanFragmentWithCostTest extends PlanTestBase {
@BeforeClass
// One-time fixture: boots the planner test harness, inflates table row-count
// statistics so cost-based decisions are observable, and creates the
// rollup table and bitmap materialized view the tests rely on.
public static void beforeClass() throws Exception {
PlanTestBase.beforeClass();
FeConstants.default_scheduler_interval_millisecond = 1;
Catalog catalog = connectContext.getCatalog();
// Large row counts steer the optimizer away from trivial plans.
OlapTable table2 = (OlapTable) catalog.getDb("default_cluster:test").getTable("test_all_type");
setTableStatistics(table2, 10000);
OlapTable t0 = (OlapTable) catalog.getDb("default_cluster:test").getTable("t0");
setTableStatistics(t0, 10000);
StarRocksAssert starRocksAssert = new StarRocksAssert(connectContext);
// Aggregate table with five rollups (r1..r5), used by the rollup-selection tests.
starRocksAssert.withTable("CREATE TABLE test_mv\n" +
" (\n" +
" event_day int,\n" +
" siteid INT,\n" +
" citycode SMALLINT,\n" +
" username VARCHAR(32),\n" +
" pv BIGINT SUM DEFAULT '0'\n" +
" )\n" +
" AGGREGATE KEY(event_day, siteid, citycode, username)\n" +
" DISTRIBUTED BY HASH(siteid) BUCKETS 10\n" +
" rollup (\n" +
" r1(event_day,siteid),\n" +
" r2(event_day,citycode),\n" +
" r3(event_day),\n" +
" r4(event_day,pv),\n" +
" r5(event_day,siteid,pv)\n" +
" )\n" +
" PROPERTIES(\"replication_num\" = \"1\");");
// Duplicate-key table plus a bitmap_union MV for the count(distinct) rewrite test.
starRocksAssert.withTable(" CREATE TABLE `duplicate_table_with_null` ( `k1` date, `k2` datetime, " +
"`k3` char(20), `k4` varchar(20), `k5` boolean, `k6` tinyint, " +
"`k7` smallint, `k8` int, `k9` bigint, `k10` largeint, " +
"`k11` float, `k12` double, `k13` decimal(27,9) ) " +
"ENGINE=OLAP DUPLICATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`) " +
"COMMENT \"OLAP\" DISTRIBUTED BY HASH(`k1`, `k2`, `k3`) " +
"BUCKETS 3 PROPERTIES ( \"replication_num\" = \"1\", " +
"\"storage_format\" = \"v2\" );");
starRocksAssert.withMaterializedView("CREATE MATERIALIZED VIEW bitmap_mv\n" +
" AS\n" +
" SELECT k1,k2,k3,k4, bitmap_union(to_bitmap(k7)), " +
"bitmap_union(to_bitmap(k8)) FROM duplicate_table_with_null group by k1,k2,k3,k4");
FeConstants.runningUnitTest = true;
}
@Before
// Reset the forced aggregation stage so each test starts from default planner behavior.
public void before() {
connectContext.getSessionVariable().setNewPlanerAggStage(0);
}
private static final String V1 = "v1";
private static final String V2 = "v2";
private static final String V3 = "v3";
@Test
// With low NDV (100) on v2, the optimizer should pick a two-stage plan:
// local streaming pre-aggregation followed by a merge-finalize stage.
public void testAggWithLowCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
// Stub column statistics: 100 distinct values for t0.v2.
mockedStatisticStorage.getColumnStatistics((Table) any, Lists.newArrayList("v2"));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 100));
}
};
String sql = "select sum(v2) from t0 group by v2";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:AGGREGATE (merge finalize)\n"
+ " | output: sum(4: sum(2: v2))\n"
+ " | group by: 2: v2"));
Assert.assertTrue(planFragment.contains(" 1:AGGREGATE (update serialize)\n"
+ " | STREAMING\n"
+ " | output: sum(2: v2)\n"
+ " | group by: 2: v2"));
}
@Test
public void testAggWithHighCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, Lists.newArrayList("v2"));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "select sum(v2) from t0 group by v2";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 2:AGGREGATE (update finalize)\n"
+ " | output: sum(2: v2)\n"
+ " | group by: 2: v2"));
Assert.assertFalse(planFragment.contains(" 1:AGGREGATE (update serialize)\n" +
" | STREAMING\n" +
" | output: sum(2: v2)\n" +
" | group by: 2: v2"));
}
@Test
public void testSortWithLowCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 100),
new ColumnStatistic(0.0, 100, 0.0, 10, 100));
}
};
String sql = "select v1, sum(v2) from t0 group by v1 order by v1";
String planFragment = getFragmentPlan(sql);
Assert.assertFalse(planFragment.contains("TOP-N"));
Assert.assertTrue(planFragment.contains(" 2:SORT\n"
+ " | order by: <slot 1> 1: v1 ASC\n"
+ " | offset: 0"));
}
@Test
public void testSortWithHighCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000),
new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "select v1, sum(v2) from t0 group by v1 order by v1";
String planFragment = getFragmentPlan(sql);
Assert.assertFalse(planFragment.contains("TOP-N"));
Assert.assertTrue(planFragment.contains(" 2:SORT\n"
+ " | order by: <slot 1> 1: v1 ASC\n"
+ " | offset: 0"));
}
@Test
public void testTopNWithLowCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 100),
new ColumnStatistic(0.0, 100, 0.0, 10, 100));
}
};
String sql = "select v1, sum(v2) from t0 group by v1 order by v1 limit 1";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 2:TOP-N\n"
+ " | order by: <slot 1> 1: v1 ASC\n"
+ " | offset: 0\n"
+ " | limit: 1"));
}
@Test
public void testTopNWithHighCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000),
new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "select v1, sum(v2) from t0 group by v1 order by v1 limit 1";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("TOP-N"));
Assert.assertTrue(planFragment.contains(" 2:TOP-N\n"
+ " | order by: <slot 1> 1: v1 ASC\n"
+ " | offset: 0\n"
+ " | limit: 1"));
}
@Test
public void testDistinctWithoutGroupByWithLowCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage)
throws Exception {
connectContext.getSessionVariable().setNewPlanerAggStage(4);
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 100),
new ColumnStatistic(0.0, 100, 0.0, 10, 100));
}
};
String sql = "select count(distinct v2), sum(v1) from t0";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:AGGREGATE (merge serialize)\n"
+ " | output: sum(5: sum(1: v1))\n"
+ " | group by: 2: v2"));
Assert.assertTrue(planFragment.contains(" 6:AGGREGATE (merge finalize)\n"
+ " | output: count(4: count(distinct 2: v2)), sum(5: sum(1: v1))\n"
+ " | group by: \n"
+ " | use vectorized: true"));
Assert.assertTrue(planFragment.contains(" STREAM DATA SINK\n"
+ " EXCHANGE ID: 05\n"
+ " UNPARTITIONED"));
Assert.assertFalse(planFragment.contains("PLAN FRAGMENT 3"));
}
@Test
public void testDistinctWithoutGroupByWithHighCardinalityForceOneStage(
@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
connectContext.getSessionVariable().setNewPlanerAggStage(1);
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000),
new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "select count(distinct v2), sum(v1) from t0";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 2:AGGREGATE (update finalize)\n"
+ " | output: multi_distinct_count(2: v2), sum(1: v1)\n"
+ " | group by:"));
}
@Test
public void testDistinctWithGroupByWithLowCardinalityForceThreeStage(
@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
connectContext.getSessionVariable().setNewPlanerAggStage(3);
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V2, V3));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 50),
new ColumnStatistic(0.0, 100, 0.0, 10, 50));
}
};
String sql = "select count(distinct v2) from t0 group by v3";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:AGGREGATE (merge serialize)\n"
+ " | group by: 2: v2, 3: v3"));
Assert.assertTrue(planFragment.contains(" STREAM DATA SINK\n"
+ " EXCHANGE ID: 06\n"
+ " UNPARTITIONED"));
}
@Test
public void testDistinctWithGroupByWithHighCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage)
throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V2, V3));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000),
new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "select count(distinct v2) from t0 group by v3";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 2:AGGREGATE (update finalize)\n"
+ " | output: multi_distinct_count(2: v2)\n"
+ " | group by: 3: v3"));
}
@Test
public void testPredicateRewrittenByProjectWithLowCardinality(
@Mocked MockTpchStatisticStorage mockedStatisticStorage)
throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V2, V3));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 10),
new ColumnStatistic(0.0, 100, 0.0, 10, 10));
}
};
String sql = "SELECT -v3 from t0 group by v3, v2 having -v3 < 63;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 4:Project\n"
+ " | <slot 4> : -1 * 3: v3"));
Assert.assertTrue(planFragment.contains("PREDICATES: -1 * 3: v3 < 63"));
}
@Test
public void testPredicateRewrittenByProjectWithHighCardinality(
@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V2, V3));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000),
new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "SELECT -v3 from t0 group by v3, v2 having -v3 < 63;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:Project\n"
+ " | <slot 4> : -1 * 3: v3"));
}
@Test
public void testShuffleInnerJoin() throws Exception {
UtFrameUtils.addMockBackend(10002);
UtFrameUtils.addMockBackend(10003);
Catalog catalog = connectContext.getCatalog();
OlapTable table1 = (OlapTable) catalog.getDb("default_cluster:test").getTable("t0");
setTableStatistics(table1, 10000);
OlapTable table2 = (OlapTable) catalog.getDb("default_cluster:test").getTable("test_all_type");
setTableStatistics(table2, 5000);
connectContext.getSessionVariable().setPreferJoinMethod("shuffle");
String sql = "SELECT v2,t1d from t0 join test_all_type on t0.v2 = test_all_type.t1d ;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 4:HASH JOIN\n"
+ " | join op: INNER JOIN (PARTITIONED)\n"
+ " | hash predicates:\n"
+ " | colocate: false, reason: \n"
+ " | equal join conjunct: 2: v2 = 7: t1d\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " |----3:EXCHANGE\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " 1:EXCHANGE"));
Assert.assertTrue(planFragment.contains(" EXCHANGE ID: 03\n"
+ " HASH_PARTITIONED: 7: t1d\n"
+ "\n"
+ " 2:OlapScanNode"));
Assert.assertTrue(planFragment.contains(" STREAM DATA SINK\n"
+ " EXCHANGE ID: 01\n"
+ " HASH_PARTITIONED: 2: v2\n"
+ "\n"
+ " 0:OlapScanNode"));
Catalog.getCurrentSystemInfo().dropBackend(10002);
Catalog.getCurrentSystemInfo().dropBackend(10003);
}
@Test
// The smaller build side (test_all_type) should be broadcast to the larger
// probe side (t0) for this inner join.
public void testBroadcastInnerJoin() throws Exception {
String sql = "SELECT v1, t1d from t0 join test_all_type on t0.v2 = test_all_type.t1d ;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:HASH JOIN\n"
+ " | join op: INNER JOIN (BROADCAST)\n"
+ " | hash predicates:\n"
+ " | colocate: false, reason: \n"
+ " | equal join conjunct: 2: v2 = 7: t1d\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " |----2:EXCHANGE\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " 0:OlapScanNode\n"
+ " TABLE: t0\n"));
}
@Test
public void testBroadcastInnerJoinWithCommutativity() throws Exception {
Catalog catalog = connectContext.getCatalog();
OlapTable table = (OlapTable) catalog.getDb("default_cluster:test").getTable("t0");
setTableStatistics(table, 1000);
String sql = "SELECT * from t0 join test_all_type on t0.v1 = test_all_type.t1d ;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:HASH JOIN\n"
+ " | join op: INNER JOIN (BROADCAST)\n"
+ " | hash predicates:\n"
+ " | colocate: false, reason: \n"
+ " | equal join conjunct: 7: t1d = 1: v1\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " |----2:EXCHANGE\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " 0:OlapScanNode\n"
+ " TABLE: test_all_type\n"));
setTableStatistics(table, 10000);
}
@Test
// Self-join on the distribution key should use a colocate join (no exchange).
public void testColocateJoin() throws Exception {
String sql = "SELECT * from t0 join t0 as b on t0.v1 = b.v1;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("join op: INNER JOIN (COLOCATE)"));
}
@Test
// Grouping on the distribution key allows one-phase (local finalize) aggregation.
public void testColocateAgg() throws Exception {
String sql = "SELECT count(*) from t0 group by t0.v1;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("1:AGGREGATE (update finalize)"));
}
@Test
// DISTINCT over an expression should not collapse into a local-only
// aggregation; an exchange must remain in the plan.
public void testDistinctExpr() throws Exception {
String sql = "SELECT DISTINCT - - v1 DIV - 98 FROM t0;";
String planFragment = getFragmentPlan(sql);
Assert.assertFalse(planFragment.contains(" 2:AGGREGATE (update finalize)\n" +
" | group by: <slot 4>\n" +
" | use vectorized: true\n" +
" | \n" +
" 1:Project"));
Assert.assertTrue(planFragment.contains("EXCHANGE"));
}
@Test
public void testRollUp() throws Exception {
String sql = "select event_day from test_mv group by event_day;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r3"));
sql = "select count(*) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r3"));
sql = "select count(*), event_day from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r3"));
sql = "select event_day from test_mv where citycode = 1 group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r2"));
sql = "select siteid from test_mv where event_day = 1 group by siteid;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r1"));
sql = "select siteid from test_mv group by siteid, event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r1"));
sql = "select siteid from test_mv group by siteid, username;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: test_mv"));
sql = "select siteid,sum(pv) from test_mv group by siteid, username;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: test_mv"));
sql = "select sum(pv) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r4"));
sql = "select max(pv) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: test_mv"));
sql = "select max(event_day) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r3"));
sql = "select max(event_day), sum(pv) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r4"));
sql = "select max(event_day), max(pv) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: test_mv"));
}
@Test
// TPC-H Q20 shape: with replication join enabled, the supplier/nation join
// should become a REPLICATED inner join and the semi join a BUCKET_SHUFFLE.
// Fixes: removed the duplicated @Test annotation (JUnit4 @Test is not
// repeatable) and a leftover System.out.println debug line.
public void testReplicatedJoin() throws Exception {
    connectContext.getSessionVariable().setEnableReplicationJoin(true);
    String sql = "select s_name, s_address from supplier, nation where s_suppkey in " +
            "( select ps_suppkey from partsupp where ps_partkey in ( select p_partkey from part where p_name like 'forest%' ) and ps_availqty > " +
            "( select 0.5 * sum(l_quantity) from lineitem where l_partkey = ps_partkey and l_suppkey = ps_suppkey and " +
            "l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1' year ) ) " +
            "and s_nationkey = n_nationkey and n_name = 'CANADA' order by s_name;";
    String plan = getFragmentPlan(sql);
    Assert.assertTrue(plan.contains(" 3:HASH JOIN\n" +
            " | join op: INNER JOIN (REPLICATED)\n" +
            " | hash predicates:\n" +
            " | colocate: false, reason: \n" +
            " | equal join conjunct: 4: S_NATIONKEY = 9: N_NATIONKEY"));
    Assert.assertTrue(plan.contains("18:HASH JOIN\n" +
            " | join op: LEFT SEMI JOIN (BUCKET_SHUFFLE)\n" +
            " | hash predicates:\n" +
            " | colocate: false, reason: \n" +
            " | equal join conjunct: 1: S_SUPPKEY = 15: PS_SUPPKEY"));
    // Restore the session default so later tests are unaffected.
    connectContext.getSessionVariable().setEnableReplicationJoin(false);
}
@Test
public void testReapNodeExchange() throws Exception {
String sql = "select v1, v2, SUM(v3) from t0 group by rollup(v1, v2)";
String plan = getFragmentPlan(sql);
Assert.assertTrue(plan.contains(" STREAM DATA SINK\n" +
" EXCHANGE ID: 03\n" +
" HASH_PARTITIONED: 1: v1, 2: v2, 5: GROUPING_ID\n" +
"\n" +
" 2:AGGREGATE (update serialize)\n" +
" | STREAMING\n" +
" | output: sum(3: v3)\n" +
" | group by: 1: v1, 2: v2, 5: GROUPING_ID\n" +
" | use vectorized: true\n" +
" | \n" +
" 1:REPEAT_NODE\n" +
" | repeat: repeat 2 lines [[], [1], [1, 2]]\n" +
" | use vectorized: true\n" +
" | \n" +
" 0:OlapScanNode"));
sql = "select v1, SUM(v3) from t0 group by rollup(v1)";
plan = getFragmentPlan(sql);
Assert.assertTrue(plan.contains(" STREAM DATA SINK\n" +
" EXCHANGE ID: 03\n" +
" HASH_PARTITIONED: 1: v1, 5: GROUPING_ID\n" +
"\n" +
" 2:AGGREGATE (update serialize)\n" +
" | STREAMING\n" +
" | output: sum(3: v3)\n" +
" | group by: 1: v1, 5: GROUPING_ID\n" +
" | use vectorized: true\n" +
" | \n" +
" 1:REPEAT_NODE\n" +
" | repeat: repeat 1 lines [[], [1]]\n" +
" | use vectorized: true\n" +
" | \n" +
" 0:OlapScanNode\n" +
" TABLE: t0\n" +
" PREAGGREGATION: ON"));
sql = "select SUM(v3) from t0 group by grouping sets(())";
plan = getFragmentPlan(sql);
System.out.println(plan);
Assert.assertTrue(plan.contains(" 3:EXCHANGE\n" +
" use vectorized: true\n" +
"\n" +
"PLAN FRAGMENT 2\n" +
" OUTPUT EXPRS:\n" +
" PARTITION: RANDOM\n" +
"\n" +
" STREAM DATA SINK\n" +
" EXCHANGE ID: 03\n" +
" HASH_PARTITIONED: 5: GROUPING_ID\n" +
"\n" +
" 2:AGGREGATE (update serialize)\n" +
" | STREAMING\n" +
" | output: sum(3: v3)\n" +
" | group by: 5: GROUPING_ID\n" +
" | use vectorized: true\n" +
" | \n" +
" 1:REPEAT_NODE"));
}
} | class PlanFragmentWithCostTest extends PlanTestBase {
@BeforeClass
public static void beforeClass() throws Exception {
PlanTestBase.beforeClass();
FeConstants.default_scheduler_interval_millisecond = 1;
Catalog catalog = connectContext.getCatalog();
OlapTable table2 = (OlapTable) catalog.getDb("default_cluster:test").getTable("test_all_type");
setTableStatistics(table2, 10000);
OlapTable t0 = (OlapTable) catalog.getDb("default_cluster:test").getTable("t0");
setTableStatistics(t0, 10000);
StarRocksAssert starRocksAssert = new StarRocksAssert(connectContext);
starRocksAssert.withTable("CREATE TABLE test_mv\n" +
" (\n" +
" event_day int,\n" +
" siteid INT,\n" +
" citycode SMALLINT,\n" +
" username VARCHAR(32),\n" +
" pv BIGINT SUM DEFAULT '0'\n" +
" )\n" +
" AGGREGATE KEY(event_day, siteid, citycode, username)\n" +
" DISTRIBUTED BY HASH(siteid) BUCKETS 10\n" +
" rollup (\n" +
" r1(event_day,siteid),\n" +
" r2(event_day,citycode),\n" +
" r3(event_day),\n" +
" r4(event_day,pv),\n" +
" r5(event_day,siteid,pv)\n" +
" )\n" +
" PROPERTIES(\"replication_num\" = \"1\");");
starRocksAssert.withTable(" CREATE TABLE `duplicate_table_with_null` ( `k1` date, `k2` datetime, " +
"`k3` char(20), `k4` varchar(20), `k5` boolean, `k6` tinyint, " +
"`k7` smallint, `k8` int, `k9` bigint, `k10` largeint, " +
"`k11` float, `k12` double, `k13` decimal(27,9) ) " +
"ENGINE=OLAP DUPLICATE KEY(`k1`, `k2`, `k3`, `k4`, `k5`) " +
"COMMENT \"OLAP\" DISTRIBUTED BY HASH(`k1`, `k2`, `k3`) " +
"BUCKETS 3 PROPERTIES ( \"replication_num\" = \"1\", " +
"\"storage_format\" = \"v2\" );");
starRocksAssert.withMaterializedView("CREATE MATERIALIZED VIEW bitmap_mv\n" +
" AS\n" +
" SELECT k1,k2,k3,k4, bitmap_union(to_bitmap(k7)), " +
"bitmap_union(to_bitmap(k8)) FROM duplicate_table_with_null group by k1,k2,k3,k4");
FeConstants.runningUnitTest = true;
}
@Before
public void before() {
connectContext.getSessionVariable().setNewPlanerAggStage(0);
}
private static final String V1 = "v1";
private static final String V2 = "v2";
private static final String V3 = "v3";
@Test
public void testAggWithLowCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, Lists.newArrayList("v2"));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 100));
}
};
String sql = "select sum(v2) from t0 group by v2";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:AGGREGATE (merge finalize)\n"
+ " | output: sum(4: sum(2: v2))\n"
+ " | group by: 2: v2"));
Assert.assertTrue(planFragment.contains(" 1:AGGREGATE (update serialize)\n"
+ " | STREAMING\n"
+ " | output: sum(2: v2)\n"
+ " | group by: 2: v2"));
}
@Test
public void testAggWithHighCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, Lists.newArrayList("v2"));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "select sum(v2) from t0 group by v2";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 2:AGGREGATE (update finalize)\n"
+ " | output: sum(2: v2)\n"
+ " | group by: 2: v2"));
Assert.assertFalse(planFragment.contains(" 1:AGGREGATE (update serialize)\n" +
" | STREAMING\n" +
" | output: sum(2: v2)\n" +
" | group by: 2: v2"));
}
@Test
public void testSortWithLowCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 100),
new ColumnStatistic(0.0, 100, 0.0, 10, 100));
}
};
String sql = "select v1, sum(v2) from t0 group by v1 order by v1";
String planFragment = getFragmentPlan(sql);
Assert.assertFalse(planFragment.contains("TOP-N"));
Assert.assertTrue(planFragment.contains(" 2:SORT\n"
+ " | order by: <slot 1> 1: v1 ASC\n"
+ " | offset: 0"));
}
@Test
public void testSortWithHighCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000),
new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "select v1, sum(v2) from t0 group by v1 order by v1";
String planFragment = getFragmentPlan(sql);
Assert.assertFalse(planFragment.contains("TOP-N"));
Assert.assertTrue(planFragment.contains(" 2:SORT\n"
+ " | order by: <slot 1> 1: v1 ASC\n"
+ " | offset: 0"));
}
@Test
public void testTopNWithLowCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 100),
new ColumnStatistic(0.0, 100, 0.0, 10, 100));
}
};
String sql = "select v1, sum(v2) from t0 group by v1 order by v1 limit 1";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 2:TOP-N\n"
+ " | order by: <slot 1> 1: v1 ASC\n"
+ " | offset: 0\n"
+ " | limit: 1"));
}
@Test
public void testTopNWithHighCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000),
new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "select v1, sum(v2) from t0 group by v1 order by v1 limit 1";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("TOP-N"));
Assert.assertTrue(planFragment.contains(" 2:TOP-N\n"
+ " | order by: <slot 1> 1: v1 ASC\n"
+ " | offset: 0\n"
+ " | limit: 1"));
}
@Test
public void testDistinctWithoutGroupByWithLowCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage)
throws Exception {
connectContext.getSessionVariable().setNewPlanerAggStage(4);
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 100),
new ColumnStatistic(0.0, 100, 0.0, 10, 100));
}
};
String sql = "select count(distinct v2), sum(v1) from t0";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:AGGREGATE (merge serialize)\n"
+ " | output: sum(5: sum(1: v1))\n"
+ " | group by: 2: v2"));
Assert.assertTrue(planFragment.contains(" 6:AGGREGATE (merge finalize)\n"
+ " | output: count(4: count(distinct 2: v2)), sum(5: sum(1: v1))\n"
+ " | group by: \n"
+ " | use vectorized: true"));
Assert.assertTrue(planFragment.contains(" STREAM DATA SINK\n"
+ " EXCHANGE ID: 05\n"
+ " UNPARTITIONED"));
Assert.assertFalse(planFragment.contains("PLAN FRAGMENT 3"));
}
@Test
public void testDistinctWithoutGroupByWithHighCardinalityForceOneStage(
@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
connectContext.getSessionVariable().setNewPlanerAggStage(1);
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V1, V2));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000),
new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "select count(distinct v2), sum(v1) from t0";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 2:AGGREGATE (update finalize)\n"
+ " | output: multi_distinct_count(2: v2), sum(1: v1)\n"
+ " | group by:"));
}
@Test
public void testDistinctWithGroupByWithLowCardinalityForceThreeStage(
@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
connectContext.getSessionVariable().setNewPlanerAggStage(3);
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V2, V3));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 50),
new ColumnStatistic(0.0, 100, 0.0, 10, 50));
}
};
String sql = "select count(distinct v2) from t0 group by v3";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:AGGREGATE (merge serialize)\n"
+ " | group by: 2: v2, 3: v3"));
Assert.assertTrue(planFragment.contains(" STREAM DATA SINK\n"
+ " EXCHANGE ID: 06\n"
+ " UNPARTITIONED"));
}
@Test
public void testDistinctWithGroupByWithHighCardinality(@Mocked MockTpchStatisticStorage mockedStatisticStorage)
throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V2, V3));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000),
new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "select count(distinct v2) from t0 group by v3";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 2:AGGREGATE (update finalize)\n"
+ " | output: multi_distinct_count(2: v2)\n"
+ " | group by: 3: v3"));
}
@Test
public void testPredicateRewrittenByProjectWithLowCardinality(
@Mocked MockTpchStatisticStorage mockedStatisticStorage)
throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V2, V3));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 10),
new ColumnStatistic(0.0, 100, 0.0, 10, 10));
}
};
String sql = "SELECT -v3 from t0 group by v3, v2 having -v3 < 63;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 4:Project\n"
+ " | <slot 4> : -1 * 3: v3"));
Assert.assertTrue(planFragment.contains("PREDICATES: -1 * 3: v3 < 63"));
}
@Test
public void testPredicateRewrittenByProjectWithHighCardinality(
@Mocked MockTpchStatisticStorage mockedStatisticStorage) throws Exception {
new Expectations() {
{
mockedStatisticStorage.getColumnStatistics((Table) any, ImmutableList.of(V2, V3));
result = ImmutableList.of(new ColumnStatistic(0.0, 100, 0.0, 10, 7000),
new ColumnStatistic(0.0, 100, 0.0, 10, 7000));
}
};
String sql = "SELECT -v3 from t0 group by v3, v2 having -v3 < 63;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:Project\n"
+ " | <slot 4> : -1 * 3: v3"));
}
@Test
public void testShuffleInnerJoin() throws Exception {
UtFrameUtils.addMockBackend(10002);
UtFrameUtils.addMockBackend(10003);
Catalog catalog = connectContext.getCatalog();
OlapTable table1 = (OlapTable) catalog.getDb("default_cluster:test").getTable("t0");
setTableStatistics(table1, 10000);
OlapTable table2 = (OlapTable) catalog.getDb("default_cluster:test").getTable("test_all_type");
setTableStatistics(table2, 5000);
connectContext.getSessionVariable().setPreferJoinMethod("shuffle");
String sql = "SELECT v2,t1d from t0 join test_all_type on t0.v2 = test_all_type.t1d ;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 4:HASH JOIN\n"
+ " | join op: INNER JOIN (PARTITIONED)\n"
+ " | hash predicates:\n"
+ " | colocate: false, reason: \n"
+ " | equal join conjunct: 2: v2 = 7: t1d\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " |----3:EXCHANGE\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " 1:EXCHANGE"));
Assert.assertTrue(planFragment.contains(" EXCHANGE ID: 03\n"
+ " HASH_PARTITIONED: 7: t1d\n"
+ "\n"
+ " 2:OlapScanNode"));
Assert.assertTrue(planFragment.contains(" STREAM DATA SINK\n"
+ " EXCHANGE ID: 01\n"
+ " HASH_PARTITIONED: 2: v2\n"
+ "\n"
+ " 0:OlapScanNode"));
Catalog.getCurrentSystemInfo().dropBackend(10002);
Catalog.getCurrentSystemInfo().dropBackend(10003);
}
@Test
public void testBroadcastInnerJoin() throws Exception {
String sql = "SELECT v1, t1d from t0 join test_all_type on t0.v2 = test_all_type.t1d ;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:HASH JOIN\n"
+ " | join op: INNER JOIN (BROADCAST)\n"
+ " | hash predicates:\n"
+ " | colocate: false, reason: \n"
+ " | equal join conjunct: 2: v2 = 7: t1d\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " |----2:EXCHANGE\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " 0:OlapScanNode\n"
+ " TABLE: t0\n"));
}
@Test
public void testBroadcastInnerJoinWithCommutativity() throws Exception {
Catalog catalog = connectContext.getCatalog();
OlapTable table = (OlapTable) catalog.getDb("default_cluster:test").getTable("t0");
setTableStatistics(table, 1000);
String sql = "SELECT * from t0 join test_all_type on t0.v1 = test_all_type.t1d ;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains(" 3:HASH JOIN\n"
+ " | join op: INNER JOIN (BROADCAST)\n"
+ " | hash predicates:\n"
+ " | colocate: false, reason: \n"
+ " | equal join conjunct: 7: t1d = 1: v1\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " |----2:EXCHANGE\n"
+ " | use vectorized: true\n"
+ " | \n"
+ " 0:OlapScanNode\n"
+ " TABLE: test_all_type\n"));
setTableStatistics(table, 10000);
}
@Test
public void testColocateJoin() throws Exception {
String sql = "SELECT * from t0 join t0 as b on t0.v1 = b.v1;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("join op: INNER JOIN (COLOCATE)"));
}
@Test
public void testColocateAgg() throws Exception {
String sql = "SELECT count(*) from t0 group by t0.v1;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("1:AGGREGATE (update finalize)"));
}
@Test
public void testDistinctExpr() throws Exception {
String sql = "SELECT DISTINCT - - v1 DIV - 98 FROM t0;";
String planFragment = getFragmentPlan(sql);
Assert.assertFalse(planFragment.contains(" 2:AGGREGATE (update finalize)\n" +
" | group by: <slot 4>\n" +
" | use vectorized: true\n" +
" | \n" +
" 1:Project"));
Assert.assertTrue(planFragment.contains("EXCHANGE"));
}
@Test
public void testRollUp() throws Exception {
String sql = "select event_day from test_mv group by event_day;";
String planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r3"));
sql = "select count(*) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r3"));
sql = "select count(*), event_day from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r3"));
sql = "select event_day from test_mv where citycode = 1 group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r2"));
sql = "select siteid from test_mv where event_day = 1 group by siteid;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r1"));
sql = "select siteid from test_mv group by siteid, event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r1"));
sql = "select siteid from test_mv group by siteid, username;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: test_mv"));
sql = "select siteid,sum(pv) from test_mv group by siteid, username;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: test_mv"));
sql = "select sum(pv) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r4"));
sql = "select max(pv) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: test_mv"));
sql = "select max(event_day) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r3"));
sql = "select max(event_day), sum(pv) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: r4"));
sql = "select max(event_day), max(pv) from test_mv group by event_day;";
planFragment = getFragmentPlan(sql);
Assert.assertTrue(planFragment.contains("rollup: test_mv"));
}
@Test
@Test
public void testReplicatedJoin() throws Exception {
connectContext.getSessionVariable().setEnableReplicationJoin(true);
String sql = "select s_name, s_address from supplier, nation where s_suppkey in " +
"( select ps_suppkey from partsupp where ps_partkey in ( select p_partkey from part where p_name like 'forest%' ) and ps_availqty > " +
"( select 0.5 * sum(l_quantity) from lineitem where l_partkey = ps_partkey and l_suppkey = ps_suppkey and " +
"l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1' year ) ) " +
"and s_nationkey = n_nationkey and n_name = 'CANADA' order by s_name;";
String plan = getFragmentPlan(sql);
System.out.println(plan);
Assert.assertTrue(plan.contains(" 3:HASH JOIN\n" +
" | join op: INNER JOIN (REPLICATED)\n" +
" | hash predicates:\n" +
" | colocate: false, reason: \n" +
" | equal join conjunct: 4: S_NATIONKEY = 9: N_NATIONKEY"));
Assert.assertTrue(plan.contains("18:HASH JOIN\n" +
" | join op: LEFT SEMI JOIN (BUCKET_SHUFFLE)\n" +
" | hash predicates:\n" +
" | colocate: false, reason: \n" +
" | equal join conjunct: 1: S_SUPPKEY = 15: PS_SUPPKEY"));
connectContext.getSessionVariable().setEnableReplicationJoin(false);
}
@Test
public void testReapNodeExchange() throws Exception {
String sql = "select v1, v2, SUM(v3) from t0 group by rollup(v1, v2)";
String plan = getFragmentPlan(sql);
Assert.assertTrue(plan.contains(" STREAM DATA SINK\n" +
" EXCHANGE ID: 03\n" +
" HASH_PARTITIONED: 1: v1, 2: v2, 5: GROUPING_ID\n" +
"\n" +
" 2:AGGREGATE (update serialize)\n" +
" | STREAMING\n" +
" | output: sum(3: v3)\n" +
" | group by: 1: v1, 2: v2, 5: GROUPING_ID\n" +
" | use vectorized: true\n" +
" | \n" +
" 1:REPEAT_NODE\n" +
" | repeat: repeat 2 lines [[], [1], [1, 2]]\n" +
" | use vectorized: true\n" +
" | \n" +
" 0:OlapScanNode"));
sql = "select v1, SUM(v3) from t0 group by rollup(v1)";
plan = getFragmentPlan(sql);
Assert.assertTrue(plan.contains(" STREAM DATA SINK\n" +
" EXCHANGE ID: 03\n" +
" HASH_PARTITIONED: 1: v1, 5: GROUPING_ID\n" +
"\n" +
" 2:AGGREGATE (update serialize)\n" +
" | STREAMING\n" +
" | output: sum(3: v3)\n" +
" | group by: 1: v1, 5: GROUPING_ID\n" +
" | use vectorized: true\n" +
" | \n" +
" 1:REPEAT_NODE\n" +
" | repeat: repeat 1 lines [[], [1]]\n" +
" | use vectorized: true\n" +
" | \n" +
" 0:OlapScanNode\n" +
" TABLE: t0\n" +
" PREAGGREGATION: ON"));
sql = "select SUM(v3) from t0 group by grouping sets(())";
plan = getFragmentPlan(sql);
System.out.println(plan);
Assert.assertTrue(plan.contains(" 3:EXCHANGE\n" +
" use vectorized: true\n" +
"\n" +
"PLAN FRAGMENT 2\n" +
" OUTPUT EXPRS:\n" +
" PARTITION: RANDOM\n" +
"\n" +
" STREAM DATA SINK\n" +
" EXCHANGE ID: 03\n" +
" HASH_PARTITIONED: 5: GROUPING_ID\n" +
"\n" +
" 2:AGGREGATE (update serialize)\n" +
" | STREAMING\n" +
" | output: sum(3: v3)\n" +
" | group by: 5: GROUPING_ID\n" +
" | use vectorized: true\n" +
" | \n" +
" 1:REPEAT_NODE"));
}
} |
Redundant `this` (also on the line above). | public ApplicationContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId, name, deployState);
this.tlsSecrets = deployState.tlsSecrets();
this.enableGroupingSessionCache = deployState.getProperties().enableGroupingSessionCache();
restApiGroup = new ConfigProducerGroup<>(this, "rest-api");
servletGroup = new ConfigProducerGroup<>(this, "servlet");
addSimpleComponent(DEFAULT_LINGUISTICS_PROVIDER);
addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.DeprecatedSecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider");
addTestrunnerComponentsIfTester(deployState);
} | this.enableGroupingSessionCache = deployState.getProperties().enableGroupingSessionCache(); | public ApplicationContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId, name, deployState);
this.tlsSecrets = deployState.tlsSecrets();
this.enableGroupingSessionCache = deployState.getProperties().enableGroupingSessionCache();
restApiGroup = new ConfigProducerGroup<>(this, "rest-api");
servletGroup = new ConfigProducerGroup<>(this, "servlet");
addSimpleComponent(DEFAULT_LINGUISTICS_PROVIDER);
addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.DeprecatedSecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider");
addTestrunnerComponentsIfTester(deployState);
} | class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements
BundlesConfig.Producer,
RankProfilesConfig.Producer,
RankingConstantsConfig.Producer,
ServletPathsConfig.Producer
{
private final Set<FileReference> applicationBundles = new LinkedHashSet<>();
private final ConfigProducerGroup<Servlet> servletGroup;
private final ConfigProducerGroup<RestApi> restApiGroup;
private ContainerModelEvaluation modelEvaluation;
private Optional<TlsSecrets> tlsSecrets;
private final boolean enableGroupingSessionCache;
@Override
protected void doPrepare(DeployState deployState) {
addAndSendApplicationBundles(deployState);
if (modelEvaluation != null)
modelEvaluation.prepare(containers);
sendUserConfiguredFiles(deployState);
for (RestApi restApi : restApiGroup.getComponents())
restApi.prepare();
}
private void addAndSendApplicationBundles(DeployState deployState) {
for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) {
FileReference reference = FileSender.sendFileToServices(component.getPathRelativeToAppDir(), containers);
applicationBundles.add(reference);
}
}
private void sendUserConfiguredFiles(DeployState deployState) {
for (Component<?, ?> component : getAllComponents()) {
FileSender.sendUserConfiguredFiles(component, containers, deployState.getDeployLogger());
}
}
private void addTestrunnerComponentsIfTester(DeployState deployState) {
if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester())
addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("lib/jars/vespa-testrunner-components-jar-with-dependencies.jar")));
}
public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) {
this.modelEvaluation = modelEvaluation;
}
public final void addRestApi(@NonNull RestApi restApi) {
restApiGroup.addComponent(ComponentId.fromString(restApi.getBindingPath()), restApi);
}
public Map<ComponentId, RestApi> getRestApiMap() {
return restApiGroup.getComponentMap();
}
public Map<ComponentId, Servlet> getServletMap() {
return servletGroup.getComponentMap();
}
public final void addServlet(@NonNull Servlet servlet) {
servletGroup.addComponent(servlet.getGlobalComponentId(), servlet);
}
public Collection<Servlet> getAllServlets() {
return allServlets().collect(Collectors.toCollection(ArrayList::new));
}
private Stream<Servlet> allServlets() {
return Stream.concat(allJersey2Servlets(),
servletGroup.getComponents().stream());
}
private Stream<Jersey2Servlet> allJersey2Servlets() {
return restApiGroup.getComponents().stream().map(RestApi::getJersey2Servlet);
}
@Override
public void getConfig(BundlesConfig.Builder builder) {
applicationBundles.stream().map(FileReference::value)
.forEach(builder::bundle);
super.getConfig(builder);
}
@Override
public void getConfig(ServletPathsConfig.Builder builder) {
allServlets().forEach(servlet ->
builder.servlets(servlet.getComponentId().stringValue(),
servlet.toConfigBuilder())
);
}
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(RankingConstantsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
public Optional<TlsSecrets> getTlsSecrets() {
return tlsSecrets;
}
public boolean enableGroupingSessionCache() {
return enableGroupingSessionCache;
}
} | class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements
BundlesConfig.Producer,
RankProfilesConfig.Producer,
RankingConstantsConfig.Producer,
ServletPathsConfig.Producer
{
private final Set<FileReference> applicationBundles = new LinkedHashSet<>();
private final ConfigProducerGroup<Servlet> servletGroup;
private final ConfigProducerGroup<RestApi> restApiGroup;
private ContainerModelEvaluation modelEvaluation;
private Optional<TlsSecrets> tlsSecrets;
private final boolean enableGroupingSessionCache;
@Override
protected void doPrepare(DeployState deployState) {
addAndSendApplicationBundles(deployState);
if (modelEvaluation != null)
modelEvaluation.prepare(containers);
sendUserConfiguredFiles(deployState);
for (RestApi restApi : restApiGroup.getComponents())
restApi.prepare();
}
private void addAndSendApplicationBundles(DeployState deployState) {
for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) {
FileReference reference = FileSender.sendFileToServices(component.getPathRelativeToAppDir(), containers);
applicationBundles.add(reference);
}
}
private void sendUserConfiguredFiles(DeployState deployState) {
for (Component<?, ?> component : getAllComponents()) {
FileSender.sendUserConfiguredFiles(component, containers, deployState.getDeployLogger());
}
}
private void addTestrunnerComponentsIfTester(DeployState deployState) {
if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester())
addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("lib/jars/vespa-testrunner-components-jar-with-dependencies.jar")));
}
public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) {
this.modelEvaluation = modelEvaluation;
}
public final void addRestApi(@NonNull RestApi restApi) {
restApiGroup.addComponent(ComponentId.fromString(restApi.getBindingPath()), restApi);
}
public Map<ComponentId, RestApi> getRestApiMap() {
return restApiGroup.getComponentMap();
}
public Map<ComponentId, Servlet> getServletMap() {
return servletGroup.getComponentMap();
}
public final void addServlet(@NonNull Servlet servlet) {
servletGroup.addComponent(servlet.getGlobalComponentId(), servlet);
}
public Collection<Servlet> getAllServlets() {
return allServlets().collect(Collectors.toCollection(ArrayList::new));
}
private Stream<Servlet> allServlets() {
return Stream.concat(allJersey2Servlets(),
servletGroup.getComponents().stream());
}
private Stream<Jersey2Servlet> allJersey2Servlets() {
return restApiGroup.getComponents().stream().map(RestApi::getJersey2Servlet);
}
@Override
public void getConfig(BundlesConfig.Builder builder) {
applicationBundles.stream().map(FileReference::value)
.forEach(builder::bundle);
super.getConfig(builder);
}
@Override
public void getConfig(ServletPathsConfig.Builder builder) {
allServlets().forEach(servlet ->
builder.servlets(servlet.getComponentId().stringValue(),
servlet.toConfigBuilder())
);
}
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(RankingConstantsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
public Optional<TlsSecrets> getTlsSecrets() {
return tlsSecrets;
}
public boolean enableGroupingSessionCache() {
return enableGroupingSessionCache;
}
} |
Redundant `this` | public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.options = options;
cluster.addComponent(getFS4ResourcePool());
this.enableGroupingSessionCache = cluster.enableGroupingSessionCache();
} | this.enableGroupingSessionCache = cluster.enableGroupingSessionCache(); | public ContainerSearch(ApplicationContainerCluster cluster, SearchChains chains, Options options) {
super(chains);
this.options = options;
cluster.addComponent(getFS4ResourcePool());
this.enableGroupingSessionCache = cluster.enableGroupingSessionCache();
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private final List<AbstractSearchCluster> systems = new LinkedList<>();
private final Options options;
private final boolean enableGroupingSessionCache;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
private static Component<?, ComponentModel> getFS4ResourcePool() {
BundleInstantiationSpecification spec = BundleInstantiationSpecification.
getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null);
return new Component<>(new ComponentModel(spec));
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
systems.addAll(searchClusters.values());
initializeSearchChains(searchClusters);
}
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider: getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider: getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles!=null) {
queryProfiles.getConfig(builder);
builder.enableGroupingSessionCache(enableGroupingSessionCache);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules!=null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates!=null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : systems) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : systems) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < systems.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(systems, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if (sys instanceof IndexedSearchCluster) {
for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) {
scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder().
host(tld.getHostname()).
port(tld.getDispatchPort()));
}
} else {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index) {
return sys;
}
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/**
* Struct that encapsulates qrserver options.
*/
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} | class ContainerSearch extends ContainerSubsystem<SearchChains>
implements
IndexInfoConfig.Producer,
IlscriptsConfig.Producer,
QrSearchersConfig.Producer,
QueryProfilesConfig.Producer,
SemanticRulesConfig.Producer,
PageTemplatesConfig.Producer {
private final List<AbstractSearchCluster> systems = new LinkedList<>();
private final Options options;
private final boolean enableGroupingSessionCache;
private QueryProfiles queryProfiles;
private SemanticRules semanticRules;
private PageTemplates pageTemplates;
private static Component<?, ComponentModel> getFS4ResourcePool() {
BundleInstantiationSpecification spec = BundleInstantiationSpecification.
getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null);
return new Component<>(new ComponentModel(spec));
}
public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) {
systems.addAll(searchClusters.values());
initializeSearchChains(searchClusters);
}
public void initializeSearchChains(Map<String, ? extends AbstractSearchCluster> searchClusters) {
getChains().initialize(searchClusters);
QrsCache defaultCacheOptions = getOptions().cacheSettings.get("");
if (defaultCacheOptions != null) {
for (LocalProvider localProvider: getChains().localProviders()) {
localProvider.setCacheSize(defaultCacheOptions.size);
}
}
for (LocalProvider localProvider: getChains().localProviders()) {
QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName());
if (cacheOptions != null) {
localProvider.setCacheSize(cacheOptions.size);
}
}
}
public void setQueryProfiles(QueryProfiles queryProfiles) {
this.queryProfiles = queryProfiles;
}
public void setSemanticRules(SemanticRules semanticRules) {
this.semanticRules = semanticRules;
}
public void setPageTemplates(PageTemplates pageTemplates) {
this.pageTemplates = pageTemplates;
}
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
if (queryProfiles!=null) {
queryProfiles.getConfig(builder);
builder.enableGroupingSessionCache(enableGroupingSessionCache);
}
}
@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
if (semanticRules!=null) semanticRules.getConfig(builder);
}
@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
if (pageTemplates!=null) pageTemplates.getConfig(builder);
}
@Override
public void getConfig(IndexInfoConfig.Builder builder) {
for (AbstractSearchCluster sc : systems) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
for (AbstractSearchCluster sc : systems) {
sc.getConfig(builder);
}
}
@Override
public void getConfig(QrSearchersConfig.Builder builder) {
for (int i = 0; i < systems.size(); i++) {
AbstractSearchCluster sys = findClusterWithId(systems, i);
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder().
name(sys.getClusterName());
for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) {
scB.searchdef(spec.getSearchDefinition().getName());
}
scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId()));
scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName()));
if (sys instanceof IndexedSearchCluster) {
for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) {
scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder().
host(tld.getHostname()).
port(tld.getDispatchPort()));
}
} else {
scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder().
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec()));
}
builder.searchcluster(scB);
}
}
private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) {
for (AbstractSearchCluster sys : clusters) {
if (sys.getClusterIndex() == index) {
return sys;
}
}
throw new IllegalArgumentException("No search cluster with index " + index + " exists");
}
public Options getOptions() {
return options;
}
/**
* Struct that encapsulates qrserver options.
*/
public static class Options {
Map<String, QrsCache> cacheSettings = new LinkedHashMap<>();
}
} |
I agree that it is strictly not necessary; it's more about what code style a developer prefers. | public ApplicationContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId, name, deployState);
this.tlsSecrets = deployState.tlsSecrets();
this.enableGroupingSessionCache = deployState.getProperties().enableGroupingSessionCache();
restApiGroup = new ConfigProducerGroup<>(this, "rest-api");
servletGroup = new ConfigProducerGroup<>(this, "servlet");
addSimpleComponent(DEFAULT_LINGUISTICS_PROVIDER);
addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.DeprecatedSecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider");
addTestrunnerComponentsIfTester(deployState);
} | this.enableGroupingSessionCache = deployState.getProperties().enableGroupingSessionCache(); | public ApplicationContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId, name, deployState);
this.tlsSecrets = deployState.tlsSecrets();
this.enableGroupingSessionCache = deployState.getProperties().enableGroupingSessionCache();
restApiGroup = new ConfigProducerGroup<>(this, "rest-api");
servletGroup = new ConfigProducerGroup<>(this, "servlet");
addSimpleComponent(DEFAULT_LINGUISTICS_PROVIDER);
addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.DeprecatedSecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider");
addTestrunnerComponentsIfTester(deployState);
} | class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements
BundlesConfig.Producer,
RankProfilesConfig.Producer,
RankingConstantsConfig.Producer,
ServletPathsConfig.Producer
{
private final Set<FileReference> applicationBundles = new LinkedHashSet<>();
private final ConfigProducerGroup<Servlet> servletGroup;
private final ConfigProducerGroup<RestApi> restApiGroup;
private ContainerModelEvaluation modelEvaluation;
private Optional<TlsSecrets> tlsSecrets;
private final boolean enableGroupingSessionCache;
@Override
protected void doPrepare(DeployState deployState) {
addAndSendApplicationBundles(deployState);
if (modelEvaluation != null)
modelEvaluation.prepare(containers);
sendUserConfiguredFiles(deployState);
for (RestApi restApi : restApiGroup.getComponents())
restApi.prepare();
}
private void addAndSendApplicationBundles(DeployState deployState) {
for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) {
FileReference reference = FileSender.sendFileToServices(component.getPathRelativeToAppDir(), containers);
applicationBundles.add(reference);
}
}
private void sendUserConfiguredFiles(DeployState deployState) {
for (Component<?, ?> component : getAllComponents()) {
FileSender.sendUserConfiguredFiles(component, containers, deployState.getDeployLogger());
}
}
private void addTestrunnerComponentsIfTester(DeployState deployState) {
if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester())
addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("lib/jars/vespa-testrunner-components-jar-with-dependencies.jar")));
}
public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) {
this.modelEvaluation = modelEvaluation;
}
public final void addRestApi(@NonNull RestApi restApi) {
restApiGroup.addComponent(ComponentId.fromString(restApi.getBindingPath()), restApi);
}
public Map<ComponentId, RestApi> getRestApiMap() {
return restApiGroup.getComponentMap();
}
public Map<ComponentId, Servlet> getServletMap() {
return servletGroup.getComponentMap();
}
public final void addServlet(@NonNull Servlet servlet) {
servletGroup.addComponent(servlet.getGlobalComponentId(), servlet);
}
public Collection<Servlet> getAllServlets() {
return allServlets().collect(Collectors.toCollection(ArrayList::new));
}
private Stream<Servlet> allServlets() {
return Stream.concat(allJersey2Servlets(),
servletGroup.getComponents().stream());
}
private Stream<Jersey2Servlet> allJersey2Servlets() {
return restApiGroup.getComponents().stream().map(RestApi::getJersey2Servlet);
}
@Override
public void getConfig(BundlesConfig.Builder builder) {
applicationBundles.stream().map(FileReference::value)
.forEach(builder::bundle);
super.getConfig(builder);
}
@Override
public void getConfig(ServletPathsConfig.Builder builder) {
allServlets().forEach(servlet ->
builder.servlets(servlet.getComponentId().stringValue(),
servlet.toConfigBuilder())
);
}
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(RankingConstantsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
public Optional<TlsSecrets> getTlsSecrets() {
return tlsSecrets;
}
public boolean enableGroupingSessionCache() {
return enableGroupingSessionCache;
}
} | class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements
BundlesConfig.Producer,
RankProfilesConfig.Producer,
RankingConstantsConfig.Producer,
ServletPathsConfig.Producer
{
private final Set<FileReference> applicationBundles = new LinkedHashSet<>();
private final ConfigProducerGroup<Servlet> servletGroup;
private final ConfigProducerGroup<RestApi> restApiGroup;
private ContainerModelEvaluation modelEvaluation;
private Optional<TlsSecrets> tlsSecrets;
private final boolean enableGroupingSessionCache;
@Override
protected void doPrepare(DeployState deployState) {
addAndSendApplicationBundles(deployState);
if (modelEvaluation != null)
modelEvaluation.prepare(containers);
sendUserConfiguredFiles(deployState);
for (RestApi restApi : restApiGroup.getComponents())
restApi.prepare();
}
private void addAndSendApplicationBundles(DeployState deployState) {
for (ComponentInfo component : deployState.getApplicationPackage().getComponentsInfo(deployState.getVespaVersion())) {
FileReference reference = FileSender.sendFileToServices(component.getPathRelativeToAppDir(), containers);
applicationBundles.add(reference);
}
}
private void sendUserConfiguredFiles(DeployState deployState) {
for (Component<?, ?> component : getAllComponents()) {
FileSender.sendUserConfiguredFiles(component, containers, deployState.getDeployLogger());
}
}
private void addTestrunnerComponentsIfTester(DeployState deployState) {
if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester())
addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("lib/jars/vespa-testrunner-components-jar-with-dependencies.jar")));
}
public void setModelEvaluation(ContainerModelEvaluation modelEvaluation) {
this.modelEvaluation = modelEvaluation;
}
public final void addRestApi(@NonNull RestApi restApi) {
restApiGroup.addComponent(ComponentId.fromString(restApi.getBindingPath()), restApi);
}
public Map<ComponentId, RestApi> getRestApiMap() {
return restApiGroup.getComponentMap();
}
public Map<ComponentId, Servlet> getServletMap() {
return servletGroup.getComponentMap();
}
public final void addServlet(@NonNull Servlet servlet) {
servletGroup.addComponent(servlet.getGlobalComponentId(), servlet);
}
public Collection<Servlet> getAllServlets() {
return allServlets().collect(Collectors.toCollection(ArrayList::new));
}
private Stream<Servlet> allServlets() {
return Stream.concat(allJersey2Servlets(),
servletGroup.getComponents().stream());
}
private Stream<Jersey2Servlet> allJersey2Servlets() {
return restApiGroup.getComponents().stream().map(RestApi::getJersey2Servlet);
}
@Override
public void getConfig(BundlesConfig.Builder builder) {
applicationBundles.stream().map(FileReference::value)
.forEach(builder::bundle);
super.getConfig(builder);
}
@Override
public void getConfig(ServletPathsConfig.Builder builder) {
allServlets().forEach(servlet ->
builder.servlets(servlet.getComponentId().stringValue(),
servlet.toConfigBuilder())
);
}
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
@Override
public void getConfig(RankingConstantsConfig.Builder builder) {
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
public Optional<TlsSecrets> getTlsSecrets() {
return tlsSecrets;
}
public boolean enableGroupingSessionCache() {
return enableGroupingSessionCache;
}
} |
40x sounds like a big change. Will it impact error detection/load balancing ? I would probably start with 1 minute as a 4x change. | public HttpClient createClient() {
HttpClientBuilder clientBuilder;
if (connectionParams.useTlsConfigFromEnvironment()) {
clientBuilder = VespaTlsAwareClientBuilder.createHttpClientBuilder();
} else {
clientBuilder = HttpClientBuilder.create();
if (useSsl && connectionParams.getSslContext() != null) {
clientBuilder.setSslcontext(connectionParams.getSslContext());
clientBuilder.setSSLHostnameVerifier(connectionParams.getHostnameVerifier());
}
}
clientBuilder.setMaxConnPerRoute(1);
clientBuilder.setMaxConnTotal(1);
clientBuilder.setConnectionTimeToLive(10, TimeUnit.MINUTES);
clientBuilder.setUserAgent(String.format("vespa-http-client (%s)", Vtag.currentVersion));
clientBuilder.setDefaultHeaders(Collections.singletonList(new BasicHeader(Headers.CLIENT_VERSION, Vtag.currentVersion)));
clientBuilder.disableContentCompression();
{
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom();
requestConfigBuilder.setSocketTimeout(0);
if (connectionParams.getProxyHost() != null) {
requestConfigBuilder.setProxy(new HttpHost(connectionParams.getProxyHost(), connectionParams.getProxyPort()));
}
clientBuilder.setDefaultRequestConfig(requestConfigBuilder.build());
}
log.fine("Creating HttpClient: " + " ConnectionTimeout "
+ " SocketTimeout 0 secs "
+ " proxyhost (can be null) " + connectionParams.getProxyHost()
+ ":" + connectionParams.getProxyPort()
+ (useSsl ? " using ssl " : " not using ssl")
);
return clientBuilder.build();
} | clientBuilder.setConnectionTimeToLive(10, TimeUnit.MINUTES); | public HttpClient createClient() {
HttpClientBuilder clientBuilder;
if (connectionParams.useTlsConfigFromEnvironment()) {
clientBuilder = VespaTlsAwareClientBuilder.createHttpClientBuilder();
} else {
clientBuilder = HttpClientBuilder.create();
if (useSsl && connectionParams.getSslContext() != null) {
clientBuilder.setSslcontext(connectionParams.getSslContext());
clientBuilder.setSSLHostnameVerifier(connectionParams.getHostnameVerifier());
}
}
clientBuilder.setMaxConnPerRoute(1);
clientBuilder.setMaxConnTotal(1);
clientBuilder.setConnectionTimeToLive(1, TimeUnit.MINUTES);
clientBuilder.setUserAgent(String.format("vespa-http-client (%s)", Vtag.currentVersion));
clientBuilder.setDefaultHeaders(Collections.singletonList(new BasicHeader(Headers.CLIENT_VERSION, Vtag.currentVersion)));
clientBuilder.disableContentCompression();
{
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom();
requestConfigBuilder.setSocketTimeout(0);
if (connectionParams.getProxyHost() != null) {
requestConfigBuilder.setProxy(new HttpHost(connectionParams.getProxyHost(), connectionParams.getProxyPort()));
}
clientBuilder.setDefaultRequestConfig(requestConfigBuilder.build());
}
log.fine("Creating HttpClient: " + " ConnectionTimeout "
+ " SocketTimeout 0 secs "
+ " proxyhost (can be null) " + connectionParams.getProxyHost()
+ ":" + connectionParams.getProxyPort()
+ (useSsl ? " using ssl " : " not using ssl")
);
return clientBuilder.build();
} | class HttpClientFactory {
final ConnectionParams connectionParams;
final boolean useSsl;
public HttpClientFactory(final ConnectionParams connectionParams, final boolean useSsl) {
this.connectionParams = connectionParams;
this.useSsl = useSsl;
}
} | class HttpClientFactory {
final ConnectionParams connectionParams;
final boolean useSsl;
public HttpClientFactory(final ConnectionParams connectionParams, final boolean useSsl) {
this.connectionParams = connectionParams;
this.useSsl = useSsl;
}
} |
Ok, let's try with one minute first. I'll give you the motivation for this change offline. | public HttpClient createClient() {
HttpClientBuilder clientBuilder;
if (connectionParams.useTlsConfigFromEnvironment()) {
clientBuilder = VespaTlsAwareClientBuilder.createHttpClientBuilder();
} else {
clientBuilder = HttpClientBuilder.create();
if (useSsl && connectionParams.getSslContext() != null) {
clientBuilder.setSslcontext(connectionParams.getSslContext());
clientBuilder.setSSLHostnameVerifier(connectionParams.getHostnameVerifier());
}
}
clientBuilder.setMaxConnPerRoute(1);
clientBuilder.setMaxConnTotal(1);
clientBuilder.setConnectionTimeToLive(10, TimeUnit.MINUTES);
clientBuilder.setUserAgent(String.format("vespa-http-client (%s)", Vtag.currentVersion));
clientBuilder.setDefaultHeaders(Collections.singletonList(new BasicHeader(Headers.CLIENT_VERSION, Vtag.currentVersion)));
clientBuilder.disableContentCompression();
{
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom();
requestConfigBuilder.setSocketTimeout(0);
if (connectionParams.getProxyHost() != null) {
requestConfigBuilder.setProxy(new HttpHost(connectionParams.getProxyHost(), connectionParams.getProxyPort()));
}
clientBuilder.setDefaultRequestConfig(requestConfigBuilder.build());
}
log.fine("Creating HttpClient: " + " ConnectionTimeout "
+ " SocketTimeout 0 secs "
+ " proxyhost (can be null) " + connectionParams.getProxyHost()
+ ":" + connectionParams.getProxyPort()
+ (useSsl ? " using ssl " : " not using ssl")
);
return clientBuilder.build();
} | clientBuilder.setConnectionTimeToLive(10, TimeUnit.MINUTES); | public HttpClient createClient() {
HttpClientBuilder clientBuilder;
if (connectionParams.useTlsConfigFromEnvironment()) {
clientBuilder = VespaTlsAwareClientBuilder.createHttpClientBuilder();
} else {
clientBuilder = HttpClientBuilder.create();
if (useSsl && connectionParams.getSslContext() != null) {
clientBuilder.setSslcontext(connectionParams.getSslContext());
clientBuilder.setSSLHostnameVerifier(connectionParams.getHostnameVerifier());
}
}
clientBuilder.setMaxConnPerRoute(1);
clientBuilder.setMaxConnTotal(1);
clientBuilder.setConnectionTimeToLive(1, TimeUnit.MINUTES);
clientBuilder.setUserAgent(String.format("vespa-http-client (%s)", Vtag.currentVersion));
clientBuilder.setDefaultHeaders(Collections.singletonList(new BasicHeader(Headers.CLIENT_VERSION, Vtag.currentVersion)));
clientBuilder.disableContentCompression();
{
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom();
requestConfigBuilder.setSocketTimeout(0);
if (connectionParams.getProxyHost() != null) {
requestConfigBuilder.setProxy(new HttpHost(connectionParams.getProxyHost(), connectionParams.getProxyPort()));
}
clientBuilder.setDefaultRequestConfig(requestConfigBuilder.build());
}
log.fine("Creating HttpClient: " + " ConnectionTimeout "
+ " SocketTimeout 0 secs "
+ " proxyhost (can be null) " + connectionParams.getProxyHost()
+ ":" + connectionParams.getProxyPort()
+ (useSsl ? " using ssl " : " not using ssl")
);
return clientBuilder.build();
} | class HttpClientFactory {
final ConnectionParams connectionParams;
final boolean useSsl;
public HttpClientFactory(final ConnectionParams connectionParams, final boolean useSsl) {
this.connectionParams = connectionParams;
this.useSsl = useSsl;
}
} | class HttpClientFactory {
final ConnectionParams connectionParams;
final boolean useSsl;
public HttpClientFactory(final ConnectionParams connectionParams, final boolean useSsl) {
this.connectionParams = connectionParams;
this.useSsl = useSsl;
}
} |
Any reason this must wait for 8 ? | void writeHeaderHeader(Writer w, CNode root) throws IOException {
String [] namespaceList = generateCppNameSpace(root);
String namespacePrint = generateCppNameSpaceString(namespaceList);
String namespaceDefine = generateCppNameSpaceDefine(namespaceList);
String className = getTypeName(root, false);
String defineName = namespaceDefine + "_" + getDefineName(className);
w.write(""
+ "/**\n"
+ " * @class " + namespacePrint + "::" + className + "\n"
+ " * @ingroup config\n"
+ " *\n"
+ " * @brief This is an autogenerated class for handling VESPA config.\n"
+ " *\n"
+ " * This class is autogenerated by vespa from a config definition file.\n"
+ " * To subscribe to config, you need to include the config/config.h header, \n"
+ " * and create a ConfigSubscriber in order to subscribe for config.\n"
);
if (root.getComment().length() > 0) {
w.write(" *\n");
StringTokenizer st = new StringTokenizer(root.getComment(), "\n");
while (st.hasMoreTokens()) {
w.write(" * " + st.nextToken() + "\n");
}
}
w.write(""
+ " */\n"
+ "
+ "
+ "\n"
+ "
+ "
+ "
+ "
+ "\n");
w.write("namespace config {\n");
w.write(" class ConfigValue;\n");
w.write(" class ConfigPayload;\n");
w.write("}\n\n");
w.write("namespace vespalib::slime {\n");
w.write(" struct Inspector;\n");
w.write(" struct Cursor;\n");
w.write("}\n\n");
writeNameSpaceBegin(w, namespaceList);
w.write("\nnamespace internal {\n\n");
w.write(""
+ "/**\n"
+ " * This class contains the config. DO NOT USE THIS CLASS DIRECTLY. Use the typedeffed\n"
+ " * versions after this class declaration.\n"
+ " */\n"
+ "class Internal" + className + "Type : public ::config::ConfigInstance\n"
+ "{\n"
);
}
void writeTypeDeclarations(Writer w, CNode node, String indent) throws IOException {
java.util.Set<String> declaredTypes = new java.util.HashSet<String>();
for (CNode child : node.getChildren()) {
boolean complexType = (child instanceof InnerCNode || child instanceof LeafCNode.EnumLeaf);
if (complexType && !declaredTypes.contains(child.getName())) {
String typeName = getTypeName(child, false);
declaredTypes.add(child.getName());
if (child instanceof LeafCNode.EnumLeaf) {
w.write(indent + "enum " + typeName + " { ");
LeafCNode.EnumLeaf leaf = (LeafCNode.EnumLeaf) child;
for (int i=0; i<leaf.getLegalValues().length; ++i) {
if (i != 0) {
w.write(", ");
}
w.write(leaf.getLegalValues()[i]);
}
w.write(" };\n"
+ indent + "typedef std::vector<" + typeName + "> "
+ typeName + "Vector;"
+ "\n"
+ indent + "typedef std::map<vespalib::string, " + typeName + "> "
+ typeName + "Map;"
+ "\n"
+ indent + "static " + typeName + " get" + typeName + "(const vespalib::string&);\n"
+ indent + "static vespalib::string get" + typeName + "Name(" + typeName + " e);\n"
+ "\n"
);
w.write(indent + "struct Internal" + typeName + "Converter {\n");
w.write(indent + " " + typeName + " operator()(const ::vespalib::string & __fieldName, const ::vespalib::slime::Inspector & __inspector);\n");
w.write(indent + " " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector);\n");
w.write(indent + " " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector, " + typeName + " __eDefault);\n");
w.write(indent + "};\n");
} else {
w.write(indent + "class " + typeName + " {\n");
w.write(indent + "public:\n");
writeTypeDeclarations(w, child, indent + " ");
writeStructFunctionDeclarations(w, getTypeName(child, false), child, indent + " ");
writeMembers(w, child, indent + " ");
w.write(indent + "};\n");
w.write(indent + "typedef std::vector<" + typeName + "> " + typeName + "Vector;\n\n");
w.write(indent + "typedef std::map<vespalib::string, " + typeName + "> " + typeName + "Map;\n\n");
}
}
}
}
void writeHeaderFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(""
+ indent + "const vespalib::string & defName() const override { return CONFIG_DEF_NAME; }\n"
+ indent + "const vespalib::string & defVersion() const { return CONFIG_DEF_VERSION; }\n"
+ indent + "const vespalib::string & defMd5() const override { return CONFIG_DEF_MD5; }\n"
+ indent + "const vespalib::string & defNamespace() const override { return CONFIG_DEF_NAMESPACE; }\n"
+ indent + "void serialize(::config::ConfigDataBuffer & __buffer) const override;\n");
writeConfigClassFunctionDeclarations(w, "Internal" + className + "Type", node, indent);
}
void writeConfigClassFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(indent + className + "(const ::config::ConfigValue & __value);\n");
w.write(indent + className + "(const ::config::ConfigDataBuffer & __value);\n");
w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n");
writeCommonFunctionDeclarations(w, className, node, indent);
}
void writeStructFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(indent + className + "(const std::vector<vespalib::string> & __lines);\n");
w.write(indent + className + "(const vespalib::slime::Inspector & __inspector);\n");
w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n");
writeCommonFunctionDeclarations(w, className, node, indent);
w.write(indent + "void serialize(vespalib::slime::Cursor & __cursor) const;\n");
}
void writeClassCopyConstructorDeclaration(Writer w, String className, String indent) throws IOException {
w.write(indent + className + "(const " + className + " & __rhs);\n");
}
void writeClassAssignmentOperatorDeclaration(Writer w, String className, String indent) throws IOException {
w.write(indent + className + " & operator = (const " + className + " & __rhs);\n");
}
void writeConfigClassCopyConstructorDefinition(Writer w, String parent, String className) throws IOException {
w.write(parent + "::" + className + "(const " + className + " & __rhs) = default;\n");
}
void writeConfigClassAssignmentOperatorDefinition(Writer w, String parent, String className) throws IOException {
w.write(parent + " & " + parent + "::" + "operator =(const " + className + " & __rhs) = default;\n");
}
void writeClassCopyConstructorDefinition(Writer w, String parent, CNode node) throws IOException {
String typeName = getTypeName(node, false);
w.write(parent + "::" + typeName + "(const " + typeName + " & __rhs) = default;\n");
}
void writeClassAssignmentOperatorDefinition(Writer w, String parent, CNode node) throws IOException {
String typeName = getTypeName(node, false);
w.write(parent + " & " + parent + "::" + "operator = (const " + typeName + " & __rhs) = default;\n");
}
void writeDestructor(Writer w, String parent, String className) throws IOException {
w.write(parent + "~" + className + "() { } \n");
}
void writeCommonFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write("" + indent + className + "();\n");
writeClassCopyConstructorDeclaration(w, className, indent);
writeClassAssignmentOperatorDeclaration(w, className, indent);
w.write("" + indent + "~" + className + "();\n");
w.write("\n"
+ indent + "bool operator==(const " + className + "& __rhs) const;\n"
+ indent + "bool operator!=(const " + className + "& __rhs) const;\n"
+ "\n"
);
}
static String getTypeName(CNode node, boolean includeArray) {
String type = null;
if (node instanceof InnerCNode) {
InnerCNode innerNode = (InnerCNode) node;
type = getTypeName(innerNode.getName());
} else if (node instanceof LeafCNode) {
LeafCNode leaf = (LeafCNode) node;
if (leaf.getType().equals("bool")) {
type = "bool";
} else if (leaf.getType().equals("int")) {
type = "int32_t";
} else if (leaf.getType().equals("long")) {
type = "int64_t";
} else if (leaf.getType().equals("double")) {
type = "double";
} else if (leaf.getType().equals("enum")) {
type = getTypeName(node.getName());
} else if (leaf.getType().equals("string")) {
type = "vespalib::string";
} else if (leaf.getType().equals("reference")) {
type = "vespalib::string";
} else if (leaf.getType().equals("file")) {
type = "vespalib::string";
} else {
throw new IllegalArgumentException("Unknown leaf datatype " + leaf.getType());
}
}
if (type == null) {
throw new IllegalArgumentException("Unknown node " + node);
}
if (node.isArray && includeArray) {
if (vectorTypeDefs.containsKey(type)) {
type = vectorTypeDefs.get(type);
} else {
type = type + "Vector";
}
} else if (node.isMap && includeArray) {
if (mapTypeDefs.containsKey(type)) {
type = mapTypeDefs.get(type);
} else {
type = type + "Map";
}
}
return type;
}
void writeStaticMemberDeclarations(Writer w, String indent) throws IOException {
w.write(""
+ indent + "static const vespalib::string CONFIG_DEF_MD5;\n"
+ indent + "static const vespalib::string CONFIG_DEF_VERSION;\n"
+ indent + "static const vespalib::string CONFIG_DEF_NAME;\n"
+ indent + "static const vespalib::string CONFIG_DEF_NAMESPACE;\n"
+ indent + "static const std::vector<vespalib::string> CONFIG_DEF_SCHEMA;\n"
+ indent + "static const int64_t CONFIG_DEF_SERIALIZE_VERSION;\n"
+ "\n"
);
}
    /**
     * Emits a C++ comment for the given text, wrapping lines at roughly 80
     * columns. With javadoc=true a doxygen-style block comment is produced.
     * NOTE(review): two write() calls in this copy of the source have truncated
     * string literals (extraction artifact); code is left byte-identical.
     */
    void writeComment(Writer w, String indent, String comment, boolean javadoc)
        throws IOException
    {
        /** If simple one liner comment, write on one line. */
        if (javadoc && comment.indexOf('\n') == -1
            && comment.length() <= 80 - (indent.length() + 7))
        {
            w.write(indent + "/** " + comment + " */\n");
            return;
        } else if (!javadoc && comment.indexOf('\n') == -1
            && comment.length() <= 80 - (indent.length() + 3))
        {
            w.write(indent + "
            return;
        }
        /** If not we need to write multi line comment. */
        int maxLineLen = 80 - (indent.length() + 3);
        if (javadoc) w.write(indent + "/**\n");
        do {
            // Consume the comment one source line at a time.
            String current;
            int newLine = comment.indexOf('\n');
            if (newLine == -1) {
                current = comment;
                comment = "";
            } else {
                current = comment.substring(0, newLine);
                comment = comment.substring(newLine + 1);
            }
            // Wrap long lines: prefer breaking at a space near the limit,
            // otherwise hard-break and mark the split with a trailing '-'.
            if (current.length() > maxLineLen) {
                int spaceIndex = current.lastIndexOf(' ', maxLineLen);
                if (spaceIndex >= maxLineLen - 15) {
                    comment = current.substring(spaceIndex + 1)
                        + "\n" + comment;
                    current = current.substring(0, spaceIndex);
                } else {
                    comment = current.substring(maxLineLen) + "\n" + comment;
                    current = current.substring(0, maxLineLen) + "-";
                }
            }
            w.write(indent + (javadoc ? " * " : "
        } while (comment.length() > 0);
        if (javadoc) w.write(indent + " */\n");
    }
    /**
     * Emits one member variable declaration per child node, preceded by the
     * child's comment (if any). Paragraphs before the last "\n\n" are written
     * as plain comments, the final paragraph as a doxygen comment.
     * NOTE(review): one write() call is truncated in this copy of the source;
     * code left byte-identical.
     */
    void writeMembers(Writer w, CNode node, String indent) throws IOException {
        for (CNode child : node.getChildren()) {
            String typeName = getTypeName(child, true);
            if (child.getComment().length() > 0) {
                String comment = child.getComment();
                int index;
                // Split the comment on blank lines; all but the last paragraph
                // become non-doxygen comments.
                do {
                    index = comment.indexOf("\n\n");
                    if (index == -1) break;
                    String next = comment.substring(0, index);
                    comment = comment.substring(index + 2);
                    w.write("\n");
                    writeComment(w, indent, next, false);
                } while (true);
                w.write("\n");
                writeComment(w, indent, comment, true);
            }
            w.write(indent + typeName + " " + getIdentifier(child.getName()) + ";");
            if (child instanceof LeafCNode) {
                LeafCNode leaf = (LeafCNode) child;
                DefaultValue value = leaf.getDefaultValue();
                if (value != null) {
                    w.write("
                }
            }
            w.write("\n");
        }
    }
void writeHeaderTypeDefs(Writer w, CNode root, String indent) throws IOException {
w.write(indent + "typedef std::unique_ptr<const " + getInternalClassName(root) + "> UP;\n");
for (Map.Entry<String, String> entry : vectorTypeDefs.entrySet()) {
String typeName = entry.getKey();
String vectorName = entry.getValue();
String typeDef = "typedef std::vector<" + typeName + "> " + vectorName;
w.write(indent + typeDef + ";\n");
}
for (Map.Entry<String, String> entry : mapTypeDefs.entrySet()) {
String typeName = entry.getKey();
String mapName = entry.getValue();
String typeDef = "typedef std::map<vespalib::string, " + typeName + "> " + mapName;
w.write(indent + typeDef + ";\n");
}
}
private static String getInternalClassName(CNode root) {
return "Internal" + getTypeName(root, false) + "Type";
}
    /**
     * Closes the generated header: ends the internal class and namespace,
     * then emits the public ConfigBuilder/Config typedefs.
     * NOTE(review): two write() calls are truncated in this copy of the source
     * (the internal-namespace close and the final include-guard endif are cut
     * off); code left byte-identical.
     */
    void writeHeaderFooter(Writer w, CNode root) throws IOException {
        String [] namespaceList = generateCppNameSpace(root);
        String namespaceDefine = generateCppNameSpaceDefine(namespaceList);
        String className = getTypeName(root, false);
        String defineName = namespaceDefine + "_" + getDefineName(className);
        w.write(""
                + "};\n"
                + "\n"
                + "}
        w.write("typedef internal::" + getInternalClassName(root) + " " + className + "ConfigBuilder;\n");
        w.write("typedef const internal::" + getInternalClassName(root) + " " + className + "Config;\n");
        w.write("\n");
        writeNameSpaceEnd(w, namespaceList);
        w.write("
    }
        // Emits the top of the generated header: file doc comment, include
        // guard / includes (several write() literals are truncated in this copy
        // of the source — left byte-identical), forward declarations and the
        // opening of the internal config class.
        String [] namespaceList = generateCppNameSpace(root);
        String namespacePrint = generateCppNameSpaceString(namespaceList);
        String namespaceDefine = generateCppNameSpaceDefine(namespaceList);
        String className = getTypeName(root, false);
        String defineName = namespaceDefine + "_" + getDefineName(className);
        w.write(""
                + "/**\n"
                + " * @class " + namespacePrint + "::" + className + "\n"
                + " * @ingroup config\n"
                + " *\n"
                + " * @brief This is an autogenerated class for handling VESPA config.\n"
                + " *\n"
                + " * This class is autogenerated by vespa from a config definition file.\n"
                + " * To subscribe to config, you need to include the config/config.h header, \n"
                + " * and create a ConfigSubscriber in order to subscribe for config.\n"
        );
        // Append the definition file's own comment to the class doc, line by line.
        if (root.getComment().length() > 0) {
            w.write(" *\n");
            StringTokenizer st = new StringTokenizer(root.getComment(), "\n");
            while (st.hasMoreTokens()) {
                w.write(" * " + st.nextToken() + "\n");
            }
        }
        w.write(""
                + " */\n"
                + "
                + "
                + "\n"
                + "
                + "
                + "
                + "
                + "\n");
        // Forward declarations to keep the generated header light-weight.
        w.write("namespace config {\n");
        w.write("    class ConfigValue;\n");
        w.write("    class ConfigPayload;\n");
        w.write("}\n\n");
        w.write("namespace vespalib::slime {\n");
        w.write("    struct Inspector;\n");
        w.write("    struct Cursor;\n");
        w.write("}\n\n");
        writeNameSpaceBegin(w, namespaceList);
        w.write("\nnamespace internal {\n\n");
        w.write(""
                + "/**\n"
                + " * This class contains the config. DO NOT USE THIS CLASS DIRECTLY. Use the typedeffed\n"
                + " * versions after this class declaration.\n"
                + " */\n"
                + "class Internal" + className + "Type : public ::config::ConfigInstance\n"
                + "{\n"
        );
    }
    /**
     * Declares nested C++ types for complex children (structs and enums),
     * each together with Vector/Map typedefs. Enum children additionally get
     * string<->enum conversion helpers and a slime converter functor.
     * Each distinct child name is declared only once.
     */
    void writeTypeDeclarations(Writer w, CNode node, String indent) throws IOException {
        java.util.Set<String> declaredTypes = new java.util.HashSet<String>();
        for (CNode child : node.getChildren()) {
            boolean complexType = (child instanceof InnerCNode || child instanceof LeafCNode.EnumLeaf);
            if (complexType && !declaredTypes.contains(child.getName())) {
                String typeName = getTypeName(child, false);
                declaredTypes.add(child.getName());
                if (child instanceof LeafCNode.EnumLeaf) {
                    // enum declaration plus conversion helper declarations
                    w.write(indent + "enum " + typeName + " { ");
                    LeafCNode.EnumLeaf leaf = (LeafCNode.EnumLeaf) child;
                    for (int i=0; i<leaf.getLegalValues().length; ++i) {
                        if (i != 0) {
                            w.write(", ");
                        }
                        w.write(leaf.getLegalValues()[i]);
                    }
                    w.write(" };\n"
                            + indent + "typedef std::vector<" + typeName + "> "
                            + typeName + "Vector;"
                            + "\n"
                            + indent + "typedef std::map<vespalib::string, " + typeName + "> "
                            + typeName + "Map;"
                            + "\n"
                            + indent + "static " + typeName + " get" + typeName + "(const vespalib::string&);\n"
                            + indent + "static vespalib::string get" + typeName + "Name(" + typeName + " e);\n"
                            + "\n"
                    );
                    w.write(indent + "struct Internal" + typeName + "Converter {\n");
                    w.write(indent + "    " + typeName + " operator()(const ::vespalib::string & __fieldName, const ::vespalib::slime::Inspector & __inspector);\n");
                    w.write(indent + "    " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector);\n");
                    w.write(indent + "    " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector, " + typeName + " __eDefault);\n");
                    w.write(indent + "};\n");
                } else {
                    // struct child: recurse for its own nested types and members
                    w.write(indent + "class " + typeName + " {\n");
                    w.write(indent + "public:\n");
                    writeTypeDeclarations(w, child, indent + "    ");
                    writeStructFunctionDeclarations(w, getTypeName(child, false), child, indent + "    ");
                    writeMembers(w, child, indent + "    ");
                    w.write(indent + "};\n");
                    w.write(indent + "typedef std::vector<" + typeName + "> " + typeName + "Vector;\n\n");
                    w.write(indent + "typedef std::map<vespalib::string, " + typeName + "> " + typeName + "Map;\n\n");
                }
            }
        }
    }
void writeHeaderFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(""
+ indent + "const vespalib::string & defName() const override { return CONFIG_DEF_NAME; }\n"
+ indent + "const vespalib::string & defMd5() const override { return CONFIG_DEF_MD5; }\n"
+ indent + "const vespalib::string & defNamespace() const override { return CONFIG_DEF_NAMESPACE; }\n"
+ indent + "void serialize(::config::ConfigDataBuffer & __buffer) const override;\n");
writeConfigClassFunctionDeclarations(w, "Internal" + className + "Type", node, indent);
}
void writeConfigClassFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(indent + className + "(const ::config::ConfigValue & __value);\n");
w.write(indent + className + "(const ::config::ConfigDataBuffer & __value);\n");
w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n");
writeCommonFunctionDeclarations(w, className, node, indent);
}
void writeStructFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(indent + className + "(const std::vector<vespalib::string> & __lines);\n");
w.write(indent + className + "(const vespalib::slime::Inspector & __inspector);\n");
w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n");
writeCommonFunctionDeclarations(w, className, node, indent);
w.write(indent + "void serialize(vespalib::slime::Cursor & __cursor) const;\n");
}
void writeClassCopyConstructorDeclaration(Writer w, String className, String indent) throws IOException {
w.write(indent + className + "(const " + className + " & __rhs);\n");
}
void writeClassAssignmentOperatorDeclaration(Writer w, String className, String indent) throws IOException {
w.write(indent + className + " & operator = (const " + className + " & __rhs);\n");
}
void writeConfigClassCopyConstructorDefinition(Writer w, String parent, String className) throws IOException {
w.write(parent + "::" + className + "(const " + className + " & __rhs) = default;\n");
}
void writeConfigClassAssignmentOperatorDefinition(Writer w, String parent, String className) throws IOException {
w.write(parent + " & " + parent + "::" + "operator =(const " + className + " & __rhs) = default;\n");
}
void writeClassCopyConstructorDefinition(Writer w, String parent, CNode node) throws IOException {
String typeName = getTypeName(node, false);
w.write(parent + "::" + typeName + "(const " + typeName + " & __rhs) = default;\n");
}
void writeClassAssignmentOperatorDefinition(Writer w, String parent, CNode node) throws IOException {
String typeName = getTypeName(node, false);
w.write(parent + " & " + parent + "::" + "operator = (const " + typeName + " & __rhs) = default;\n");
}
void writeDestructor(Writer w, String parent, String className) throws IOException {
w.write(parent + "~" + className + "() { } \n");
}
void writeCommonFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write("" + indent + className + "();\n");
writeClassCopyConstructorDeclaration(w, className, indent);
writeClassAssignmentOperatorDeclaration(w, className, indent);
w.write("" + indent + "~" + className + "();\n");
w.write("\n"
+ indent + "bool operator==(const " + className + "& __rhs) const;\n"
+ indent + "bool operator!=(const " + className + "& __rhs) const;\n"
+ "\n"
);
}
static String getTypeName(CNode node, boolean includeArray) {
String type = null;
if (node instanceof InnerCNode) {
InnerCNode innerNode = (InnerCNode) node;
type = getTypeName(innerNode.getName());
} else if (node instanceof LeafCNode) {
LeafCNode leaf = (LeafCNode) node;
if (leaf.getType().equals("bool")) {
type = "bool";
} else if (leaf.getType().equals("int")) {
type = "int32_t";
} else if (leaf.getType().equals("long")) {
type = "int64_t";
} else if (leaf.getType().equals("double")) {
type = "double";
} else if (leaf.getType().equals("enum")) {
type = getTypeName(node.getName());
} else if (leaf.getType().equals("string")) {
type = "vespalib::string";
} else if (leaf.getType().equals("reference")) {
type = "vespalib::string";
} else if (leaf.getType().equals("file")) {
type = "vespalib::string";
} else {
throw new IllegalArgumentException("Unknown leaf datatype " + leaf.getType());
}
}
if (type == null) {
throw new IllegalArgumentException("Unknown node " + node);
}
if (node.isArray && includeArray) {
if (vectorTypeDefs.containsKey(type)) {
type = vectorTypeDefs.get(type);
} else {
type = type + "Vector";
}
} else if (node.isMap && includeArray) {
if (mapTypeDefs.containsKey(type)) {
type = mapTypeDefs.get(type);
} else {
type = type + "Map";
}
}
return type;
}
void writeStaticMemberDeclarations(Writer w, String indent) throws IOException {
w.write(""
+ indent + "static const vespalib::string CONFIG_DEF_MD5;\n"
+ indent + "static const vespalib::string CONFIG_DEF_VERSION;\n"
+ indent + "static const vespalib::string CONFIG_DEF_NAME;\n"
+ indent + "static const vespalib::string CONFIG_DEF_NAMESPACE;\n"
+ indent + "static const std::vector<vespalib::string> CONFIG_DEF_SCHEMA;\n"
+ indent + "static const int64_t CONFIG_DEF_SERIALIZE_VERSION;\n"
+ "\n"
);
}
    /**
     * Emits a C++ comment for the given text, wrapping lines at roughly 80
     * columns. With javadoc=true a doxygen-style block comment is produced.
     * NOTE(review): two write() calls in this copy of the source have truncated
     * string literals (extraction artifact); code is left byte-identical.
     */
    void writeComment(Writer w, String indent, String comment, boolean javadoc)
        throws IOException
    {
        /** If simple one liner comment, write on one line. */
        if (javadoc && comment.indexOf('\n') == -1
            && comment.length() <= 80 - (indent.length() + 7))
        {
            w.write(indent + "/** " + comment + " */\n");
            return;
        } else if (!javadoc && comment.indexOf('\n') == -1
            && comment.length() <= 80 - (indent.length() + 3))
        {
            w.write(indent + "
            return;
        }
        /** If not we need to write multi line comment. */
        int maxLineLen = 80 - (indent.length() + 3);
        if (javadoc) w.write(indent + "/**\n");
        do {
            // Consume the comment one source line at a time.
            String current;
            int newLine = comment.indexOf('\n');
            if (newLine == -1) {
                current = comment;
                comment = "";
            } else {
                current = comment.substring(0, newLine);
                comment = comment.substring(newLine + 1);
            }
            // Wrap long lines: prefer breaking at a space near the limit,
            // otherwise hard-break and mark the split with a trailing '-'.
            if (current.length() > maxLineLen) {
                int spaceIndex = current.lastIndexOf(' ', maxLineLen);
                if (spaceIndex >= maxLineLen - 15) {
                    comment = current.substring(spaceIndex + 1)
                        + "\n" + comment;
                    current = current.substring(0, spaceIndex);
                } else {
                    comment = current.substring(maxLineLen) + "\n" + comment;
                    current = current.substring(0, maxLineLen) + "-";
                }
            }
            w.write(indent + (javadoc ? " * " : "
        } while (comment.length() > 0);
        if (javadoc) w.write(indent + " */\n");
    }
    /**
     * Emits one member variable declaration per child node, preceded by the
     * child's comment (if any). Paragraphs before the last "\n\n" are written
     * as plain comments, the final paragraph as a doxygen comment.
     * NOTE(review): one write() call is truncated in this copy of the source;
     * code left byte-identical.
     */
    void writeMembers(Writer w, CNode node, String indent) throws IOException {
        for (CNode child : node.getChildren()) {
            String typeName = getTypeName(child, true);
            if (child.getComment().length() > 0) {
                String comment = child.getComment();
                int index;
                // Split the comment on blank lines; all but the last paragraph
                // become non-doxygen comments.
                do {
                    index = comment.indexOf("\n\n");
                    if (index == -1) break;
                    String next = comment.substring(0, index);
                    comment = comment.substring(index + 2);
                    w.write("\n");
                    writeComment(w, indent, next, false);
                } while (true);
                w.write("\n");
                writeComment(w, indent, comment, true);
            }
            w.write(indent + typeName + " " + getIdentifier(child.getName()) + ";");
            if (child instanceof LeafCNode) {
                LeafCNode leaf = (LeafCNode) child;
                DefaultValue value = leaf.getDefaultValue();
                if (value != null) {
                    w.write("
                }
            }
            w.write("\n");
        }
    }
void writeHeaderTypeDefs(Writer w, CNode root, String indent) throws IOException {
w.write(indent + "typedef std::unique_ptr<const " + getInternalClassName(root) + "> UP;\n");
for (Map.Entry<String, String> entry : vectorTypeDefs.entrySet()) {
String typeName = entry.getKey();
String vectorName = entry.getValue();
String typeDef = "typedef std::vector<" + typeName + "> " + vectorName;
w.write(indent + typeDef + ";\n");
}
for (Map.Entry<String, String> entry : mapTypeDefs.entrySet()) {
String typeName = entry.getKey();
String mapName = entry.getValue();
String typeDef = "typedef std::map<vespalib::string, " + typeName + "> " + mapName;
w.write(indent + typeDef + ";\n");
}
}
private static String getInternalClassName(CNode root) {
return "Internal" + getTypeName(root, false) + "Type";
}
    /**
     * Closes the generated header: ends the internal class and namespace,
     * then emits the public ConfigBuilder/Config typedefs.
     * NOTE(review): two write() calls are truncated in this copy of the source
     * (the internal-namespace close and the final include-guard endif are cut
     * off); code left byte-identical.
     */
    void writeHeaderFooter(Writer w, CNode root) throws IOException {
        String [] namespaceList = generateCppNameSpace(root);
        String namespaceDefine = generateCppNameSpaceDefine(namespaceList);
        String className = getTypeName(root, false);
        String defineName = namespaceDefine + "_" + getDefineName(className);
        w.write(""
                + "};\n"
                + "\n"
                + "}
        w.write("typedef internal::" + getInternalClassName(root) + " " + className + "ConfigBuilder;\n");
        w.write("typedef const internal::" + getInternalClassName(root) + " " + className + "Config;\n");
        w.write("\n");
        writeNameSpaceEnd(w, namespaceList);
        w.write("
    }
private final CNode root;
private final NormalizedDefinition nd;
private final File rootDir;
private final String relativePathUnderRoot;
private static final Map<String, String> vectorTypeDefs;
static {
Map<String, String> map = new HashMap<String, String>();
map.put("bool", "BoolVector");
map.put("int32_t", "IntVector");
map.put("int64_t", "LongVector");
map.put("double", "DoubleVector");
map.put("vespalib::string", "StringVector");
vectorTypeDefs = Collections.unmodifiableMap(map);
}
private static final Map<String, String> mapTypeDefs;
static {
Map<String, String> map = new HashMap<>();
map.put("bool", "BoolMap");
map.put("int32_t", "IntMap");
map.put("int64_t", "LongMap");
map.put("double", "DoubleMap");
map.put("vespalib::string", "StringMap");
mapTypeDefs = Collections.unmodifiableMap(map);
}
private static final Map<String, String> slimeTypeMap;
static {
Map<String, String> map = new HashMap<String, String>();
map.put("bool", "Bool");
map.put("int", "Long");
map.put("long", "Long");
map.put("double", "Double");
map.put("string", "String");
map.put("enum", "String");
map.put("file", "String");
map.put("reference", "String");
slimeTypeMap = Collections.unmodifiableMap(map);
}
    /**
     * @param root parsed config definition tree to generate classes for
     * @param nd normalized definition content (embedded as the schema)
     * @param rootDir output root directory
     * @param relativePathUnderRoot directory under rootDir for generated files
     */
    public CppClassBuilder(CNode root, NormalizedDefinition nd, File rootDir, String relativePathUnderRoot) {
        this.root = root;
        this.nd = nd;
        this.rootDir = rootDir;
        this.relativePathUnderRoot = relativePathUnderRoot;
    }
    /** Entry point: generates the C++ header and body files for the root definition. */
    public void createConfigClasses() {
        generateConfig(root, nd);
    }
String readFile(File f) throws IOException {
if (!f.isFile()) return null;
StringBuilder sb = new StringBuilder();
try (BufferedReader sr = new BufferedReader(new FileReader(f))) {
while (true) {
String line = sr.readLine();
if (line == null) break;
sb.append(line).append("\n");
}
return sb.toString();
}
}
void writeFile(File f, String content) throws IOException {
FileWriter fw = new FileWriter(f);
fw.write(content);
fw.close();
}
    /**
     * Renders the header and body files into memory, then writes each to disk
     * only if its content changed — this avoids touching file timestamps and
     * triggering needless recompiles.
     * NOTE(review): IOExceptions are swallowed after printing the stack trace;
     * callers cannot detect a failed generation. Kept as-is since callers may
     * rely on the best-effort behavior.
     */
    void generateConfig(CNode root, NormalizedDefinition nd) {
        try{
            StringWriter headerWriter = new StringWriter();
            StringWriter bodyWriter = new StringWriter();
            writeHeaderFile(headerWriter, root);
            writeBodyFile(bodyWriter, root, relativePathUnderRoot, nd);
            String newHeader = headerWriter.toString();
            String newBody = bodyWriter.toString();
            File headerFile = new File(rootDir, relativePathUnderRoot + "/" + getFileName(root, "h"));
            File bodyFile = new File(rootDir, relativePathUnderRoot + "/" + getFileName(root, "cpp"));
            String oldHeader = readFile(headerFile);
            String oldBody = readFile(bodyFile);
            // Only rewrite when missing or different.
            if (oldHeader == null || !oldHeader.equals(newHeader)) {
                writeFile(headerFile, newHeader);
            }
            if (oldBody == null || !oldBody.equals(newBody)) {
                writeFile(bodyFile, newBody);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
String getFileName(CNode node, String extension) {
return "config-" + node.getName() + "." + extension;
}
static String removeDashesAndUpperCaseAllFirstChars(String source, boolean capitalizeFirst) {
String parts[] = source.split("[-_]");
StringBuilder sb = new StringBuilder();
for (String s : parts) {
sb.append(s.substring(0, 1).toUpperCase()).append(s.substring(1));
}
String result = sb.toString();
if (!capitalizeFirst) {
result = result.substring(0,1).toLowerCase() + result.substring(1);
}
return result;
}
/** Convert name of type to the name we want to use in macro ifdefs in file. */
String getDefineName(String name) {
return name.toUpperCase().replace("-", "");
}
    /** Convert name of type to the name we want to use as type name in the generated code (camel case, capitalized). */
    static String getTypeName(String name) {
        return removeDashesAndUpperCaseAllFirstChars(name, true);
    }
    /** Convert name of an identifier from value in def file to name to use in C++ file (camel case, lower-cased first char). */
    String getIdentifier(String name) {
        return removeDashesAndUpperCaseAllFirstChars(name, false);
    }
    /** Writes the complete generated header: preamble, public section, footer. */
    void writeHeaderFile(Writer w, CNode root) throws IOException {
        writeHeaderHeader(w, root);
        writeHeaderPublic(w, root);
        writeHeaderFooter(w, root);
    }
void writeHeaderPublic(Writer w, CNode root) throws IOException {
w.write("public:\n");
writeHeaderTypeDefs(w, root, " ");
writeTypeDeclarations(w, root, " ");
writeHeaderFunctionDeclarations(w, getTypeName(root, false), root, " ");
writeStaticMemberDeclarations(w, " ");
writeMembers(w, root, " ");
}
String [] generateCppNameSpace(CNode root) {
String namespace = root.getNamespace();
if (namespace.contains(".")) {
return namespace.split("\\.");
}
return new String[]{namespace};
}
String generateCppNameSpaceString(String[] namespaceList) {
StringBuilder str = new StringBuilder();
for (int i = 0; i < namespaceList.length - 1; i++) {
str.append(namespaceList[i]);
str.append("::");
}
str.append(namespaceList[namespaceList.length - 1]);
return str.toString();
}
String generateCppNameSpaceDefine(String[] namespaceList) {
StringBuilder str = new StringBuilder();
for (int i = 0; i < namespaceList.length - 1; i++) {
str.append(namespaceList[i].toUpperCase());
str.append("_");
}
str.append(namespaceList[namespaceList.length - 1].toUpperCase());
return str.toString();
}
void writeNameSpaceBegin(Writer w, String [] namespaceList) throws IOException {
w.write("namespace ");
w.write(getNestedNameSpace(namespaceList));
w.write(" {\n");
}
String getNestedNameSpace(String [] namespaceList) {
return Arrays.stream(namespaceList).map(String::toString).collect(Collectors.joining("::"));
}
    /**
     * Closes the nested C++ namespace and appends a trailing comment naming it.
     * NOTE(review): the first write() literal is truncated in this copy of the
     * source (presumably "} // namespace "); code left byte-identical.
     */
    void writeNameSpaceEnd(Writer w, String [] namespaceList) throws IOException {
        w.write("}
        w.write(getNestedNameSpace(namespaceList));
        w.write("\n");
    }
    /** Writes the complete generated .cpp file: includes, static member definitions, class definitions, footer. */
    void writeBodyFile(Writer w, CNode root, String subdir, NormalizedDefinition nd) throws IOException {
        writeBodyHeader(w, root, subdir);
        writeStaticMemberDefinitions(w, root, nd);
        writeDefinition(w, root, null);
        writeBodyFooter(w, root);
    }
    /**
     * Writes the top of the generated .cpp file: the self-include and the other
     * #include lines, then opens the namespaces and pulls in the using
     * declarations the generated code relies on.
     * NOTE(review): many write() literals (the #include lines) are truncated in
     * this copy of the source; code left byte-identical.
     */
    void writeBodyHeader(Writer w, CNode root, String subdir) throws IOException {
        // Self-include path depends on whether a subdirectory is configured.
        if (subdir == null) {
            w.write("
        } else {
            w.write("
        }
        w.write("\n");
        w.write("
        w.write("
        w.write("
        w.write("
        w.write("
        w.write("
        w.write("
        w.write("
        w.write("
        w.write("
        w.write("\n");
        writeNameSpaceBegin(w, generateCppNameSpace(root));
        w.write("\nnamespace internal {\n\n");
        w.write("using ::config::ConfigParser;\n");
        w.write("using ::config::InvalidConfigException;\n");
        w.write("using ::config::ConfigInstance;\n");
        w.write("using ::config::ConfigValue;\n");
        w.write("using namespace vespalib::slime::convenience;\n");
        w.write("\n");
    }
    /**
     * Defines the static config metadata members (md5, version, name,
     * namespace, serialize version) and embeds the normalized definition
     * schema as a static string array.
     */
    void writeStaticMemberDefinitions(Writer w, CNode root, NormalizedDefinition nd) throws IOException {
        String typeName = getInternalClassName(root);
        w.write("const vespalib::string " + typeName + "::CONFIG_DEF_MD5(\"" + root.defMd5 + "\");\n"
                + "const vespalib::string " + typeName + "::CONFIG_DEF_VERSION(\"" + root.defVersion + "\");\n"
                + "const vespalib::string " + typeName + "::CONFIG_DEF_NAME(\"" + root.defName + "\");\n"
                + "const vespalib::string " + typeName + "::CONFIG_DEF_NAMESPACE(\"" + root.getNamespace() + "\");\n"
                + "const int64_t " + typeName + "::CONFIG_DEF_SERIALIZE_VERSION(1);\n");
        // Each schema line becomes a quoted C++ string literal; quotes escaped.
        w.write("const static vespalib::string __internalDefSchema[] = {\n");
        for (String line : nd.getNormalizedContent()) {
            w.write("\"" + line.replace("\"", "\\\"") + "\",\n");
        }
        w.write("};\n");
        w.write("const std::vector<vespalib::string> " + typeName + "::CONFIG_DEF_SCHEMA(__internalDefSchema,\n");
        w.write("        __internalDefSchema + (sizeof(__internalDefSchema) / \n");
        w.write("                               sizeof(__internalDefSchema[0])));\n");
        w.write("\n");
    }
    /**
     * Emits the C++ definitions for a config node: nested enum helpers and
     * struct definitions (recursively), the default constructor with member
     * initializer list, copy operations, destructor, the line-parsing
     * constructor, operator== / operator!=, and the slime encode/decode
     * members. A null fullClassName marks the root config class, which gets
     * the ConfigValue-based constructor and error-wrapping behavior.
     */
    void writeDefinition(Writer w, CNode node, String fullClassName) throws IOException {
        boolean root = false;
        if (fullClassName == null) {
            fullClassName = getInternalClassName(node);
            root = true;
        }
        final String parent = fullClassName + "::";
        java.util.Set<String> declaredTypes = new java.util.HashSet<String>();
        // First pass: definitions for complex child types (each name only once).
        for (CNode child : node.getChildren()) {
            boolean complexType = (child instanceof InnerCNode || child instanceof LeafCNode.EnumLeaf);
            if (complexType && !declaredTypes.contains(child.getName())) {
                String typeName = getTypeName(child, false);
                declaredTypes.add(child.getName());
                if (child instanceof LeafCNode.EnumLeaf) {
                    // string -> enum conversion function
                    LeafCNode.EnumLeaf leaf = (LeafCNode.EnumLeaf) child;
                    w.write(parent + typeName + "\n"
                            + parent + "get" + typeName + "(const vespalib::string& name)\n"
                            + "{\n"
                    );
                    for (int i=0; i<leaf.getLegalValues().length; ++i) {
                        w.write("    " + (i != 0 ? "} else " : ""));
                        w.write("if (name == \"" + leaf.getLegalValues()[i] + "\") {\n"
                                + "        return " + leaf.getLegalValues()[i] + ";\n");
                    }
                    w.write("    } else {\n"
                            + "        throw InvalidConfigException(\"Illegal enum value '\" + name + \"'\");\n"
                            + "    }\n"
                            + "}\n"
                            + "\n"
                    );
                    // enum -> string conversion function
                    w.write("vespalib::string\n"
                            + parent + "get" + typeName + "Name(" + typeName + " t)\n"
                            + "{\n"
                            + "    switch (t) {\n"
                    );
                    for (int i=0; i<leaf.getLegalValues().length; ++i) {
                        w.write("    case " + leaf.getLegalValues()[i] + ": return \"" + leaf.getLegalValues()[i] + "\";\n");
                    }
                    w.write("    default:\n"
                            + "    {\n"
                            + "        vespalib::asciistream ost;\n"
                            + "        ost << \"UNKNOWN(\" << t << \")\";\n"
                            + "        return ost.str();\n"
                            + "    }\n"
                            + "    }\n"
                            + "}\n"
                            + "\n"
                    );
                    // slime converter functor: required / plain / defaulted overloads
                    w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::string & __fieldName, const ::vespalib::slime::Inspector & __inspector) {\n");
                    w.write("    if (__inspector.valid()) {\n");
                    w.write("        return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
                    w.write("    }\n");
                    w.write("    throw InvalidConfigException(\"Value for '\" + __fieldName + \"' required but not found\");\n");
                    w.write("}\n");
                    w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::slime::Inspector & __inspector) {\n");
                    w.write("    return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
                    w.write("}\n");
                    w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::slime::Inspector & __inspector, " + typeName + " __eDefault) {\n");
                    w.write("    if (__inspector.valid()) {\n");
                    w.write("        return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
                    w.write("    }\n");
                    w.write("    return __eDefault;\n");
                    w.write("}\n\n");
                } else {
                    writeDefinition(w, child, parent + typeName);
                }
            }
        }
        String tmpName = getTypeName(node, false);
        String typeName = root ? getInternalClassName(node) : tmpName;
        // Default constructor with member initializer list.
        w.write(parent + typeName + "()\n");
        for (int i=0; i<node.getChildren().length; ++i) {
            CNode child = node.getChildren()[i];
            String childName = getIdentifier(child.getName());
            if (i == 0) {
                w.write("    : " + childName + "(");
            } else {
                w.write("),\n      " + childName + "(");
            }
            // Arrays/maps default-construct empty; leaves get their default or a type-appropriate zero value.
            if (child.isArray || child.isMap) {
            } else if (child instanceof LeafCNode) {
                LeafCNode leaf = (LeafCNode) child;
                if (leaf.getDefaultValue() != null) {
                    w.write(getDefaultValue(leaf));
                } else {
                    if (leaf.getType().equals("bool")) {
                        w.write("false");
                    } else if (leaf.getType().equals("int")) {
                        w.write("0");
                    } else if (leaf.getType().equals("double")) {
                        w.write("0");
                    } else if (leaf.getType().equals("string")) {
                    } else if (leaf.getType().equals("enum")) {
                        LeafCNode.EnumLeaf enumNode = (LeafCNode.EnumLeaf) leaf;
                        w.write(enumNode.getLegalValues()[0]);
                    } else if (leaf.getType().equals("reference")) {
                    } else if (leaf.getType().equals("file")) {
                    }
                }
            }
        }
        if (node.getChildren().length > 0)
            w.write(")\n");
        w.write(""
                + "{\n"
                + "}\n"
                + "\n"
        );
        if (root) {
            writeConfigClassCopyConstructorDefinition(w, fullClassName, typeName);
            writeConfigClassAssignmentOperatorDefinition(w, fullClassName, typeName);
        } else {
            writeClassCopyConstructorDefinition(w, fullClassName, node);
            writeClassAssignmentOperatorDefinition(w, fullClassName, node);
        }
        writeDestructor(w, parent, typeName);
        // Line-parsing constructor; the root variant takes a ConfigValue and
        // wraps parse errors with the definition name/namespace.
        String indent = "    ";
        if (root) {
            w.write(typeName + "::" + typeName + "(const ConfigValue & __value)\n"
                    + "{\n"
                    + indent + "try {\n");
            indent = "        ";
            w.write(indent + "const std::vector<vespalib::string> & __lines(__value.getLines());\n");
        } else {
            w.write(parent + typeName + "(const std::vector<vespalib::string> & __lines)\n"
                    + "{\n");
        }
        w.write(""
                + indent + "std::set<vespalib::string> __remainingValuesToParse("
                + "__lines.begin(), __lines.end());\n");
        // Drop whitespace-only lines from the to-parse set.
        w.write(indent + "for(std::set<vespalib::string>::iterator __rVTPiter = __remainingValuesToParse.begin();\n"
                + indent + "    __rVTPiter != __remainingValuesToParse.end();)\n"
                + indent + "{\n"
                + indent + "    if (ConfigParser::stripWhitespace(*__rVTPiter).empty()) {\n"
                + indent + "        std::set<vespalib::string>::iterator __rVTPiter2 = __rVTPiter++;\n"
                + indent + "        __remainingValuesToParse.erase(__rVTPiter2);\n"
                + indent + "    } else {\n"
                + indent + "        ++__rVTPiter;\n"
                + indent + "    }\n"
                + indent + "}\n");
        // Parse each member from the config lines. Enums are parsed as strings
        // first, then converted.
        for (CNode child : node.getChildren()) {
            String childType = getTypeName(child, false);
            String childName = getIdentifier(child.getName());
            if (child instanceof LeafCNode.EnumLeaf) {
                if (child.isArray) {
                    w.write(indent + "std::vector<vespalib::string> " + childName + "__ValueList(\n        ");
                } else if (child.isMap) {
                    w.write(indent + "std::map<vespalib::string, vespalib::string> " + childName + "__ValueMap(\n        ");
                } else {
                    w.write(indent + childName + " = get" + childType + "(");
                }
                childType = "vespalib::string";
            } else {
                w.write(indent + childName + " = ");
            }
            if (child.isArray) {
                w.write("ConfigParser::parseArray<" + childType + ">(\""
                        + child.getName() + "\", __lines)");
            } else if (child.isMap) {
                w.write("ConfigParser::parseMap<" + childType + ">(\""
                        + child.getName() + "\", __lines)");
            } else {
                if (child instanceof LeafCNode) {
                    w.write("ConfigParser::parse<" + childType + ">(\""
                            + child.getName() + "\", __lines");
                } else {
                    w.write("ConfigParser::parseStruct<" + childType + ">(\""
                            + child.getName() + "\", __lines");
                }
                if (child instanceof LeafCNode && ((LeafCNode) child).getDefaultValue() != null) {
                    LeafCNode leaf = (LeafCNode) child;
                    if (leaf.getDefaultValue().getValue() != null) {
                        String defaultVal = getDefaultValue(leaf);
                        if (leaf instanceof LeafCNode.EnumLeaf) {
                            defaultVal = '"' + defaultVal + '"';
                        }
                        w.write(", " + defaultVal);
                    }
                }
                w.write(")");
            }
            if (child instanceof LeafCNode.EnumLeaf) {
                childType = getTypeName(child, false);
                w.write(");\n");
                // Convert the intermediate string list/map into enum values.
                if (child.isArray) {
                    w.write(indent + childName + ".reserve(" + childName + "__ValueList.size());\n"
                            + indent + "for (std::vector<vespalib::string>::const_iterator __it\n"
                            + indent + "        = " + childName + "__ValueList.begin();\n"
                            + indent + "    __it != " + childName + "__ValueList.end(); ++__it)\n"
                            + indent + "{\n"
                            + indent + "    " + childName + ".push_back(get" + childType + "(*__it));\n"
                            + indent + "}\n"
                    );
                } else if (child.isMap) {
                    w.write(indent + "typedef std::map<vespalib::string, vespalib::string> __ValueMap;\n");
                    w.write(indent + "for (__ValueMap::iterator __it(" + childName + "__ValueMap.begin()), __mt(" + childName + "__ValueMap.end()); __it != __mt; __it++) {\n"
                            + "    " + childName + "[__it->first] = get" + childType + "(__it->second);\n"
                            + "}\n"
                    );
                }
            } else {
                w.write(";\n");
            }
            w.write(indent + "ConfigParser::stripLinesForKey(\""
                    + child.getName() + "\", "
                    + "__remainingValuesToParse);\n");
        }
        if (root) {
            indent = "    ";
            w.write(indent + "} catch (InvalidConfigException & __ice) {\n");
            w.write(indent + "    throw InvalidConfigException(\"Error parsing config '\" + CONFIG_DEF_NAME + \"' in namespace '\" + CONFIG_DEF_NAMESPACE + \"'"
                    + ": \" + __ice.getMessage());\n"
                    + indent + "}\n");
        }
        w.write("}\n"
                + "\n"
        );
        // operator==: memberwise comparison.
        String lineBreak = (parent.length() + typeName.length() < 50 ? "" : "\n");
        w.write("bool\n"
                + parent + lineBreak + "operator==(const " + typeName + "& __rhs) const\n"
                + "{\n"
                + "    return ("
        );
        for (int i = 0; i<node.getChildren().length; ++i) {
            CNode child = node.getChildren()[i];
            String childName = getIdentifier(child.getName());
            if (i != 0) {
                w.write(" &&\n            ");
            }
            w.write(childName + " == __rhs." + childName);
        }
        w.write(");\n"
                + "}\n"
                + "\n"
        );
        lineBreak = (parent.length() + typeName.length() < 50 ? "" : "\n");
        w.write("bool\n"
                + parent + lineBreak + "operator!=(const " + typeName + "& __rhs) const\n"
                + "{\n"
                + "    return !(operator==(__rhs));\n"
                + "}\n"
                + "\n"
        );
        writeSlimeEncoder(w, node, parent, root);
        writeSlimeDecoder(w, node, parent, root);
        writeSlimeConstructor(w, node, parent, root);
    }
/**
 * Emits the C++ {@code serialize()} member that encodes this config (sub)struct
 * into a Slime cursor tree. Every child field becomes an object with a "type"
 * tag and a "value" payload; arrays and maps nest one object per element.
 *
 * @param w      destination for the generated C++ source
 * @param node   config node whose children are serialized
 * @param parent C++ scope prefix ("Outer::Inner::") for the member definition
 * @param root   true when emitting the top-level config class (adds the
 *               configKey/defSchema envelope around the payload)
 */
public void writeSlimeEncoder(Writer w, CNode node, String parent, boolean root) throws IOException
{
    String indent = " ";
    if (root) {
        // Root variant serializes into a ConfigDataBuffer and wraps the payload
        // with version + configKey metadata (name, namespace, md5, schema).
        w.write("void\n"
                + parent + "serialize(::config::ConfigDataBuffer & __buffer) const\n"
                + "{\n");
        w.write(indent + "vespalib::Slime & __slime(__buffer.slimeObject());\n");
        w.write(indent + "vespalib::slime::Cursor & __croot = __slime.setObject();\n");
        w.write(indent + "__croot.setDouble(\"version\", CONFIG_DEF_SERIALIZE_VERSION);\n");
        w.write(indent + "vespalib::slime::Cursor & __key = __croot.setObject(\"configKey\");\n");
        w.write(indent + "__key.setString(\"defName\", vespalib::Memory(CONFIG_DEF_NAME));\n");
        w.write(indent + "__key.setString(\"defNamespace\", vespalib::Memory(CONFIG_DEF_NAMESPACE));\n");
        w.write(indent + "__key.setString(\"defMd5\", vespalib::Memory(CONFIG_DEF_MD5));\n");
        w.write(indent + "vespalib::slime::Cursor & __keySchema =__key.setArray(\"defSchema\");\n");
        w.write(indent + "for (size_t i = 0; i < CONFIG_DEF_SCHEMA.size(); i++) {\n");
        w.write(indent + " __keySchema.addString(vespalib::Memory(CONFIG_DEF_SCHEMA[i]));\n");
        w.write(indent + "}\n");
        w.write(indent + "vespalib::slime::Cursor & __cursor = __croot.setObject(\"configPayload\");\n");
    } else {
        // Nested structs serialize straight into the cursor handed to them.
        w.write("void\n"
                + parent + "serialize(vespalib::slime::Cursor & __cursor) const\n"
                + "{\n");
    }
    for (CNode child : node.getChildren()) {
        String childName = getIdentifier(child.getName());
        String childType = getTypeName(child, false);
        w.write(indent + "{\n");
        indent = " ";
        w.write(indent + "vespalib::slime::Cursor & __c = __cursor.setObject(\"" + child.getName() + "\");\n");
        if (child.isArray) {
            // Arrays: one {type,value} object per element under "value".
            w.write(indent + "__c.setString(\"type\", \"array\");\n");
            w.write(indent + "vespalib::slime::Cursor & __c2 = __c.setArray(\"value\");\n");
            w.write(indent + "for (size_t __i = 0; __i < " + childName + ".size(); __i++) {\n");
            w.write(indent + " vespalib::slime::Cursor & __c3 = __c2.addObject();\n");
            if (child instanceof LeafCNode.EnumLeaf) {
                // Enums are serialized by symbolic name, not ordinal.
                String repType = slimeTypeMap.get("enum");
                w.write(indent + " __c3.setString(\"type\", \"enum\");\n");
                w.write(indent + " __c3.set" + repType);
                w.write("(\"value\", vespalib::Memory(get" + childType + "Name(" + childName + "[__i])));\n");
            } else if (child instanceof LeafCNode) {
                String type = ((LeafCNode) child).getType();
                String repType = slimeTypeMap.get(type);
                w.write(indent + " __c3.setString(\"type\", \"" + type + "\");\n");
                w.write(indent + " __c3.set" + repType);
                if ("String".equals(repType)) {
                    w.write("(\"value\", vespalib::Memory(" + childName + "[__i]));\n");
                } else {
                    w.write("(\"value\", " + childName + "[__i]);\n");
                }
            } else {
                // Struct elements recurse via their own serialize().
                w.write(indent + " __c3.setString(\"type\", \"struct\");\n");
                w.write(indent + " Cursor & __c4 = __c3.setObject(\"value\");\n");
                w.write(indent + " " + childName + "[__i].serialize(__c4);\n");
            }
            w.write(indent + "}\n");
        } else if (child.isMap) {
            // Maps: array of {key,type,value} objects, iterated in map order.
            w.write(indent + "__c.setString(\"type\", \"map\");\n");
            w.write(indent + "vespalib::slime::Cursor & __c2 = __c.setArray(\"value\");\n");
            String childMapType = getTypeName(child, true);
            w.write(indent + "for (" + childMapType + "::const_iterator it(" + childName + ".begin()), mt(" + childName + ".end()); it != mt; it++) {\n");
            w.write(indent + " vespalib::slime::Cursor & __c3 = __c2.addObject();\n");
            w.write(indent + " __c3.setString(\"key\", vespalib::Memory(it->first));\n");
            if (child instanceof LeafCNode.EnumLeaf) {
                String repType = slimeTypeMap.get("enum");
                w.write(indent + " __c3.setString(\"type\", \"enum\");\n");
                w.write(indent + " __c3.set" + repType);
                w.write("(\"value\", vespalib::Memory(get" + childType + "Name(it->second)));\n");
            } else if (child instanceof LeafCNode) {
                String type = ((LeafCNode) child).getType();
                String repType = slimeTypeMap.get(type);
                w.write(indent + " __c3.setString(\"type\", \"" + type + "\");\n");
                w.write(indent + " __c3.set" + repType);
                if ("String".equals(repType)) {
                    w.write("(\"value\", vespalib::Memory(it->second));\n");
                } else {
                    w.write("(\"value\", it->second);\n");
                }
            } else {
                w.write(indent + " __c3.setString(\"type\", \"struct\");\n");
                w.write(indent + " Cursor & __c4 = __c3.setObject(\"value\");\n");
                w.write(indent + " it->second.serialize(__c4);\n");
            }
            w.write(indent + "}\n");
        } else {
            // Scalar field: single {type,value} pair.
            if (child instanceof LeafCNode.EnumLeaf) {
                String repType = slimeTypeMap.get("enum");
                w.write(indent + "__c.setString(\"type\", \"enum\");\n");
                w.write(indent + "__c.set" + repType);
                w.write("(\"value\", vespalib::Memory(get" + childType + "Name(" + childName + ")));\n");
            } else if (child instanceof LeafCNode) {
                String type = ((LeafCNode) child).getType();
                String repType = slimeTypeMap.get(type);
                w.write(indent + "__c.setString(\"type\", \"" + type + "\");\n");
                w.write(indent + "__c.set" + repType);
                if ("String".equals(repType)) {
                    w.write("(\"value\", vespalib::Memory(" + childName + "));\n");
                } else {
                    w.write("(\"value\", " + childName + ");\n");
                }
            } else {
                w.write(indent + "__c.setString(\"type\", \"struct\");\n");
                w.write(indent + "Cursor & __c2 = __c.setObject(\"value\");\n");
                w.write(indent + childName + ".serialize(__c2);\n");
            }
        }
        indent = " ";
        w.write(indent + "}\n");
    }
    w.write("}\n\n");
}
/**
 * Emits the C++ constructor that decodes an instance from the Slime tree
 * produced by the serialize() member above (the inverse of writeSlimeEncoder).
 * Each child is read from __inspector["name"]["value"].
 *
 * @param w      destination for the generated C++ source
 * @param node   config node whose children are decoded
 * @param parent C++ scope prefix for nested-struct constructors
 * @param root   true for the top-level class, which unwraps the
 *               "configPayload" envelope of a ConfigDataBuffer first
 */
public void writeSlimeDecoder(Writer w, CNode node, String parent, boolean root) throws IOException {
    String tmpName = getTypeName(node, false);
    String typeName = root ? getInternalClassName(node) : tmpName;
    String indent = " ";
    if (root) {
        w.write(""
                + typeName + "::" + typeName + "(const ::config::ConfigDataBuffer & __buffer)\n"
                + "{\n");
        w.write(indent + "const vespalib::Slime & __slime(__buffer.slimeObject());\n");
        w.write(indent + "vespalib::slime::Inspector & __croot = __slime.get();\n");
        w.write(indent + "vespalib::slime::Inspector & __inspector = __croot[\"configPayload\"];\n");
    } else {
        w.write(""
                + parent + typeName + "(const vespalib::slime::Inspector & __inspector)\n"
                + "{\n");
    }
    for (CNode child : node.getChildren()) {
        String childName = getIdentifier(child.getName());
        String childType = getTypeName(child, false);
        String inspectorLine = "__inspector[\"" + child.getName() + "\"][\"value\"]";
        if (child.isArray) {
            // Arrays: push_back one decoded element per child of the inspector.
            w.write(indent + "for (size_t __i = 0; __i < " + inspectorLine + ".children(); __i++) {\n");
            w.write(indent + " " + childName + ".push_back(");
            if (child instanceof LeafCNode.EnumLeaf) {
                // Enums were serialized by name; convert back via getXxx(name).
                String repType = slimeTypeMap.get("enum");
                w.write("get" + childType + "(" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string())");
            } else if (child instanceof LeafCNode) {
                String type = ((LeafCNode) child).getType();
                String repType = slimeTypeMap.get(type);
                if ("String".equals(repType)) {
                    w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string()");
                } else {
                    w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "()");
                }
            } else {
                // Struct elements recurse through their Inspector constructor.
                w.write(childType + "(" + inspectorLine + "[__i][\"value\"])");
            }
            w.write(");\n");
            w.write(indent + "}\n");
        } else if (child.isMap) {
            // Maps were serialized as an array of {key,value} objects.
            w.write(indent + "for (size_t __i = 0; __i < " + inspectorLine + ".children(); __i++) {\n");
            w.write(indent + " " + childName + "[" + inspectorLine + "[__i][\"key\"].asString().make_string()] = ");
            if (child instanceof LeafCNode.EnumLeaf) {
                String repType = slimeTypeMap.get("enum");
                w.write("get" + childType + "(" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string())");
            } else if (child instanceof LeafCNode) {
                String type = ((LeafCNode) child).getType();
                String repType = slimeTypeMap.get(type);
                if ("String".equals(repType)) {
                    w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string()");
                } else {
                    w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "()");
                }
            } else {
                w.write(childType + "(" + inspectorLine + "[__i][\"value\"])");
            }
            w.write(";\n");
            w.write(indent + "}\n");
        } else {
            // Scalar field: direct assignment from the typed accessor.
            w.write(indent + childName + " = ");
            if (child instanceof LeafCNode.EnumLeaf) {
                String repType = slimeTypeMap.get("enum");
                w.write("get" + childType + "(" + inspectorLine + ".as" + repType + "().make_string())");
            } else if (child instanceof LeafCNode) {
                String type = ((LeafCNode) child).getType();
                String repType = slimeTypeMap.get(type);
                if ("String".equals(repType)) {
                    w.write("" + inspectorLine + ".as" + repType + "().make_string()");
                } else {
                    w.write("" + inspectorLine + ".as" + repType + "()");
                }
            } else {
                w.write(childType + "(" + inspectorLine + ")");
            }
            w.write(";\n");
        }
    }
    w.write("}\n\n");
}
/**
 * Emits the C++ constructor that builds an instance directly from a
 * ConfigPayload (plain payload Slime, without the serialize() envelope).
 * Arrays/maps are filled through generated Vector/Map inserters; scalars go
 * through ValueConverter, with enum fields using their generated converter.
 *
 * @param w      destination for the generated C++ source
 * @param node   config node whose children are converted
 * @param parent C++ scope prefix for nested-struct constructors
 * @param root   true for the top-level config class
 */
public void writeSlimeConstructor(Writer w, CNode node, String parent, boolean root) throws IOException {
    String tmpName = getTypeName(node, false);
    String typeName = root ? getInternalClassName(node) : tmpName;
    String indent = " ";
    if (root) {
        w.write(""
                + typeName + "::" + typeName + "(const ::config::ConfigPayload & __payload)\n"
                + "{\n");
    } else {
        w.write(""
                + parent + typeName + "(const ::config::ConfigPayload & __payload)\n"
                + "{\n");
    }
    w.write(indent + "const vespalib::slime::Inspector & __inspector(__payload.get());\n");
    for (CNode child : node.getChildren()) {
        String childName = getIdentifier(child.getName());
        String childType = getTypeName(child, false);
        String childInspector = "__inspector[\"" + child.getName() + "\"]";
        if (child.isArray) {
            // VectorInserter appends converted elements while traversing.
            String inserterName = "__" + childName + "Inserter";
            w.write(indent + "::config::internal::VectorInserter<" + childType);
            if (child instanceof LeafCNode.EnumLeaf) {
                // Enum vectors need the name->enum converter as a second type arg.
                w.write(", Internal" + childType + "Converter");
            }
            w.write("> " + inserterName + "(" + childName + ");\n");
            w.write(indent + childInspector + ".traverse(" + inserterName + ");\n");
        } else if (child.isMap) {
            String inserterName = "__" + childName + "Inserter";
            w.write(indent + "::config::internal::MapInserter<" + childType);
            if (child instanceof LeafCNode.EnumLeaf) {
                w.write(", Internal" + childType + "Converter");
            }
            w.write("> " + inserterName + "(" + childName + ");\n");
            w.write(indent + childInspector + ".traverse(" + inserterName + ");\n");
        } else {
            w.write(indent + childName + " = ");
            if (child instanceof LeafCNode.EnumLeaf) {
                w.write("Internal" + childType + "Converter");
            } else {
                w.write("::config::internal::ValueConverter<" + childType + ">");
            }
            // Three converter call shapes: with default value, plain struct,
            // or required leaf (field name included for error reporting).
            if (child instanceof LeafCNode && ((LeafCNode) child).getDefaultValue() != null) {
                LeafCNode leaf = (LeafCNode) child;
                String defaultValue = getDefaultValue(leaf);
                w.write("()(" + childInspector + ", " + defaultValue + ");\n");
            } else if (child instanceof InnerCNode) {
                w.write("()(" + childInspector + ");\n");
            } else {
                w.write("()(\"" + child.getName() + "\", " + childInspector + ");\n");
            }
        }
    }
    w.write("}\n\n");
}
void writeBodyFooter(Writer w, CNode root) throws IOException {
w.write("}
writeNameSpaceEnd(w, generateCppNameSpace(root));
}
/**
 * Returns the C++ source text for a leaf's default value.
 * Long/int minimum literals are rewritten to the LONG_MIN/INT_MIN macros
 * (the raw literals are not valid C++ constants), and a string default of
 * "null" is rejected since C++ strings cannot be null.
 *
 * @throws CodegenRuntimeException for a string default of null
 */
String getDefaultValue(LeafCNode leaf) {
    final String value = leaf.getDefaultValue().getStringRepresentation();
    final String type = leaf.getType();
    if ("string".equals(type) && "null".equals(value)) {
        throw new CodegenRuntimeException("Default value null not allowed for C++ config");
    }
    if ("long".equals(type) && "-9223372036854775808".equals(value)) {
        return "LONG_MIN";
    }
    if ("int".equals(type) && "-2147483648".equals(value)) {
        return "INT_MIN";
    }
    return value;
}
} | class CppClassBuilder implements ClassBuilder {
// Root of the parsed config definition tree this builder generates code for.
private final CNode root;
// Normalized .def content; embedded verbatim in the generated schema array.
private final NormalizedDefinition nd;
// Output root directory and the subdirectory (under it) for generated files.
private final File rootDir;
private final String relativePathUnderRoot;
// Maps C++ element type -> typedef name used for vector-valued fields.
private static final Map<String, String> vectorTypeDefs;
static {
    Map<String, String> map = new HashMap<String, String>();
    map.put("bool", "BoolVector");
    map.put("int32_t", "IntVector");
    map.put("int64_t", "LongVector");
    map.put("double", "DoubleVector");
    map.put("vespalib::string", "StringVector");
    vectorTypeDefs = Collections.unmodifiableMap(map);
}
// Maps C++ element type -> typedef name used for map-valued fields.
private static final Map<String, String> mapTypeDefs;
static {
    Map<String, String> map = new HashMap<>();
    map.put("bool", "BoolMap");
    map.put("int32_t", "IntMap");
    map.put("int64_t", "LongMap");
    map.put("double", "DoubleMap");
    map.put("vespalib::string", "StringMap");
    mapTypeDefs = Collections.unmodifiableMap(map);
}
// Maps .def leaf type -> Slime accessor suffix (setX/asX); enums, files and
// references are represented as strings in the Slime encoding.
private static final Map<String, String> slimeTypeMap;
static {
    Map<String, String> map = new HashMap<String, String>();
    map.put("bool", "Bool");
    map.put("int", "Long");
    map.put("long", "Long");
    map.put("double", "Double");
    map.put("string", "String");
    map.put("enum", "String");
    map.put("file", "String");
    map.put("reference", "String");
    slimeTypeMap = Collections.unmodifiableMap(map);
}
/**
 * @param root                  parsed config definition to generate from
 * @param nd                    normalized definition text (embedded as schema)
 * @param rootDir               root of the generated-source output tree
 * @param relativePathUnderRoot subdirectory under rootDir for the output files
 */
public CppClassBuilder(CNode root, NormalizedDefinition nd, File rootDir, String relativePathUnderRoot) {
    this.root = root;
    this.nd = nd;
    this.rootDir = rootDir;
    this.relativePathUnderRoot = relativePathUnderRoot;
}
/** Entry point: generates the C++ header and body files for this definition. */
public void createConfigClasses() {
    generateConfig(root, nd);
}
/**
 * Reads a file into a string, normalizing line endings to '\n'.
 *
 * @return the file content, or null when {@code f} is not a regular file
 * @throws IOException on read failure
 */
String readFile(File f) throws IOException {
    if (!f.isFile()) return null;
    StringBuilder content = new StringBuilder();
    try (BufferedReader reader = new BufferedReader(new FileReader(f))) {
        for (String line = reader.readLine(); line != null; line = reader.readLine()) {
            content.append(line).append('\n');
        }
    }
    return content.toString();
}
/**
 * Writes {@code content} to {@code f}, replacing any existing content.
 * Uses try-with-resources so the writer is closed (and its buffer flushed)
 * even when write() throws; the previous version leaked the FileWriter on
 * failure because close() was only reached on the success path.
 *
 * @throws IOException on write failure
 */
void writeFile(File f, String content) throws IOException {
    try (FileWriter fw = new FileWriter(f)) {
        fw.write(content);
    }
}
/**
 * Generates the header and body files for one config definition, rewriting a
 * file only when its content actually changed (keeps timestamps stable and
 * avoids needless recompiles of dependent code).
 */
void generateConfig(CNode root, NormalizedDefinition nd) {
    try{
        StringWriter headerWriter = new StringWriter();
        StringWriter bodyWriter = new StringWriter();
        writeHeaderFile(headerWriter, root);
        writeBodyFile(bodyWriter, root, relativePathUnderRoot, nd);
        String newHeader = headerWriter.toString();
        String newBody = bodyWriter.toString();
        File headerFile = new File(rootDir, relativePathUnderRoot + "/" + getFileName(root, "h"));
        File bodyFile = new File(rootDir, relativePathUnderRoot + "/" + getFileName(root, "cpp"));
        // readFile returns null when the file does not exist yet.
        String oldHeader = readFile(headerFile);
        String oldBody = readFile(bodyFile);
        if (oldHeader == null || !oldHeader.equals(newHeader)) {
            writeFile(headerFile, newHeader);
        }
        if (oldBody == null || !oldBody.equals(newBody)) {
            writeFile(bodyFile, newBody);
        }
    } catch (IOException e) {
        // NOTE(review): failures are only printed, so callers cannot tell that
        // generation failed — consider propagating instead of swallowing.
        e.printStackTrace();
    }
}
/** Builds the generated file name, e.g. "config-foo.h" / "config-foo.cpp". */
String getFileName(CNode node, String extension) {
    return "config-" + node.getName() + '.' + extension;
}
/**
 * Converts a dash/underscore separated name to camel case: each segment's
 * first character is upper-cased and the separators are dropped. When
 * {@code capitalizeFirst} is false the very first character is lower-cased
 * again, yielding lowerCamelCase instead of UpperCamelCase.
 */
static String removeDashesAndUpperCaseAllFirstChars(String source, boolean capitalizeFirst) {
    StringBuilder joined = new StringBuilder();
    for (String segment : source.split("[-_]")) {
        joined.append(segment.substring(0, 1).toUpperCase()).append(segment.substring(1));
    }
    String result = joined.toString();
    if (capitalizeFirst) {
        return result;
    }
    return result.substring(0, 1).toLowerCase() + result.substring(1);
}
/** Converts a type name into the identifier used in the header's #ifdef guard. */
String getDefineName(String name) {
    return name.replace("-", "").toUpperCase();
}
/** Convert name of type to the name we want to use as type name in the generated code. */
static String getTypeName(String name) {
    // UpperCamelCase: C++ type names capitalize the first segment too.
    return removeDashesAndUpperCaseAllFirstChars(name, true);
}
/** Convert name of an identifier from value in def file to name to use in C++ file. */
String getIdentifier(String name) {
    // lowerCamelCase: member/variable identifiers keep a lowercase first char.
    return removeDashesAndUpperCaseAllFirstChars(name, false);
}
/** Emits the complete generated header (.h) file: prologue, public section, epilogue. */
void writeHeaderFile(Writer w, CNode root) throws IOException {
    writeHeaderHeader(w, root);
    writeHeaderPublic(w, root);
    writeHeaderFooter(w, root);
}
/** Emits the public section of the generated class: typedefs, nested types,
 * member function declarations, static constants and data members. */
void writeHeaderPublic(Writer w, CNode root) throws IOException {
    w.write("public:\n");
    writeHeaderTypeDefs(w, root, " ");
    writeTypeDeclarations(w, root, " ");
    writeHeaderFunctionDeclarations(w, getTypeName(root, false), root, " ");
    writeStaticMemberDeclarations(w, " ");
    writeMembers(w, root, " ");
}
/**
 * Splits the definition's namespace ("a.b.c") into its C++ namespace parts.
 * {@code String.split} already returns a single-element array when no '.'
 * is present, so the previous special case for dot-free namespaces was
 * redundant and has been removed — behavior is unchanged.
 */
String [] generateCppNameSpace(CNode root) {
    return root.getNamespace().split("\\.");
}
/**
 * Joins namespace parts with "::" (e.g. {"a","b"} -> "a::b").
 * Replaces the manual StringBuilder loop with String.join; output is
 * identical for every non-empty array (and the empty array no longer
 * triggers an ArrayIndexOutOfBoundsException — it yields "").
 */
String generateCppNameSpaceString(String[] namespaceList) {
    return String.join("::", namespaceList);
}
/**
 * Joins upper-cased namespace parts with '_' for use in #define guards
 * (e.g. {"a","b"} -> "A_B"). Rewritten from a manual index loop to a
 * stream join — same output, and consistent with getNestedNameSpace()
 * which already uses the stream API.
 */
String generateCppNameSpaceDefine(String[] namespaceList) {
    return Arrays.stream(namespaceList)
            .map(String::toUpperCase)
            .collect(Collectors.joining("_"));
}
/** Emits the opening "namespace a::b {" line for the generated file;
 * closed again by writeNameSpaceEnd(). */
void writeNameSpaceBegin(Writer w, String [] namespaceList) throws IOException {
    w.write("namespace ");
    w.write(getNestedNameSpace(namespaceList));
    w.write(" {\n");
}
/**
 * Joins namespace parts with "::" for a C++ nested-namespace name.
 * The previous stream pipeline mapped every element through the no-op
 * {@code String::toString} before joining; String.join produces the
 * identical result directly.
 */
String getNestedNameSpace(String [] namespaceList) {
    return String.join("::", namespaceList);
}
void writeNameSpaceEnd(Writer w, String [] namespaceList) throws IOException {
w.write("}
w.write(getNestedNameSpace(namespaceList));
w.write("\n");
}
/** Emits the complete generated body (.cpp) file: includes/namespaces, static
 * member definitions, all member function definitions, and the closing braces. */
void writeBodyFile(Writer w, CNode root, String subdir, NormalizedDefinition nd) throws IOException {
    writeBodyHeader(w, root, subdir);
    writeStaticMemberDefinitions(w, root, nd);
    // null fullClassName marks the root invocation of the recursive writer.
    writeDefinition(w, root, null);
    writeBodyFooter(w, root);
}
/**
 * Emits the top of the generated .cpp file: the include of the matching
 * header, supporting includes, the opening of the config namespace(s), the
 * nested 'internal' namespace and its using-declarations.
 *
 * NOTE(review): the bare w.write(" lines below are truncated (unterminated
 * string literals) — their emitted text, presumably the #include lines, was
 * lost when this snippet was extracted. Restore from version control; the
 * code is left byte-identical here.
 */
void writeBodyHeader(Writer w, CNode root, String subdir) throws IOException {
    if (subdir == null) {
        w.write("
    } else {
        w.write("
    }
    w.write("\n");
    w.write("
    w.write("
    w.write("
    w.write("
    w.write("
    w.write("
    w.write("
    w.write("
    w.write("
    w.write("
    w.write("\n");
    writeNameSpaceBegin(w, generateCppNameSpace(root));
    w.write("\nnamespace internal {\n\n");
    w.write("using ::config::ConfigParser;\n");
    w.write("using ::config::InvalidConfigException;\n");
    w.write("using ::config::ConfigInstance;\n");
    w.write("using ::config::ConfigValue;\n");
    w.write("using namespace vespalib::slime::convenience;\n");
    w.write("\n");
}
/**
 * Emits the out-of-class definitions of the generated class's static
 * constants: def name/namespace/md5/version, the serialize-format version,
 * and the full normalized .def schema as a string array.
 */
void writeStaticMemberDefinitions(Writer w, CNode root, NormalizedDefinition nd) throws IOException {
    String typeName = getInternalClassName(root);
    w.write("const vespalib::string " + typeName + "::CONFIG_DEF_MD5(\"" + root.defMd5 + "\");\n"
            + "const vespalib::string " + typeName + "::CONFIG_DEF_VERSION(\"" + root.defVersion + "\");\n"
            + "const vespalib::string " + typeName + "::CONFIG_DEF_NAME(\"" + root.defName + "\");\n"
            + "const vespalib::string " + typeName + "::CONFIG_DEF_NAMESPACE(\"" + root.getNamespace() + "\");\n"
            + "const int64_t " + typeName + "::CONFIG_DEF_SERIALIZE_VERSION(1);\n");
    w.write("const static vespalib::string __internalDefSchema[] = {\n");
    for (String line : nd.getNormalizedContent()) {
        // Quotes inside schema lines must be escaped for the C++ literal.
        w.write("\"" + line.replace("\"", "\\\"") + "\",\n");
    }
    w.write("};\n");
    w.write("const std::vector<vespalib::string> " + typeName + "::CONFIG_DEF_SCHEMA(__internalDefSchema,\n");
    w.write(" __internalDefSchema + (sizeof(__internalDefSchema) / \n");
    w.write(" sizeof(__internalDefSchema[0])));\n");
    w.write("\n");
}
/**
 * Recursively emits the .cpp definitions for one config struct: nested
 * enum helpers and sub-structs first, then the default constructor, copy
 * machinery, the line-parsing constructor, equality operators, and finally
 * the Slime encoder/decoder/constructor.
 *
 * @param w             destination for the generated C++ source
 * @param node          config node to emit definitions for
 * @param fullClassName fully qualified C++ class name; null marks the root
 *                      invocation (the internal class name is used instead)
 */
void writeDefinition(Writer w, CNode node, String fullClassName) throws IOException {
    boolean root = false;
    if (fullClassName == null) {
        fullClassName = getInternalClassName(node);
        root = true;
    }
    final String parent = fullClassName + "::";
    // Tracks child type names already emitted so repeated names are defined once.
    java.util.Set<String> declaredTypes = new java.util.HashSet<String>();
    for (CNode child : node.getChildren()) {
        boolean complexType = (child instanceof InnerCNode || child instanceof LeafCNode.EnumLeaf);
        if (complexType && !declaredTypes.contains(child.getName())) {
            String typeName = getTypeName(child, false);
            declaredTypes.add(child.getName());
            if (child instanceof LeafCNode.EnumLeaf) {
                // Emit getX(name), getXName(t) and the three converter overloads
                // used by the Slime payload constructor.
                LeafCNode.EnumLeaf leaf = (LeafCNode.EnumLeaf) child;
                w.write(parent + typeName + "\n"
                        + parent + "get" + typeName + "(const vespalib::string& name)\n"
                        + "{\n"
                );
                for (int i=0; i<leaf.getLegalValues().length; ++i) {
                    w.write(" " + (i != 0 ? "} else " : ""));
                    w.write("if (name == \"" + leaf.getLegalValues()[i] + "\") {\n"
                            + " return " + leaf.getLegalValues()[i] + ";\n");
                }
                w.write(" } else {\n"
                        + " throw InvalidConfigException(\"Illegal enum value '\" + name + \"'\");\n"
                        + " }\n"
                        + "}\n"
                        + "\n"
                );
                w.write("vespalib::string\n"
                        + parent + "get" + typeName + "Name(" + typeName + " t)\n"
                        + "{\n"
                        + " switch (t) {\n"
                );
                for (int i=0; i<leaf.getLegalValues().length; ++i) {
                    w.write(" case " + leaf.getLegalValues()[i] + ": return \"" + leaf.getLegalValues()[i] + "\";\n");
                }
                w.write(" default:\n"
                        + " {\n"
                        + " vespalib::asciistream ost;\n"
                        + " ost << \"UNKNOWN(\" << t << \")\";\n"
                        + " return ost.str();\n"
                        + " }\n"
                        + " }\n"
                        + "}\n"
                        + "\n"
                );
                w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::string & __fieldName, const ::vespalib::slime::Inspector & __inspector) {\n");
                w.write(" if (__inspector.valid()) {\n");
                w.write(" return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
                w.write(" }\n");
                w.write(" throw InvalidConfigException(\"Value for '\" + __fieldName + \"' required but not found\");\n");
                w.write("}\n");
                w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::slime::Inspector & __inspector) {\n");
                w.write(" return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
                w.write("}\n");
                w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::slime::Inspector & __inspector, " + typeName + " __eDefault) {\n");
                w.write(" if (__inspector.valid()) {\n");
                w.write(" return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
                w.write(" }\n");
                w.write(" return __eDefault;\n");
                w.write("}\n\n");
            } else {
                // Inner struct: recurse with the extended scope prefix.
                writeDefinition(w, child, parent + typeName);
            }
        }
    }
    String tmpName = getTypeName(node, false);
    String typeName = root ? getInternalClassName(node) : tmpName;
    // Default constructor with an initializer list entry per child; arrays,
    // maps and string-like leaves are default-initialized (empty parens).
    w.write(parent + typeName + "()\n");
    for (int i=0; i<node.getChildren().length; ++i) {
        CNode child = node.getChildren()[i];
        String childName = getIdentifier(child.getName());
        if (i == 0) {
            w.write(" : " + childName + "(");
        } else {
            w.write("),\n " + childName + "(");
        }
        if (child.isArray || child.isMap) {
        } else if (child instanceof LeafCNode) {
            LeafCNode leaf = (LeafCNode) child;
            if (leaf.getDefaultValue() != null) {
                w.write(getDefaultValue(leaf));
            } else {
                if (leaf.getType().equals("bool")) {
                    w.write("false");
                } else if (leaf.getType().equals("int")) {
                    w.write("0");
                } else if (leaf.getType().equals("double")) {
                    w.write("0");
                } else if (leaf.getType().equals("string")) {
                } else if (leaf.getType().equals("enum")) {
                    LeafCNode.EnumLeaf enumNode = (LeafCNode.EnumLeaf) leaf;
                    w.write(enumNode.getLegalValues()[0]);
                } else if (leaf.getType().equals("reference")) {
                } else if (leaf.getType().equals("file")) {
                }
            }
        }
    }
    if (node.getChildren().length > 0)
        w.write(")\n");
    w.write(""
            + "{\n"
            + "}\n"
            + "\n"
    );
    if (root) {
        writeConfigClassCopyConstructorDefinition(w, fullClassName, typeName);
        writeConfigClassAssignmentOperatorDefinition(w, fullClassName, typeName);
    } else {
        writeClassCopyConstructorDefinition(w, fullClassName, node);
        writeClassAssignmentOperatorDefinition(w, fullClassName, node);
    }
    writeDestructor(w, parent, typeName);
    // Line-based parsing constructor: root takes a ConfigValue (wrapped in
    // try/catch for error context), nested structs take the raw lines.
    String indent = " ";
    if (root) {
        w.write(typeName + "::" + typeName + "(const ConfigValue & __value)\n"
                + "{\n"
                + indent + "try {\n");
        indent = " ";
        w.write(indent + "const std::vector<vespalib::string> & __lines(__value.getLines());\n");
    } else {
        w.write(parent + typeName + "(const std::vector<vespalib::string> & __lines)\n"
                + "{\n");
    }
    w.write(""
            + indent + "std::set<vespalib::string> __remainingValuesToParse("
            + "__lines.begin(), __lines.end());\n");
    // Generated code first drops whitespace-only lines from the tracking set.
    w.write(indent + "for(std::set<vespalib::string>::iterator __rVTPiter = __remainingValuesToParse.begin();\n"
            + indent + " __rVTPiter != __remainingValuesToParse.end();)\n"
            + indent + "{\n"
            + indent + " if (ConfigParser::stripWhitespace(*__rVTPiter).empty()) {\n"
            + indent + " std::set<vespalib::string>::iterator __rVTPiter2 = __rVTPiter++;\n"
            + indent + " __remainingValuesToParse.erase(__rVTPiter2);\n"
            + indent + " } else {\n"
            + indent + " ++__rVTPiter;\n"
            + indent + " }\n"
            + indent + "}\n");
    for (CNode child : node.getChildren()) {
        String childType = getTypeName(child, false);
        String childName = getIdentifier(child.getName());
        if (child instanceof LeafCNode.EnumLeaf) {
            // Enums parse as strings first; converted to enum values below.
            if (child.isArray) {
                w.write(indent + "std::vector<vespalib::string> " + childName + "__ValueList(\n ");
            } else if (child.isMap) {
                w.write(indent + "std::map<vespalib::string, vespalib::string> " + childName + "__ValueMap(\n ");
            } else {
                w.write(indent + childName + " = get" + childType + "(");
            }
            childType = "vespalib::string";
        } else {
            w.write(indent + childName + " = ");
        }
        if (child.isArray) {
            w.write("ConfigParser::parseArray<" + childType + ">(\""
                    + child.getName() + "\", __lines)");
        } else if (child.isMap) {
            w.write("ConfigParser::parseMap<" + childType + ">(\""
                    + child.getName() + "\", __lines)");
        } else {
            if (child instanceof LeafCNode) {
                w.write("ConfigParser::parse<" + childType + ">(\""
                        + child.getName() + "\", __lines");
            } else {
                w.write("ConfigParser::parseStruct<" + childType + ">(\""
                        + child.getName() + "\", __lines");
            }
            if (child instanceof LeafCNode && ((LeafCNode) child).getDefaultValue() != null) {
                LeafCNode leaf = (LeafCNode) child;
                if (leaf.getDefaultValue().getValue() != null) {
                    String defaultVal = getDefaultValue(leaf);
                    if (leaf instanceof LeafCNode.EnumLeaf) {
                        defaultVal = '"' + defaultVal + '"';
                    }
                    w.write(", " + defaultVal);
                }
            }
            w.write(")");
        }
        if (child instanceof LeafCNode.EnumLeaf) {
            // Convert the parsed string list/map into enum values.
            childType = getTypeName(child, false);
            w.write(");\n");
            if (child.isArray) {
                w.write(indent + childName + ".reserve(" + childName + "__ValueList.size());\n"
                        + indent + "for (std::vector<vespalib::string>::const_iterator __it\n"
                        + indent + " = " + childName + "__ValueList.begin();\n"
                        + indent + " __it != " + childName + "__ValueList.end(); ++__it)\n"
                        + indent + "{\n"
                        + indent + " " + childName + ".push_back(get" + childType + "(*__it));\n"
                        + indent + "}\n"
                );
            } else if (child.isMap) {
                w.write(indent + "typedef std::map<vespalib::string, vespalib::string> __ValueMap;\n");
                w.write(indent + "for (__ValueMap::iterator __it(" + childName + "__ValueMap.begin()), __mt(" + childName + "__ValueMap.end()); __it != __mt; __it++) {\n"
                        + " " + childName + "[__it->first] = get" + childType + "(__it->second);\n"
                        + "}\n"
                );
            }
        } else {
            w.write(";\n");
        }
        w.write(indent + "ConfigParser::stripLinesForKey(\""
                + child.getName() + "\", "
                + "__remainingValuesToParse);\n");
    }
    if (root) {
        // Re-throw with config name/namespace context for better diagnostics.
        indent = " ";
        w.write(indent + "} catch (InvalidConfigException & __ice) {\n");
        w.write(indent + " throw InvalidConfigException(\"Error parsing config '\" + CONFIG_DEF_NAME + \"' in namespace '\" + CONFIG_DEF_NAMESPACE + \"'"
                + ": \" + __ice.getMessage());\n"
                + indent + "}\n");
    }
    w.write("}\n"
            + "\n"
    );
    // operator==: member-wise conjunction over all children.
    String lineBreak = (parent.length() + typeName.length() < 50 ? "" : "\n");
    w.write("bool\n"
            + parent + lineBreak + "operator==(const " + typeName + "& __rhs) const\n"
            + "{\n"
            + " return ("
    );
    for (int i = 0; i<node.getChildren().length; ++i) {
        CNode child = node.getChildren()[i];
        String childName = getIdentifier(child.getName());
        if (i != 0) {
            w.write(" &&\n ");
        }
        w.write(childName + " == __rhs." + childName);
    }
    w.write(");\n"
            + "}\n"
            + "\n"
    );
    // operator!= delegates to operator==.
    lineBreak = (parent.length() + typeName.length() < 50 ? "" : "\n");
    w.write("bool\n"
            + parent + lineBreak + "operator!=(const " + typeName + "& __rhs) const\n"
            + "{\n"
            + " return !(operator==(__rhs));\n"
            + "}\n"
            + "\n"
    );
    writeSlimeEncoder(w, node, parent, root);
    writeSlimeDecoder(w, node, parent, root);
    writeSlimeConstructor(w, node, parent, root);
}
/**
 * Emits the C++ {@code serialize()} member that encodes this config (sub)struct
 * into a Slime cursor tree. Every child field becomes an object with a "type"
 * tag and a "value" payload; arrays and maps nest one object per element.
 *
 * @param w      destination for the generated C++ source
 * @param node   config node whose children are serialized
 * @param parent C++ scope prefix ("Outer::Inner::") for the member definition
 * @param root   true when emitting the top-level config class (adds the
 *               configKey/defSchema envelope around the payload)
 */
public void writeSlimeEncoder(Writer w, CNode node, String parent, boolean root) throws IOException
{
    String indent = " ";
    if (root) {
        // Root variant serializes into a ConfigDataBuffer and wraps the payload
        // with version + configKey metadata (name, namespace, md5, schema).
        w.write("void\n"
                + parent + "serialize(::config::ConfigDataBuffer & __buffer) const\n"
                + "{\n");
        w.write(indent + "vespalib::Slime & __slime(__buffer.slimeObject());\n");
        w.write(indent + "vespalib::slime::Cursor & __croot = __slime.setObject();\n");
        w.write(indent + "__croot.setDouble(\"version\", CONFIG_DEF_SERIALIZE_VERSION);\n");
        w.write(indent + "vespalib::slime::Cursor & __key = __croot.setObject(\"configKey\");\n");
        w.write(indent + "__key.setString(\"defName\", vespalib::Memory(CONFIG_DEF_NAME));\n");
        w.write(indent + "__key.setString(\"defNamespace\", vespalib::Memory(CONFIG_DEF_NAMESPACE));\n");
        w.write(indent + "__key.setString(\"defMd5\", vespalib::Memory(CONFIG_DEF_MD5));\n");
        w.write(indent + "vespalib::slime::Cursor & __keySchema =__key.setArray(\"defSchema\");\n");
        w.write(indent + "for (size_t i = 0; i < CONFIG_DEF_SCHEMA.size(); i++) {\n");
        w.write(indent + " __keySchema.addString(vespalib::Memory(CONFIG_DEF_SCHEMA[i]));\n");
        w.write(indent + "}\n");
        w.write(indent + "vespalib::slime::Cursor & __cursor = __croot.setObject(\"configPayload\");\n");
    } else {
        // Nested structs serialize straight into the cursor handed to them.
        w.write("void\n"
                + parent + "serialize(vespalib::slime::Cursor & __cursor) const\n"
                + "{\n");
    }
    for (CNode child : node.getChildren()) {
        String childName = getIdentifier(child.getName());
        String childType = getTypeName(child, false);
        w.write(indent + "{\n");
        indent = " ";
        w.write(indent + "vespalib::slime::Cursor & __c = __cursor.setObject(\"" + child.getName() + "\");\n");
        if (child.isArray) {
            // Arrays: one {type,value} object per element under "value".
            w.write(indent + "__c.setString(\"type\", \"array\");\n");
            w.write(indent + "vespalib::slime::Cursor & __c2 = __c.setArray(\"value\");\n");
            w.write(indent + "for (size_t __i = 0; __i < " + childName + ".size(); __i++) {\n");
            w.write(indent + " vespalib::slime::Cursor & __c3 = __c2.addObject();\n");
            if (child instanceof LeafCNode.EnumLeaf) {
                // Enums are serialized by symbolic name, not ordinal.
                String repType = slimeTypeMap.get("enum");
                w.write(indent + " __c3.setString(\"type\", \"enum\");\n");
                w.write(indent + " __c3.set" + repType);
                w.write("(\"value\", vespalib::Memory(get" + childType + "Name(" + childName + "[__i])));\n");
            } else if (child instanceof LeafCNode) {
                String type = ((LeafCNode) child).getType();
                String repType = slimeTypeMap.get(type);
                w.write(indent + " __c3.setString(\"type\", \"" + type + "\");\n");
                w.write(indent + " __c3.set" + repType);
                if ("String".equals(repType)) {
                    w.write("(\"value\", vespalib::Memory(" + childName + "[__i]));\n");
                } else {
                    w.write("(\"value\", " + childName + "[__i]);\n");
                }
            } else {
                // Struct elements recurse via their own serialize().
                w.write(indent + " __c3.setString(\"type\", \"struct\");\n");
                w.write(indent + " Cursor & __c4 = __c3.setObject(\"value\");\n");
                w.write(indent + " " + childName + "[__i].serialize(__c4);\n");
            }
            w.write(indent + "}\n");
        } else if (child.isMap) {
            // Maps: array of {key,type,value} objects, iterated in map order.
            w.write(indent + "__c.setString(\"type\", \"map\");\n");
            w.write(indent + "vespalib::slime::Cursor & __c2 = __c.setArray(\"value\");\n");
            String childMapType = getTypeName(child, true);
            w.write(indent + "for (" + childMapType + "::const_iterator it(" + childName + ".begin()), mt(" + childName + ".end()); it != mt; it++) {\n");
            w.write(indent + " vespalib::slime::Cursor & __c3 = __c2.addObject();\n");
            w.write(indent + " __c3.setString(\"key\", vespalib::Memory(it->first));\n");
            if (child instanceof LeafCNode.EnumLeaf) {
                String repType = slimeTypeMap.get("enum");
                w.write(indent + " __c3.setString(\"type\", \"enum\");\n");
                w.write(indent + " __c3.set" + repType);
                w.write("(\"value\", vespalib::Memory(get" + childType + "Name(it->second)));\n");
            } else if (child instanceof LeafCNode) {
                String type = ((LeafCNode) child).getType();
                String repType = slimeTypeMap.get(type);
                w.write(indent + " __c3.setString(\"type\", \"" + type + "\");\n");
                w.write(indent + " __c3.set" + repType);
                if ("String".equals(repType)) {
                    w.write("(\"value\", vespalib::Memory(it->second));\n");
                } else {
                    w.write("(\"value\", it->second);\n");
                }
            } else {
                w.write(indent + " __c3.setString(\"type\", \"struct\");\n");
                w.write(indent + " Cursor & __c4 = __c3.setObject(\"value\");\n");
                w.write(indent + " it->second.serialize(__c4);\n");
            }
            w.write(indent + "}\n");
        } else {
            // Scalar field: single {type,value} pair.
            if (child instanceof LeafCNode.EnumLeaf) {
                String repType = slimeTypeMap.get("enum");
                w.write(indent + "__c.setString(\"type\", \"enum\");\n");
                w.write(indent + "__c.set" + repType);
                w.write("(\"value\", vespalib::Memory(get" + childType + "Name(" + childName + ")));\n");
            } else if (child instanceof LeafCNode) {
                String type = ((LeafCNode) child).getType();
                String repType = slimeTypeMap.get(type);
                w.write(indent + "__c.setString(\"type\", \"" + type + "\");\n");
                w.write(indent + "__c.set" + repType);
                if ("String".equals(repType)) {
                    w.write("(\"value\", vespalib::Memory(" + childName + "));\n");
                } else {
                    w.write("(\"value\", " + childName + ");\n");
                }
            } else {
                w.write(indent + "__c.setString(\"type\", \"struct\");\n");
                w.write(indent + "Cursor & __c2 = __c.setObject(\"value\");\n");
                w.write(indent + childName + ".serialize(__c2);\n");
            }
        }
        indent = " ";
        w.write(indent + "}\n");
    }
    w.write("}\n\n");
}
public void writeSlimeDecoder(Writer w, CNode node, String parent, boolean root) throws IOException {
String tmpName = getTypeName(node, false);
String typeName = root ? getInternalClassName(node) : tmpName;
String indent = " ";
if (root) {
w.write(""
+ typeName + "::" + typeName + "(const ::config::ConfigDataBuffer & __buffer)\n"
+ "{\n");
w.write(indent + "const vespalib::Slime & __slime(__buffer.slimeObject());\n");
w.write(indent + "vespalib::slime::Inspector & __croot = __slime.get();\n");
w.write(indent + "vespalib::slime::Inspector & __inspector = __croot[\"configPayload\"];\n");
} else {
w.write(""
+ parent + typeName + "(const vespalib::slime::Inspector & __inspector)\n"
+ "{\n");
}
for (CNode child : node.getChildren()) {
String childName = getIdentifier(child.getName());
String childType = getTypeName(child, false);
String inspectorLine = "__inspector[\"" + child.getName() + "\"][\"value\"]";
if (child.isArray) {
w.write(indent + "for (size_t __i = 0; __i < " + inspectorLine + ".children(); __i++) {\n");
w.write(indent + " " + childName + ".push_back(");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write("get" + childType + "(" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string())");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
if ("String".equals(repType)) {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string()");
} else {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "()");
}
} else {
w.write(childType + "(" + inspectorLine + "[__i][\"value\"])");
}
w.write(");\n");
w.write(indent + "}\n");
} else if (child.isMap) {
w.write(indent + "for (size_t __i = 0; __i < " + inspectorLine + ".children(); __i++) {\n");
w.write(indent + " " + childName + "[" + inspectorLine + "[__i][\"key\"].asString().make_string()] = ");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write("get" + childType + "(" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string())");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
if ("String".equals(repType)) {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string()");
} else {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "()");
}
} else {
w.write(childType + "(" + inspectorLine + "[__i][\"value\"])");
}
w.write(";\n");
w.write(indent + "}\n");
} else {
w.write(indent + childName + " = ");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write("get" + childType + "(" + inspectorLine + ".as" + repType + "().make_string())");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
if ("String".equals(repType)) {
w.write("" + inspectorLine + ".as" + repType + "().make_string()");
} else {
w.write("" + inspectorLine + ".as" + repType + "()");
}
} else {
w.write(childType + "(" + inspectorLine + ")");
}
w.write(";\n");
}
}
w.write("}\n\n");
}
public void writeSlimeConstructor(Writer w, CNode node, String parent, boolean root) throws IOException {
String tmpName = getTypeName(node, false);
String typeName = root ? getInternalClassName(node) : tmpName;
String indent = " ";
if (root) {
w.write(""
+ typeName + "::" + typeName + "(const ::config::ConfigPayload & __payload)\n"
+ "{\n");
} else {
w.write(""
+ parent + typeName + "(const ::config::ConfigPayload & __payload)\n"
+ "{\n");
}
w.write(indent + "const vespalib::slime::Inspector & __inspector(__payload.get());\n");
for (CNode child : node.getChildren()) {
String childName = getIdentifier(child.getName());
String childType = getTypeName(child, false);
String childInspector = "__inspector[\"" + child.getName() + "\"]";
if (child.isArray) {
String inserterName = "__" + childName + "Inserter";
w.write(indent + "::config::internal::VectorInserter<" + childType);
if (child instanceof LeafCNode.EnumLeaf) {
w.write(", Internal" + childType + "Converter");
}
w.write("> " + inserterName + "(" + childName + ");\n");
w.write(indent + childInspector + ".traverse(" + inserterName + ");\n");
} else if (child.isMap) {
String inserterName = "__" + childName + "Inserter";
w.write(indent + "::config::internal::MapInserter<" + childType);
if (child instanceof LeafCNode.EnumLeaf) {
w.write(", Internal" + childType + "Converter");
}
w.write("> " + inserterName + "(" + childName + ");\n");
w.write(indent + childInspector + ".traverse(" + inserterName + ");\n");
} else {
w.write(indent + childName + " = ");
if (child instanceof LeafCNode.EnumLeaf) {
w.write("Internal" + childType + "Converter");
} else {
w.write("::config::internal::ValueConverter<" + childType + ">");
}
if (child instanceof LeafCNode && ((LeafCNode) child).getDefaultValue() != null) {
LeafCNode leaf = (LeafCNode) child;
String defaultValue = getDefaultValue(leaf);
w.write("()(" + childInspector + ", " + defaultValue + ");\n");
} else if (child instanceof InnerCNode) {
w.write("()(" + childInspector + ");\n");
} else {
w.write("()(\"" + child.getName() + "\", " + childInspector + ");\n");
}
}
}
w.write("}\n\n");
}
void writeBodyFooter(Writer w, CNode root) throws IOException {
w.write("}
writeNameSpaceEnd(w, generateCppNameSpace(root));
}
String getDefaultValue(LeafCNode leaf) {
String defaultVal = leaf.getDefaultValue().getStringRepresentation();
if (leaf.getType().equals("string") && defaultVal.equals("null"))
throw new CodegenRuntimeException("Default value null not allowed for C++ config");
if (leaf.getType().equals("long") && "-9223372036854775808".equals(defaultVal)) {
return "LONG_MIN";
} else if (leaf.getType().equals("int") && "-2147483648".equals(defaultVal)) {
return "INT_MIN";
} else {
return defaultVal;
}
}
} |
For C++ we could remove it now, I'll check that nobody uses it | void writeHeaderHeader(Writer w, CNode root) throws IOException {
String [] namespaceList = generateCppNameSpace(root);
String namespacePrint = generateCppNameSpaceString(namespaceList);
String namespaceDefine = generateCppNameSpaceDefine(namespaceList);
String className = getTypeName(root, false);
String defineName = namespaceDefine + "_" + getDefineName(className);
w.write(""
+ "/**\n"
+ " * @class " + namespacePrint + "::" + className + "\n"
+ " * @ingroup config\n"
+ " *\n"
+ " * @brief This is an autogenerated class for handling VESPA config.\n"
+ " *\n"
+ " * This class is autogenerated by vespa from a config definition file.\n"
+ " * To subscribe to config, you need to include the config/config.h header, \n"
+ " * and create a ConfigSubscriber in order to subscribe for config.\n"
);
if (root.getComment().length() > 0) {
w.write(" *\n");
StringTokenizer st = new StringTokenizer(root.getComment(), "\n");
while (st.hasMoreTokens()) {
w.write(" * " + st.nextToken() + "\n");
}
}
w.write(""
+ " */\n"
+ "
+ "
+ "\n"
+ "
+ "
+ "
+ "
+ "\n");
w.write("namespace config {\n");
w.write(" class ConfigValue;\n");
w.write(" class ConfigPayload;\n");
w.write("}\n\n");
w.write("namespace vespalib::slime {\n");
w.write(" struct Inspector;\n");
w.write(" struct Cursor;\n");
w.write("}\n\n");
writeNameSpaceBegin(w, namespaceList);
w.write("\nnamespace internal {\n\n");
w.write(""
+ "/**\n"
+ " * This class contains the config. DO NOT USE THIS CLASS DIRECTLY. Use the typedeffed\n"
+ " * versions after this class declaration.\n"
+ " */\n"
+ "class Internal" + className + "Type : public ::config::ConfigInstance\n"
+ "{\n"
);
}
void writeTypeDeclarations(Writer w, CNode node, String indent) throws IOException {
java.util.Set<String> declaredTypes = new java.util.HashSet<String>();
for (CNode child : node.getChildren()) {
boolean complexType = (child instanceof InnerCNode || child instanceof LeafCNode.EnumLeaf);
if (complexType && !declaredTypes.contains(child.getName())) {
String typeName = getTypeName(child, false);
declaredTypes.add(child.getName());
if (child instanceof LeafCNode.EnumLeaf) {
w.write(indent + "enum " + typeName + " { ");
LeafCNode.EnumLeaf leaf = (LeafCNode.EnumLeaf) child;
for (int i=0; i<leaf.getLegalValues().length; ++i) {
if (i != 0) {
w.write(", ");
}
w.write(leaf.getLegalValues()[i]);
}
w.write(" };\n"
+ indent + "typedef std::vector<" + typeName + "> "
+ typeName + "Vector;"
+ "\n"
+ indent + "typedef std::map<vespalib::string, " + typeName + "> "
+ typeName + "Map;"
+ "\n"
+ indent + "static " + typeName + " get" + typeName + "(const vespalib::string&);\n"
+ indent + "static vespalib::string get" + typeName + "Name(" + typeName + " e);\n"
+ "\n"
);
w.write(indent + "struct Internal" + typeName + "Converter {\n");
w.write(indent + " " + typeName + " operator()(const ::vespalib::string & __fieldName, const ::vespalib::slime::Inspector & __inspector);\n");
w.write(indent + " " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector);\n");
w.write(indent + " " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector, " + typeName + " __eDefault);\n");
w.write(indent + "};\n");
} else {
w.write(indent + "class " + typeName + " {\n");
w.write(indent + "public:\n");
writeTypeDeclarations(w, child, indent + " ");
writeStructFunctionDeclarations(w, getTypeName(child, false), child, indent + " ");
writeMembers(w, child, indent + " ");
w.write(indent + "};\n");
w.write(indent + "typedef std::vector<" + typeName + "> " + typeName + "Vector;\n\n");
w.write(indent + "typedef std::map<vespalib::string, " + typeName + "> " + typeName + "Map;\n\n");
}
}
}
}
void writeHeaderFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(""
+ indent + "const vespalib::string & defName() const override { return CONFIG_DEF_NAME; }\n"
+ indent + "const vespalib::string & defVersion() const { return CONFIG_DEF_VERSION; }\n"
+ indent + "const vespalib::string & defMd5() const override { return CONFIG_DEF_MD5; }\n"
+ indent + "const vespalib::string & defNamespace() const override { return CONFIG_DEF_NAMESPACE; }\n"
+ indent + "void serialize(::config::ConfigDataBuffer & __buffer) const override;\n");
writeConfigClassFunctionDeclarations(w, "Internal" + className + "Type", node, indent);
}
void writeConfigClassFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(indent + className + "(const ::config::ConfigValue & __value);\n");
w.write(indent + className + "(const ::config::ConfigDataBuffer & __value);\n");
w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n");
writeCommonFunctionDeclarations(w, className, node, indent);
}
void writeStructFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(indent + className + "(const std::vector<vespalib::string> & __lines);\n");
w.write(indent + className + "(const vespalib::slime::Inspector & __inspector);\n");
w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n");
writeCommonFunctionDeclarations(w, className, node, indent);
w.write(indent + "void serialize(vespalib::slime::Cursor & __cursor) const;\n");
}
void writeClassCopyConstructorDeclaration(Writer w, String className, String indent) throws IOException {
w.write(indent + className + "(const " + className + " & __rhs);\n");
}
void writeClassAssignmentOperatorDeclaration(Writer w, String className, String indent) throws IOException {
w.write(indent + className + " & operator = (const " + className + " & __rhs);\n");
}
void writeConfigClassCopyConstructorDefinition(Writer w, String parent, String className) throws IOException {
w.write(parent + "::" + className + "(const " + className + " & __rhs) = default;\n");
}
void writeConfigClassAssignmentOperatorDefinition(Writer w, String parent, String className) throws IOException {
w.write(parent + " & " + parent + "::" + "operator =(const " + className + " & __rhs) = default;\n");
}
void writeClassCopyConstructorDefinition(Writer w, String parent, CNode node) throws IOException {
String typeName = getTypeName(node, false);
w.write(parent + "::" + typeName + "(const " + typeName + " & __rhs) = default;\n");
}
void writeClassAssignmentOperatorDefinition(Writer w, String parent, CNode node) throws IOException {
String typeName = getTypeName(node, false);
w.write(parent + " & " + parent + "::" + "operator = (const " + typeName + " & __rhs) = default;\n");
}
void writeDestructor(Writer w, String parent, String className) throws IOException {
w.write(parent + "~" + className + "() { } \n");
}
void writeCommonFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write("" + indent + className + "();\n");
writeClassCopyConstructorDeclaration(w, className, indent);
writeClassAssignmentOperatorDeclaration(w, className, indent);
w.write("" + indent + "~" + className + "();\n");
w.write("\n"
+ indent + "bool operator==(const " + className + "& __rhs) const;\n"
+ indent + "bool operator!=(const " + className + "& __rhs) const;\n"
+ "\n"
);
}
static String getTypeName(CNode node, boolean includeArray) {
String type = null;
if (node instanceof InnerCNode) {
InnerCNode innerNode = (InnerCNode) node;
type = getTypeName(innerNode.getName());
} else if (node instanceof LeafCNode) {
LeafCNode leaf = (LeafCNode) node;
if (leaf.getType().equals("bool")) {
type = "bool";
} else if (leaf.getType().equals("int")) {
type = "int32_t";
} else if (leaf.getType().equals("long")) {
type = "int64_t";
} else if (leaf.getType().equals("double")) {
type = "double";
} else if (leaf.getType().equals("enum")) {
type = getTypeName(node.getName());
} else if (leaf.getType().equals("string")) {
type = "vespalib::string";
} else if (leaf.getType().equals("reference")) {
type = "vespalib::string";
} else if (leaf.getType().equals("file")) {
type = "vespalib::string";
} else {
throw new IllegalArgumentException("Unknown leaf datatype " + leaf.getType());
}
}
if (type == null) {
throw new IllegalArgumentException("Unknown node " + node);
}
if (node.isArray && includeArray) {
if (vectorTypeDefs.containsKey(type)) {
type = vectorTypeDefs.get(type);
} else {
type = type + "Vector";
}
} else if (node.isMap && includeArray) {
if (mapTypeDefs.containsKey(type)) {
type = mapTypeDefs.get(type);
} else {
type = type + "Map";
}
}
return type;
}
void writeStaticMemberDeclarations(Writer w, String indent) throws IOException {
w.write(""
+ indent + "static const vespalib::string CONFIG_DEF_MD5;\n"
+ indent + "static const vespalib::string CONFIG_DEF_VERSION;\n"
+ indent + "static const vespalib::string CONFIG_DEF_NAME;\n"
+ indent + "static const vespalib::string CONFIG_DEF_NAMESPACE;\n"
+ indent + "static const std::vector<vespalib::string> CONFIG_DEF_SCHEMA;\n"
+ indent + "static const int64_t CONFIG_DEF_SERIALIZE_VERSION;\n"
+ "\n"
);
}
void writeComment(Writer w, String indent, String comment, boolean javadoc)
throws IOException
{
/** If simple one liner comment, write on one line. */
if (javadoc && comment.indexOf('\n') == -1
&& comment.length() <= 80 - (indent.length() + 7))
{
w.write(indent + "/** " + comment + " */\n");
return;
} else if (!javadoc && comment.indexOf('\n') == -1
&& comment.length() <= 80 - (indent.length() + 3))
{
w.write(indent + "
return;
}
/** If not we need to write multi line comment. */
int maxLineLen = 80 - (indent.length() + 3);
if (javadoc) w.write(indent + "/**\n");
do {
String current;
int newLine = comment.indexOf('\n');
if (newLine == -1) {
current = comment;
comment = "";
} else {
current = comment.substring(0, newLine);
comment = comment.substring(newLine + 1);
}
if (current.length() > maxLineLen) {
int spaceIndex = current.lastIndexOf(' ', maxLineLen);
if (spaceIndex >= maxLineLen - 15) {
comment = current.substring(spaceIndex + 1)
+ "\n" + comment;
current = current.substring(0, spaceIndex);
} else {
comment = current.substring(maxLineLen) + "\n" + comment;
current = current.substring(0, maxLineLen) + "-";
}
}
w.write(indent + (javadoc ? " * " : "
} while (comment.length() > 0);
if (javadoc) w.write(indent + " */\n");
}
void writeMembers(Writer w, CNode node, String indent) throws IOException {
for (CNode child : node.getChildren()) {
String typeName = getTypeName(child, true);
if (child.getComment().length() > 0) {
String comment = child.getComment();
int index;
do {
index = comment.indexOf("\n\n");
if (index == -1) break;
String next = comment.substring(0, index);
comment = comment.substring(index + 2);
w.write("\n");
writeComment(w, indent, next, false);
} while (true);
w.write("\n");
writeComment(w, indent, comment, true);
}
w.write(indent + typeName + " " + getIdentifier(child.getName()) + ";");
if (child instanceof LeafCNode) {
LeafCNode leaf = (LeafCNode) child;
DefaultValue value = leaf.getDefaultValue();
if (value != null) {
w.write("
}
}
w.write("\n");
}
}
void writeHeaderTypeDefs(Writer w, CNode root, String indent) throws IOException {
w.write(indent + "typedef std::unique_ptr<const " + getInternalClassName(root) + "> UP;\n");
for (Map.Entry<String, String> entry : vectorTypeDefs.entrySet()) {
String typeName = entry.getKey();
String vectorName = entry.getValue();
String typeDef = "typedef std::vector<" + typeName + "> " + vectorName;
w.write(indent + typeDef + ";\n");
}
for (Map.Entry<String, String> entry : mapTypeDefs.entrySet()) {
String typeName = entry.getKey();
String mapName = entry.getValue();
String typeDef = "typedef std::map<vespalib::string, " + typeName + "> " + mapName;
w.write(indent + typeDef + ";\n");
}
}
private static String getInternalClassName(CNode root) {
return "Internal" + getTypeName(root, false) + "Type";
}
void writeHeaderFooter(Writer w, CNode root) throws IOException {
String [] namespaceList = generateCppNameSpace(root);
String namespaceDefine = generateCppNameSpaceDefine(namespaceList);
String className = getTypeName(root, false);
String defineName = namespaceDefine + "_" + getDefineName(className);
w.write(""
+ "};\n"
+ "\n"
+ "}
w.write("typedef internal::" + getInternalClassName(root) + " " + className + "ConfigBuilder;\n");
w.write("typedef const internal::" + getInternalClassName(root) + " " + className + "Config;\n");
w.write("\n");
writeNameSpaceEnd(w, namespaceList);
w.write("
} | + indent + "const vespalib::string & defVersion() const { return CONFIG_DEF_VERSION; }\n" | void writeHeaderHeader(Writer w, CNode root) throws IOException {
String [] namespaceList = generateCppNameSpace(root);
String namespacePrint = generateCppNameSpaceString(namespaceList);
String namespaceDefine = generateCppNameSpaceDefine(namespaceList);
String className = getTypeName(root, false);
String defineName = namespaceDefine + "_" + getDefineName(className);
w.write(""
+ "/**\n"
+ " * @class " + namespacePrint + "::" + className + "\n"
+ " * @ingroup config\n"
+ " *\n"
+ " * @brief This is an autogenerated class for handling VESPA config.\n"
+ " *\n"
+ " * This class is autogenerated by vespa from a config definition file.\n"
+ " * To subscribe to config, you need to include the config/config.h header, \n"
+ " * and create a ConfigSubscriber in order to subscribe for config.\n"
);
if (root.getComment().length() > 0) {
w.write(" *\n");
StringTokenizer st = new StringTokenizer(root.getComment(), "\n");
while (st.hasMoreTokens()) {
w.write(" * " + st.nextToken() + "\n");
}
}
w.write(""
+ " */\n"
+ "
+ "
+ "\n"
+ "
+ "
+ "
+ "
+ "\n");
w.write("namespace config {\n");
w.write(" class ConfigValue;\n");
w.write(" class ConfigPayload;\n");
w.write("}\n\n");
w.write("namespace vespalib::slime {\n");
w.write(" struct Inspector;\n");
w.write(" struct Cursor;\n");
w.write("}\n\n");
writeNameSpaceBegin(w, namespaceList);
w.write("\nnamespace internal {\n\n");
w.write(""
+ "/**\n"
+ " * This class contains the config. DO NOT USE THIS CLASS DIRECTLY. Use the typedeffed\n"
+ " * versions after this class declaration.\n"
+ " */\n"
+ "class Internal" + className + "Type : public ::config::ConfigInstance\n"
+ "{\n"
);
}
void writeTypeDeclarations(Writer w, CNode node, String indent) throws IOException {
java.util.Set<String> declaredTypes = new java.util.HashSet<String>();
for (CNode child : node.getChildren()) {
boolean complexType = (child instanceof InnerCNode || child instanceof LeafCNode.EnumLeaf);
if (complexType && !declaredTypes.contains(child.getName())) {
String typeName = getTypeName(child, false);
declaredTypes.add(child.getName());
if (child instanceof LeafCNode.EnumLeaf) {
w.write(indent + "enum " + typeName + " { ");
LeafCNode.EnumLeaf leaf = (LeafCNode.EnumLeaf) child;
for (int i=0; i<leaf.getLegalValues().length; ++i) {
if (i != 0) {
w.write(", ");
}
w.write(leaf.getLegalValues()[i]);
}
w.write(" };\n"
+ indent + "typedef std::vector<" + typeName + "> "
+ typeName + "Vector;"
+ "\n"
+ indent + "typedef std::map<vespalib::string, " + typeName + "> "
+ typeName + "Map;"
+ "\n"
+ indent + "static " + typeName + " get" + typeName + "(const vespalib::string&);\n"
+ indent + "static vespalib::string get" + typeName + "Name(" + typeName + " e);\n"
+ "\n"
);
w.write(indent + "struct Internal" + typeName + "Converter {\n");
w.write(indent + " " + typeName + " operator()(const ::vespalib::string & __fieldName, const ::vespalib::slime::Inspector & __inspector);\n");
w.write(indent + " " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector);\n");
w.write(indent + " " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector, " + typeName + " __eDefault);\n");
w.write(indent + "};\n");
} else {
w.write(indent + "class " + typeName + " {\n");
w.write(indent + "public:\n");
writeTypeDeclarations(w, child, indent + " ");
writeStructFunctionDeclarations(w, getTypeName(child, false), child, indent + " ");
writeMembers(w, child, indent + " ");
w.write(indent + "};\n");
w.write(indent + "typedef std::vector<" + typeName + "> " + typeName + "Vector;\n\n");
w.write(indent + "typedef std::map<vespalib::string, " + typeName + "> " + typeName + "Map;\n\n");
}
}
}
}
void writeHeaderFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(""
+ indent + "const vespalib::string & defName() const override { return CONFIG_DEF_NAME; }\n"
+ indent + "const vespalib::string & defMd5() const override { return CONFIG_DEF_MD5; }\n"
+ indent + "const vespalib::string & defNamespace() const override { return CONFIG_DEF_NAMESPACE; }\n"
+ indent + "void serialize(::config::ConfigDataBuffer & __buffer) const override;\n");
writeConfigClassFunctionDeclarations(w, "Internal" + className + "Type", node, indent);
}
void writeConfigClassFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(indent + className + "(const ::config::ConfigValue & __value);\n");
w.write(indent + className + "(const ::config::ConfigDataBuffer & __value);\n");
w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n");
writeCommonFunctionDeclarations(w, className, node, indent);
}
void writeStructFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write(indent + className + "(const std::vector<vespalib::string> & __lines);\n");
w.write(indent + className + "(const vespalib::slime::Inspector & __inspector);\n");
w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n");
writeCommonFunctionDeclarations(w, className, node, indent);
w.write(indent + "void serialize(vespalib::slime::Cursor & __cursor) const;\n");
}
void writeClassCopyConstructorDeclaration(Writer w, String className, String indent) throws IOException {
w.write(indent + className + "(const " + className + " & __rhs);\n");
}
void writeClassAssignmentOperatorDeclaration(Writer w, String className, String indent) throws IOException {
w.write(indent + className + " & operator = (const " + className + " & __rhs);\n");
}
void writeConfigClassCopyConstructorDefinition(Writer w, String parent, String className) throws IOException {
w.write(parent + "::" + className + "(const " + className + " & __rhs) = default;\n");
}
void writeConfigClassAssignmentOperatorDefinition(Writer w, String parent, String className) throws IOException {
w.write(parent + " & " + parent + "::" + "operator =(const " + className + " & __rhs) = default;\n");
}
void writeClassCopyConstructorDefinition(Writer w, String parent, CNode node) throws IOException {
String typeName = getTypeName(node, false);
w.write(parent + "::" + typeName + "(const " + typeName + " & __rhs) = default;\n");
}
void writeClassAssignmentOperatorDefinition(Writer w, String parent, CNode node) throws IOException {
String typeName = getTypeName(node, false);
w.write(parent + " & " + parent + "::" + "operator = (const " + typeName + " & __rhs) = default;\n");
}
void writeDestructor(Writer w, String parent, String className) throws IOException {
w.write(parent + "~" + className + "() { } \n");
}
void writeCommonFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException {
w.write("" + indent + className + "();\n");
writeClassCopyConstructorDeclaration(w, className, indent);
writeClassAssignmentOperatorDeclaration(w, className, indent);
w.write("" + indent + "~" + className + "();\n");
w.write("\n"
+ indent + "bool operator==(const " + className + "& __rhs) const;\n"
+ indent + "bool operator!=(const " + className + "& __rhs) const;\n"
+ "\n"
);
}
static String getTypeName(CNode node, boolean includeArray) {
String type = null;
if (node instanceof InnerCNode) {
InnerCNode innerNode = (InnerCNode) node;
type = getTypeName(innerNode.getName());
} else if (node instanceof LeafCNode) {
LeafCNode leaf = (LeafCNode) node;
if (leaf.getType().equals("bool")) {
type = "bool";
} else if (leaf.getType().equals("int")) {
type = "int32_t";
} else if (leaf.getType().equals("long")) {
type = "int64_t";
} else if (leaf.getType().equals("double")) {
type = "double";
} else if (leaf.getType().equals("enum")) {
type = getTypeName(node.getName());
} else if (leaf.getType().equals("string")) {
type = "vespalib::string";
} else if (leaf.getType().equals("reference")) {
type = "vespalib::string";
} else if (leaf.getType().equals("file")) {
type = "vespalib::string";
} else {
throw new IllegalArgumentException("Unknown leaf datatype " + leaf.getType());
}
}
if (type == null) {
throw new IllegalArgumentException("Unknown node " + node);
}
if (node.isArray && includeArray) {
if (vectorTypeDefs.containsKey(type)) {
type = vectorTypeDefs.get(type);
} else {
type = type + "Vector";
}
} else if (node.isMap && includeArray) {
if (mapTypeDefs.containsKey(type)) {
type = mapTypeDefs.get(type);
} else {
type = type + "Map";
}
}
return type;
}
void writeStaticMemberDeclarations(Writer w, String indent) throws IOException {
w.write(""
+ indent + "static const vespalib::string CONFIG_DEF_MD5;\n"
+ indent + "static const vespalib::string CONFIG_DEF_VERSION;\n"
+ indent + "static const vespalib::string CONFIG_DEF_NAME;\n"
+ indent + "static const vespalib::string CONFIG_DEF_NAMESPACE;\n"
+ indent + "static const std::vector<vespalib::string> CONFIG_DEF_SCHEMA;\n"
+ indent + "static const int64_t CONFIG_DEF_SERIALIZE_VERSION;\n"
+ "\n"
);
}
void writeComment(Writer w, String indent, String comment, boolean javadoc)
throws IOException
{
/** If simple one liner comment, write on one line. */
if (javadoc && comment.indexOf('\n') == -1
&& comment.length() <= 80 - (indent.length() + 7))
{
w.write(indent + "/** " + comment + " */\n");
return;
} else if (!javadoc && comment.indexOf('\n') == -1
&& comment.length() <= 80 - (indent.length() + 3))
{
w.write(indent + "
return;
}
/** If not we need to write multi line comment. */
int maxLineLen = 80 - (indent.length() + 3);
if (javadoc) w.write(indent + "/**\n");
do {
String current;
int newLine = comment.indexOf('\n');
if (newLine == -1) {
current = comment;
comment = "";
} else {
current = comment.substring(0, newLine);
comment = comment.substring(newLine + 1);
}
if (current.length() > maxLineLen) {
int spaceIndex = current.lastIndexOf(' ', maxLineLen);
if (spaceIndex >= maxLineLen - 15) {
comment = current.substring(spaceIndex + 1)
+ "\n" + comment;
current = current.substring(0, spaceIndex);
} else {
comment = current.substring(maxLineLen) + "\n" + comment;
current = current.substring(0, maxLineLen) + "-";
}
}
w.write(indent + (javadoc ? " * " : "
} while (comment.length() > 0);
if (javadoc) w.write(indent + " */\n");
}
void writeMembers(Writer w, CNode node, String indent) throws IOException {
for (CNode child : node.getChildren()) {
String typeName = getTypeName(child, true);
if (child.getComment().length() > 0) {
String comment = child.getComment();
int index;
do {
index = comment.indexOf("\n\n");
if (index == -1) break;
String next = comment.substring(0, index);
comment = comment.substring(index + 2);
w.write("\n");
writeComment(w, indent, next, false);
} while (true);
w.write("\n");
writeComment(w, indent, comment, true);
}
w.write(indent + typeName + " " + getIdentifier(child.getName()) + ";");
if (child instanceof LeafCNode) {
LeafCNode leaf = (LeafCNode) child;
DefaultValue value = leaf.getDefaultValue();
if (value != null) {
w.write("
}
}
w.write("\n");
}
}
void writeHeaderTypeDefs(Writer w, CNode root, String indent) throws IOException {
w.write(indent + "typedef std::unique_ptr<const " + getInternalClassName(root) + "> UP;\n");
for (Map.Entry<String, String> entry : vectorTypeDefs.entrySet()) {
String typeName = entry.getKey();
String vectorName = entry.getValue();
String typeDef = "typedef std::vector<" + typeName + "> " + vectorName;
w.write(indent + typeDef + ";\n");
}
for (Map.Entry<String, String> entry : mapTypeDefs.entrySet()) {
String typeName = entry.getKey();
String mapName = entry.getValue();
String typeDef = "typedef std::map<vespalib::string, " + typeName + "> " + mapName;
w.write(indent + typeDef + ";\n");
}
}
private static String getInternalClassName(CNode root) {
return "Internal" + getTypeName(root, false) + "Type";
}
void writeHeaderFooter(Writer w, CNode root) throws IOException {
String [] namespaceList = generateCppNameSpace(root);
String namespaceDefine = generateCppNameSpaceDefine(namespaceList);
String className = getTypeName(root, false);
String defineName = namespaceDefine + "_" + getDefineName(className);
w.write(""
+ "};\n"
+ "\n"
+ "}
w.write("typedef internal::" + getInternalClassName(root) + " " + className + "ConfigBuilder;\n");
w.write("typedef const internal::" + getInternalClassName(root) + " " + className + "Config;\n");
w.write("\n");
writeNameSpaceEnd(w, namespaceList);
w.write("
} | class CppClassBuilder implements ClassBuilder {
private final CNode root;
private final NormalizedDefinition nd;
private final File rootDir;
private final String relativePathUnderRoot;
private static final Map<String, String> vectorTypeDefs;
static {
Map<String, String> map = new HashMap<String, String>();
map.put("bool", "BoolVector");
map.put("int32_t", "IntVector");
map.put("int64_t", "LongVector");
map.put("double", "DoubleVector");
map.put("vespalib::string", "StringVector");
vectorTypeDefs = Collections.unmodifiableMap(map);
}
private static final Map<String, String> mapTypeDefs;
static {
Map<String, String> map = new HashMap<>();
map.put("bool", "BoolMap");
map.put("int32_t", "IntMap");
map.put("int64_t", "LongMap");
map.put("double", "DoubleMap");
map.put("vespalib::string", "StringMap");
mapTypeDefs = Collections.unmodifiableMap(map);
}
private static final Map<String, String> slimeTypeMap;
static {
Map<String, String> map = new HashMap<String, String>();
map.put("bool", "Bool");
map.put("int", "Long");
map.put("long", "Long");
map.put("double", "Double");
map.put("string", "String");
map.put("enum", "String");
map.put("file", "String");
map.put("reference", "String");
slimeTypeMap = Collections.unmodifiableMap(map);
}
public CppClassBuilder(CNode root, NormalizedDefinition nd, File rootDir, String relativePathUnderRoot) {
this.root = root;
this.nd = nd;
this.rootDir = rootDir;
this.relativePathUnderRoot = relativePathUnderRoot;
}
public void createConfigClasses() {
generateConfig(root, nd);
}
String readFile(File f) throws IOException {
if (!f.isFile()) return null;
StringBuilder sb = new StringBuilder();
try (BufferedReader sr = new BufferedReader(new FileReader(f))) {
while (true) {
String line = sr.readLine();
if (line == null) break;
sb.append(line).append("\n");
}
return sb.toString();
}
}
void writeFile(File f, String content) throws IOException {
FileWriter fw = new FileWriter(f);
fw.write(content);
fw.close();
}
void generateConfig(CNode root, NormalizedDefinition nd) {
try{
StringWriter headerWriter = new StringWriter();
StringWriter bodyWriter = new StringWriter();
writeHeaderFile(headerWriter, root);
writeBodyFile(bodyWriter, root, relativePathUnderRoot, nd);
String newHeader = headerWriter.toString();
String newBody = bodyWriter.toString();
File headerFile = new File(rootDir, relativePathUnderRoot + "/" + getFileName(root, "h"));
File bodyFile = new File(rootDir, relativePathUnderRoot + "/" + getFileName(root, "cpp"));
String oldHeader = readFile(headerFile);
String oldBody = readFile(bodyFile);
if (oldHeader == null || !oldHeader.equals(newHeader)) {
writeFile(headerFile, newHeader);
}
if (oldBody == null || !oldBody.equals(newBody)) {
writeFile(bodyFile, newBody);
}
} catch (IOException e) {
e.printStackTrace();
}
}
String getFileName(CNode node, String extension) {
return "config-" + node.getName() + "." + extension;
}
static String removeDashesAndUpperCaseAllFirstChars(String source, boolean capitalizeFirst) {
String parts[] = source.split("[-_]");
StringBuilder sb = new StringBuilder();
for (String s : parts) {
sb.append(s.substring(0, 1).toUpperCase()).append(s.substring(1));
}
String result = sb.toString();
if (!capitalizeFirst) {
result = result.substring(0,1).toLowerCase() + result.substring(1);
}
return result;
}
/** Convert name of type to the name we want to use in macro ifdefs in file. */
String getDefineName(String name) {
return name.toUpperCase().replace("-", "");
}
/** Convert name of type to the name we want to use as type name in the generated code. */
static String getTypeName(String name) {
return removeDashesAndUpperCaseAllFirstChars(name, true);
}
/** Convert name of an identifier from value in def file to name to use in C++ file. */
String getIdentifier(String name) {
return removeDashesAndUpperCaseAllFirstChars(name, false);
}
void writeHeaderFile(Writer w, CNode root) throws IOException {
writeHeaderHeader(w, root);
writeHeaderPublic(w, root);
writeHeaderFooter(w, root);
}
void writeHeaderPublic(Writer w, CNode root) throws IOException {
w.write("public:\n");
writeHeaderTypeDefs(w, root, " ");
writeTypeDeclarations(w, root, " ");
writeHeaderFunctionDeclarations(w, getTypeName(root, false), root, " ");
writeStaticMemberDeclarations(w, " ");
writeMembers(w, root, " ");
}
String [] generateCppNameSpace(CNode root) {
String namespace = root.getNamespace();
if (namespace.contains(".")) {
return namespace.split("\\.");
}
return new String[]{namespace};
}
String generateCppNameSpaceString(String[] namespaceList) {
StringBuilder str = new StringBuilder();
for (int i = 0; i < namespaceList.length - 1; i++) {
str.append(namespaceList[i]);
str.append("::");
}
str.append(namespaceList[namespaceList.length - 1]);
return str.toString();
}
String generateCppNameSpaceDefine(String[] namespaceList) {
StringBuilder str = new StringBuilder();
for (int i = 0; i < namespaceList.length - 1; i++) {
str.append(namespaceList[i].toUpperCase());
str.append("_");
}
str.append(namespaceList[namespaceList.length - 1].toUpperCase());
return str.toString();
}
void writeNameSpaceBegin(Writer w, String [] namespaceList) throws IOException {
w.write("namespace ");
w.write(getNestedNameSpace(namespaceList));
w.write(" {\n");
}
String getNestedNameSpace(String [] namespaceList) {
return Arrays.stream(namespaceList).map(String::toString).collect(Collectors.joining("::"));
}
void writeNameSpaceEnd(Writer w, String [] namespaceList) throws IOException {
w.write("}
w.write(getNestedNameSpace(namespaceList));
w.write("\n");
}
void writeBodyFile(Writer w, CNode root, String subdir, NormalizedDefinition nd) throws IOException {
writeBodyHeader(w, root, subdir);
writeStaticMemberDefinitions(w, root, nd);
writeDefinition(w, root, null);
writeBodyFooter(w, root);
}
void writeBodyHeader(Writer w, CNode root, String subdir) throws IOException {
if (subdir == null) {
w.write("
} else {
w.write("
}
w.write("\n");
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("\n");
writeNameSpaceBegin(w, generateCppNameSpace(root));
w.write("\nnamespace internal {\n\n");
w.write("using ::config::ConfigParser;\n");
w.write("using ::config::InvalidConfigException;\n");
w.write("using ::config::ConfigInstance;\n");
w.write("using ::config::ConfigValue;\n");
w.write("using namespace vespalib::slime::convenience;\n");
w.write("\n");
}
void writeStaticMemberDefinitions(Writer w, CNode root, NormalizedDefinition nd) throws IOException {
String typeName = getInternalClassName(root);
w.write("const vespalib::string " + typeName + "::CONFIG_DEF_MD5(\"" + root.defMd5 + "\");\n"
+ "const vespalib::string " + typeName + "::CONFIG_DEF_VERSION(\"" + root.defVersion + "\");\n"
+ "const vespalib::string " + typeName + "::CONFIG_DEF_NAME(\"" + root.defName + "\");\n"
+ "const vespalib::string " + typeName + "::CONFIG_DEF_NAMESPACE(\"" + root.getNamespace() + "\");\n"
+ "const int64_t " + typeName + "::CONFIG_DEF_SERIALIZE_VERSION(1);\n");
w.write("const static vespalib::string __internalDefSchema[] = {\n");
for (String line : nd.getNormalizedContent()) {
w.write("\"" + line.replace("\"", "\\\"") + "\",\n");
}
w.write("};\n");
w.write("const std::vector<vespalib::string> " + typeName + "::CONFIG_DEF_SCHEMA(__internalDefSchema,\n");
w.write(" __internalDefSchema + (sizeof(__internalDefSchema) / \n");
w.write(" sizeof(__internalDefSchema[0])));\n");
w.write("\n");
}
void writeDefinition(Writer w, CNode node, String fullClassName) throws IOException {
boolean root = false;
if (fullClassName == null) {
fullClassName = getInternalClassName(node);
root = true;
}
final String parent = fullClassName + "::";
java.util.Set<String> declaredTypes = new java.util.HashSet<String>();
for (CNode child : node.getChildren()) {
boolean complexType = (child instanceof InnerCNode || child instanceof LeafCNode.EnumLeaf);
if (complexType && !declaredTypes.contains(child.getName())) {
String typeName = getTypeName(child, false);
declaredTypes.add(child.getName());
if (child instanceof LeafCNode.EnumLeaf) {
LeafCNode.EnumLeaf leaf = (LeafCNode.EnumLeaf) child;
w.write(parent + typeName + "\n"
+ parent + "get" + typeName + "(const vespalib::string& name)\n"
+ "{\n"
);
for (int i=0; i<leaf.getLegalValues().length; ++i) {
w.write(" " + (i != 0 ? "} else " : ""));
w.write("if (name == \"" + leaf.getLegalValues()[i] + "\") {\n"
+ " return " + leaf.getLegalValues()[i] + ";\n");
}
w.write(" } else {\n"
+ " throw InvalidConfigException(\"Illegal enum value '\" + name + \"'\");\n"
+ " }\n"
+ "}\n"
+ "\n"
);
w.write("vespalib::string\n"
+ parent + "get" + typeName + "Name(" + typeName + " t)\n"
+ "{\n"
+ " switch (t) {\n"
);
for (int i=0; i<leaf.getLegalValues().length; ++i) {
w.write(" case " + leaf.getLegalValues()[i] + ": return \"" + leaf.getLegalValues()[i] + "\";\n");
}
w.write(" default:\n"
+ " {\n"
+ " vespalib::asciistream ost;\n"
+ " ost << \"UNKNOWN(\" << t << \")\";\n"
+ " return ost.str();\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "\n"
);
w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::string & __fieldName, const ::vespalib::slime::Inspector & __inspector) {\n");
w.write(" if (__inspector.valid()) {\n");
w.write(" return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
w.write(" }\n");
w.write(" throw InvalidConfigException(\"Value for '\" + __fieldName + \"' required but not found\");\n");
w.write("}\n");
w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::slime::Inspector & __inspector) {\n");
w.write(" return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
w.write("}\n");
w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::slime::Inspector & __inspector, " + typeName + " __eDefault) {\n");
w.write(" if (__inspector.valid()) {\n");
w.write(" return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
w.write(" }\n");
w.write(" return __eDefault;\n");
w.write("}\n\n");
} else {
writeDefinition(w, child, parent + typeName);
}
}
}
String tmpName = getTypeName(node, false);
String typeName = root ? getInternalClassName(node) : tmpName;
w.write(parent + typeName + "()\n");
for (int i=0; i<node.getChildren().length; ++i) {
CNode child = node.getChildren()[i];
String childName = getIdentifier(child.getName());
if (i == 0) {
w.write(" : " + childName + "(");
} else {
w.write("),\n " + childName + "(");
}
if (child.isArray || child.isMap) {
} else if (child instanceof LeafCNode) {
LeafCNode leaf = (LeafCNode) child;
if (leaf.getDefaultValue() != null) {
w.write(getDefaultValue(leaf));
} else {
if (leaf.getType().equals("bool")) {
w.write("false");
} else if (leaf.getType().equals("int")) {
w.write("0");
} else if (leaf.getType().equals("double")) {
w.write("0");
} else if (leaf.getType().equals("string")) {
} else if (leaf.getType().equals("enum")) {
LeafCNode.EnumLeaf enumNode = (LeafCNode.EnumLeaf) leaf;
w.write(enumNode.getLegalValues()[0]);
} else if (leaf.getType().equals("reference")) {
} else if (leaf.getType().equals("file")) {
}
}
}
}
if (node.getChildren().length > 0)
w.write(")\n");
w.write(""
+ "{\n"
+ "}\n"
+ "\n"
);
if (root) {
writeConfigClassCopyConstructorDefinition(w, fullClassName, typeName);
writeConfigClassAssignmentOperatorDefinition(w, fullClassName, typeName);
} else {
writeClassCopyConstructorDefinition(w, fullClassName, node);
writeClassAssignmentOperatorDefinition(w, fullClassName, node);
}
writeDestructor(w, parent, typeName);
String indent = " ";
if (root) {
w.write(typeName + "::" + typeName + "(const ConfigValue & __value)\n"
+ "{\n"
+ indent + "try {\n");
indent = " ";
w.write(indent + "const std::vector<vespalib::string> & __lines(__value.getLines());\n");
} else {
w.write(parent + typeName + "(const std::vector<vespalib::string> & __lines)\n"
+ "{\n");
}
w.write(""
+ indent + "std::set<vespalib::string> __remainingValuesToParse("
+ "__lines.begin(), __lines.end());\n");
w.write(indent + "for(std::set<vespalib::string>::iterator __rVTPiter = __remainingValuesToParse.begin();\n"
+ indent + " __rVTPiter != __remainingValuesToParse.end();)\n"
+ indent + "{\n"
+ indent + " if (ConfigParser::stripWhitespace(*__rVTPiter).empty()) {\n"
+ indent + " std::set<vespalib::string>::iterator __rVTPiter2 = __rVTPiter++;\n"
+ indent + " __remainingValuesToParse.erase(__rVTPiter2);\n"
+ indent + " } else {\n"
+ indent + " ++__rVTPiter;\n"
+ indent + " }\n"
+ indent + "}\n");
for (CNode child : node.getChildren()) {
String childType = getTypeName(child, false);
String childName = getIdentifier(child.getName());
if (child instanceof LeafCNode.EnumLeaf) {
if (child.isArray) {
w.write(indent + "std::vector<vespalib::string> " + childName + "__ValueList(\n ");
} else if (child.isMap) {
w.write(indent + "std::map<vespalib::string, vespalib::string> " + childName + "__ValueMap(\n ");
} else {
w.write(indent + childName + " = get" + childType + "(");
}
childType = "vespalib::string";
} else {
w.write(indent + childName + " = ");
}
if (child.isArray) {
w.write("ConfigParser::parseArray<" + childType + ">(\""
+ child.getName() + "\", __lines)");
} else if (child.isMap) {
w.write("ConfigParser::parseMap<" + childType + ">(\""
+ child.getName() + "\", __lines)");
} else {
if (child instanceof LeafCNode) {
w.write("ConfigParser::parse<" + childType + ">(\""
+ child.getName() + "\", __lines");
} else {
w.write("ConfigParser::parseStruct<" + childType + ">(\""
+ child.getName() + "\", __lines");
}
if (child instanceof LeafCNode && ((LeafCNode) child).getDefaultValue() != null) {
LeafCNode leaf = (LeafCNode) child;
if (leaf.getDefaultValue().getValue() != null) {
String defaultVal = getDefaultValue(leaf);
if (leaf instanceof LeafCNode.EnumLeaf) {
defaultVal = '"' + defaultVal + '"';
}
w.write(", " + defaultVal);
}
}
w.write(")");
}
if (child instanceof LeafCNode.EnumLeaf) {
childType = getTypeName(child, false);
w.write(");\n");
if (child.isArray) {
w.write(indent + childName + ".reserve(" + childName + "__ValueList.size());\n"
+ indent + "for (std::vector<vespalib::string>::const_iterator __it\n"
+ indent + " = " + childName + "__ValueList.begin();\n"
+ indent + " __it != " + childName + "__ValueList.end(); ++__it)\n"
+ indent + "{\n"
+ indent + " " + childName + ".push_back(get" + childType + "(*__it));\n"
+ indent + "}\n"
);
} else if (child.isMap) {
w.write(indent + "typedef std::map<vespalib::string, vespalib::string> __ValueMap;\n");
w.write(indent + "for (__ValueMap::iterator __it(" + childName + "__ValueMap.begin()), __mt(" + childName + "__ValueMap.end()); __it != __mt; __it++) {\n"
+ " " + childName + "[__it->first] = get" + childType + "(__it->second);\n"
+ "}\n"
);
}
} else {
w.write(";\n");
}
w.write(indent + "ConfigParser::stripLinesForKey(\""
+ child.getName() + "\", "
+ "__remainingValuesToParse);\n");
}
if (root) {
indent = " ";
w.write(indent + "} catch (InvalidConfigException & __ice) {\n");
w.write(indent + " throw InvalidConfigException(\"Error parsing config '\" + CONFIG_DEF_NAME + \"' in namespace '\" + CONFIG_DEF_NAMESPACE + \"'"
+ ": \" + __ice.getMessage());\n"
+ indent + "}\n");
}
w.write("}\n"
+ "\n"
);
String lineBreak = (parent.length() + typeName.length() < 50 ? "" : "\n");
w.write("bool\n"
+ parent + lineBreak + "operator==(const " + typeName + "& __rhs) const\n"
+ "{\n"
+ " return ("
);
for (int i = 0; i<node.getChildren().length; ++i) {
CNode child = node.getChildren()[i];
String childName = getIdentifier(child.getName());
if (i != 0) {
w.write(" &&\n ");
}
w.write(childName + " == __rhs." + childName);
}
w.write(");\n"
+ "}\n"
+ "\n"
);
lineBreak = (parent.length() + typeName.length() < 50 ? "" : "\n");
w.write("bool\n"
+ parent + lineBreak + "operator!=(const " + typeName + "& __rhs) const\n"
+ "{\n"
+ " return !(operator==(__rhs));\n"
+ "}\n"
+ "\n"
);
writeSlimeEncoder(w, node, parent, root);
writeSlimeDecoder(w, node, parent, root);
writeSlimeConstructor(w, node, parent, root);
}
public void writeSlimeEncoder(Writer w, CNode node, String parent, boolean root) throws IOException
{
String indent = " ";
if (root) {
w.write("void\n"
+ parent + "serialize(::config::ConfigDataBuffer & __buffer) const\n"
+ "{\n");
w.write(indent + "vespalib::Slime & __slime(__buffer.slimeObject());\n");
w.write(indent + "vespalib::slime::Cursor & __croot = __slime.setObject();\n");
w.write(indent + "__croot.setDouble(\"version\", CONFIG_DEF_SERIALIZE_VERSION);\n");
w.write(indent + "vespalib::slime::Cursor & __key = __croot.setObject(\"configKey\");\n");
w.write(indent + "__key.setString(\"defName\", vespalib::Memory(CONFIG_DEF_NAME));\n");
w.write(indent + "__key.setString(\"defNamespace\", vespalib::Memory(CONFIG_DEF_NAMESPACE));\n");
w.write(indent + "__key.setString(\"defMd5\", vespalib::Memory(CONFIG_DEF_MD5));\n");
w.write(indent + "vespalib::slime::Cursor & __keySchema =__key.setArray(\"defSchema\");\n");
w.write(indent + "for (size_t i = 0; i < CONFIG_DEF_SCHEMA.size(); i++) {\n");
w.write(indent + " __keySchema.addString(vespalib::Memory(CONFIG_DEF_SCHEMA[i]));\n");
w.write(indent + "}\n");
w.write(indent + "vespalib::slime::Cursor & __cursor = __croot.setObject(\"configPayload\");\n");
} else {
w.write("void\n"
+ parent + "serialize(vespalib::slime::Cursor & __cursor) const\n"
+ "{\n");
}
for (CNode child : node.getChildren()) {
String childName = getIdentifier(child.getName());
String childType = getTypeName(child, false);
w.write(indent + "{\n");
indent = " ";
w.write(indent + "vespalib::slime::Cursor & __c = __cursor.setObject(\"" + child.getName() + "\");\n");
if (child.isArray) {
w.write(indent + "__c.setString(\"type\", \"array\");\n");
w.write(indent + "vespalib::slime::Cursor & __c2 = __c.setArray(\"value\");\n");
w.write(indent + "for (size_t __i = 0; __i < " + childName + ".size(); __i++) {\n");
w.write(indent + " vespalib::slime::Cursor & __c3 = __c2.addObject();\n");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write(indent + " __c3.setString(\"type\", \"enum\");\n");
w.write(indent + " __c3.set" + repType);
w.write("(\"value\", vespalib::Memory(get" + childType + "Name(" + childName + "[__i])));\n");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
w.write(indent + " __c3.setString(\"type\", \"" + type + "\");\n");
w.write(indent + " __c3.set" + repType);
if ("String".equals(repType)) {
w.write("(\"value\", vespalib::Memory(" + childName + "[__i]));\n");
} else {
w.write("(\"value\", " + childName + "[__i]);\n");
}
} else {
w.write(indent + " __c3.setString(\"type\", \"struct\");\n");
w.write(indent + " Cursor & __c4 = __c3.setObject(\"value\");\n");
w.write(indent + " " + childName + "[__i].serialize(__c4);\n");
}
w.write(indent + "}\n");
} else if (child.isMap) {
w.write(indent + "__c.setString(\"type\", \"map\");\n");
w.write(indent + "vespalib::slime::Cursor & __c2 = __c.setArray(\"value\");\n");
String childMapType = getTypeName(child, true);
w.write(indent + "for (" + childMapType + "::const_iterator it(" + childName + ".begin()), mt(" + childName + ".end()); it != mt; it++) {\n");
w.write(indent + " vespalib::slime::Cursor & __c3 = __c2.addObject();\n");
w.write(indent + " __c3.setString(\"key\", vespalib::Memory(it->first));\n");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write(indent + " __c3.setString(\"type\", \"enum\");\n");
w.write(indent + " __c3.set" + repType);
w.write("(\"value\", vespalib::Memory(get" + childType + "Name(it->second)));\n");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
w.write(indent + " __c3.setString(\"type\", \"" + type + "\");\n");
w.write(indent + " __c3.set" + repType);
if ("String".equals(repType)) {
w.write("(\"value\", vespalib::Memory(it->second));\n");
} else {
w.write("(\"value\", it->second);\n");
}
} else {
w.write(indent + " __c3.setString(\"type\", \"struct\");\n");
w.write(indent + " Cursor & __c4 = __c3.setObject(\"value\");\n");
w.write(indent + " it->second.serialize(__c4);\n");
}
w.write(indent + "}\n");
} else {
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write(indent + "__c.setString(\"type\", \"enum\");\n");
w.write(indent + "__c.set" + repType);
w.write("(\"value\", vespalib::Memory(get" + childType + "Name(" + childName + ")));\n");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
w.write(indent + "__c.setString(\"type\", \"" + type + "\");\n");
w.write(indent + "__c.set" + repType);
if ("String".equals(repType)) {
w.write("(\"value\", vespalib::Memory(" + childName + "));\n");
} else {
w.write("(\"value\", " + childName + ");\n");
}
} else {
w.write(indent + "__c.setString(\"type\", \"struct\");\n");
w.write(indent + "Cursor & __c2 = __c.setObject(\"value\");\n");
w.write(indent + childName + ".serialize(__c2);\n");
}
}
indent = " ";
w.write(indent + "}\n");
}
w.write("}\n\n");
}
public void writeSlimeDecoder(Writer w, CNode node, String parent, boolean root) throws IOException {
String tmpName = getTypeName(node, false);
String typeName = root ? getInternalClassName(node) : tmpName;
String indent = " ";
if (root) {
w.write(""
+ typeName + "::" + typeName + "(const ::config::ConfigDataBuffer & __buffer)\n"
+ "{\n");
w.write(indent + "const vespalib::Slime & __slime(__buffer.slimeObject());\n");
w.write(indent + "vespalib::slime::Inspector & __croot = __slime.get();\n");
w.write(indent + "vespalib::slime::Inspector & __inspector = __croot[\"configPayload\"];\n");
} else {
w.write(""
+ parent + typeName + "(const vespalib::slime::Inspector & __inspector)\n"
+ "{\n");
}
for (CNode child : node.getChildren()) {
String childName = getIdentifier(child.getName());
String childType = getTypeName(child, false);
String inspectorLine = "__inspector[\"" + child.getName() + "\"][\"value\"]";
if (child.isArray) {
w.write(indent + "for (size_t __i = 0; __i < " + inspectorLine + ".children(); __i++) {\n");
w.write(indent + " " + childName + ".push_back(");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write("get" + childType + "(" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string())");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
if ("String".equals(repType)) {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string()");
} else {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "()");
}
} else {
w.write(childType + "(" + inspectorLine + "[__i][\"value\"])");
}
w.write(");\n");
w.write(indent + "}\n");
} else if (child.isMap) {
w.write(indent + "for (size_t __i = 0; __i < " + inspectorLine + ".children(); __i++) {\n");
w.write(indent + " " + childName + "[" + inspectorLine + "[__i][\"key\"].asString().make_string()] = ");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write("get" + childType + "(" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string())");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
if ("String".equals(repType)) {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string()");
} else {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "()");
}
} else {
w.write(childType + "(" + inspectorLine + "[__i][\"value\"])");
}
w.write(";\n");
w.write(indent + "}\n");
} else {
w.write(indent + childName + " = ");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write("get" + childType + "(" + inspectorLine + ".as" + repType + "().make_string())");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
if ("String".equals(repType)) {
w.write("" + inspectorLine + ".as" + repType + "().make_string()");
} else {
w.write("" + inspectorLine + ".as" + repType + "()");
}
} else {
w.write(childType + "(" + inspectorLine + ")");
}
w.write(";\n");
}
}
w.write("}\n\n");
}
public void writeSlimeConstructor(Writer w, CNode node, String parent, boolean root) throws IOException {
String tmpName = getTypeName(node, false);
String typeName = root ? getInternalClassName(node) : tmpName;
String indent = " ";
if (root) {
w.write(""
+ typeName + "::" + typeName + "(const ::config::ConfigPayload & __payload)\n"
+ "{\n");
} else {
w.write(""
+ parent + typeName + "(const ::config::ConfigPayload & __payload)\n"
+ "{\n");
}
w.write(indent + "const vespalib::slime::Inspector & __inspector(__payload.get());\n");
for (CNode child : node.getChildren()) {
String childName = getIdentifier(child.getName());
String childType = getTypeName(child, false);
String childInspector = "__inspector[\"" + child.getName() + "\"]";
if (child.isArray) {
String inserterName = "__" + childName + "Inserter";
w.write(indent + "::config::internal::VectorInserter<" + childType);
if (child instanceof LeafCNode.EnumLeaf) {
w.write(", Internal" + childType + "Converter");
}
w.write("> " + inserterName + "(" + childName + ");\n");
w.write(indent + childInspector + ".traverse(" + inserterName + ");\n");
} else if (child.isMap) {
String inserterName = "__" + childName + "Inserter";
w.write(indent + "::config::internal::MapInserter<" + childType);
if (child instanceof LeafCNode.EnumLeaf) {
w.write(", Internal" + childType + "Converter");
}
w.write("> " + inserterName + "(" + childName + ");\n");
w.write(indent + childInspector + ".traverse(" + inserterName + ");\n");
} else {
w.write(indent + childName + " = ");
if (child instanceof LeafCNode.EnumLeaf) {
w.write("Internal" + childType + "Converter");
} else {
w.write("::config::internal::ValueConverter<" + childType + ">");
}
if (child instanceof LeafCNode && ((LeafCNode) child).getDefaultValue() != null) {
LeafCNode leaf = (LeafCNode) child;
String defaultValue = getDefaultValue(leaf);
w.write("()(" + childInspector + ", " + defaultValue + ");\n");
} else if (child instanceof InnerCNode) {
w.write("()(" + childInspector + ");\n");
} else {
w.write("()(\"" + child.getName() + "\", " + childInspector + ");\n");
}
}
}
w.write("}\n\n");
}
void writeBodyFooter(Writer w, CNode root) throws IOException {
w.write("}
writeNameSpaceEnd(w, generateCppNameSpace(root));
}
String getDefaultValue(LeafCNode leaf) {
String defaultVal = leaf.getDefaultValue().getStringRepresentation();
if (leaf.getType().equals("string") && defaultVal.equals("null"))
throw new CodegenRuntimeException("Default value null not allowed for C++ config");
if (leaf.getType().equals("long") && "-9223372036854775808".equals(defaultVal)) {
return "LONG_MIN";
} else if (leaf.getType().equals("int") && "-2147483648".equals(defaultVal)) {
return "INT_MIN";
} else {
return defaultVal;
}
}
} | class CppClassBuilder implements ClassBuilder {
private final CNode root;
private final NormalizedDefinition nd;
private final File rootDir;
private final String relativePathUnderRoot;
private static final Map<String, String> vectorTypeDefs;
static {
Map<String, String> map = new HashMap<String, String>();
map.put("bool", "BoolVector");
map.put("int32_t", "IntVector");
map.put("int64_t", "LongVector");
map.put("double", "DoubleVector");
map.put("vespalib::string", "StringVector");
vectorTypeDefs = Collections.unmodifiableMap(map);
}
private static final Map<String, String> mapTypeDefs;
static {
Map<String, String> map = new HashMap<>();
map.put("bool", "BoolMap");
map.put("int32_t", "IntMap");
map.put("int64_t", "LongMap");
map.put("double", "DoubleMap");
map.put("vespalib::string", "StringMap");
mapTypeDefs = Collections.unmodifiableMap(map);
}
private static final Map<String, String> slimeTypeMap;
static {
Map<String, String> map = new HashMap<String, String>();
map.put("bool", "Bool");
map.put("int", "Long");
map.put("long", "Long");
map.put("double", "Double");
map.put("string", "String");
map.put("enum", "String");
map.put("file", "String");
map.put("reference", "String");
slimeTypeMap = Collections.unmodifiableMap(map);
}
public CppClassBuilder(CNode root, NormalizedDefinition nd, File rootDir, String relativePathUnderRoot) {
this.root = root;
this.nd = nd;
this.rootDir = rootDir;
this.relativePathUnderRoot = relativePathUnderRoot;
}
public void createConfigClasses() {
generateConfig(root, nd);
}
String readFile(File f) throws IOException {
if (!f.isFile()) return null;
StringBuilder sb = new StringBuilder();
try (BufferedReader sr = new BufferedReader(new FileReader(f))) {
while (true) {
String line = sr.readLine();
if (line == null) break;
sb.append(line).append("\n");
}
return sb.toString();
}
}
void writeFile(File f, String content) throws IOException {
FileWriter fw = new FileWriter(f);
fw.write(content);
fw.close();
}
void generateConfig(CNode root, NormalizedDefinition nd) {
try{
StringWriter headerWriter = new StringWriter();
StringWriter bodyWriter = new StringWriter();
writeHeaderFile(headerWriter, root);
writeBodyFile(bodyWriter, root, relativePathUnderRoot, nd);
String newHeader = headerWriter.toString();
String newBody = bodyWriter.toString();
File headerFile = new File(rootDir, relativePathUnderRoot + "/" + getFileName(root, "h"));
File bodyFile = new File(rootDir, relativePathUnderRoot + "/" + getFileName(root, "cpp"));
String oldHeader = readFile(headerFile);
String oldBody = readFile(bodyFile);
if (oldHeader == null || !oldHeader.equals(newHeader)) {
writeFile(headerFile, newHeader);
}
if (oldBody == null || !oldBody.equals(newBody)) {
writeFile(bodyFile, newBody);
}
} catch (IOException e) {
e.printStackTrace();
}
}
String getFileName(CNode node, String extension) {
return "config-" + node.getName() + "." + extension;
}
static String removeDashesAndUpperCaseAllFirstChars(String source, boolean capitalizeFirst) {
String parts[] = source.split("[-_]");
StringBuilder sb = new StringBuilder();
for (String s : parts) {
sb.append(s.substring(0, 1).toUpperCase()).append(s.substring(1));
}
String result = sb.toString();
if (!capitalizeFirst) {
result = result.substring(0,1).toLowerCase() + result.substring(1);
}
return result;
}
/** Convert name of type to the name we want to use in macro ifdefs in file. */
String getDefineName(String name) {
return name.toUpperCase().replace("-", "");
}
/** Convert name of type to the name we want to use as type name in the generated code. */
static String getTypeName(String name) {
return removeDashesAndUpperCaseAllFirstChars(name, true);
}
/** Convert name of an identifier from value in def file to name to use in C++ file. */
String getIdentifier(String name) {
return removeDashesAndUpperCaseAllFirstChars(name, false);
}
void writeHeaderFile(Writer w, CNode root) throws IOException {
writeHeaderHeader(w, root);
writeHeaderPublic(w, root);
writeHeaderFooter(w, root);
}
void writeHeaderPublic(Writer w, CNode root) throws IOException {
w.write("public:\n");
writeHeaderTypeDefs(w, root, " ");
writeTypeDeclarations(w, root, " ");
writeHeaderFunctionDeclarations(w, getTypeName(root, false), root, " ");
writeStaticMemberDeclarations(w, " ");
writeMembers(w, root, " ");
}
String [] generateCppNameSpace(CNode root) {
String namespace = root.getNamespace();
if (namespace.contains(".")) {
return namespace.split("\\.");
}
return new String[]{namespace};
}
String generateCppNameSpaceString(String[] namespaceList) {
StringBuilder str = new StringBuilder();
for (int i = 0; i < namespaceList.length - 1; i++) {
str.append(namespaceList[i]);
str.append("::");
}
str.append(namespaceList[namespaceList.length - 1]);
return str.toString();
}
String generateCppNameSpaceDefine(String[] namespaceList) {
StringBuilder str = new StringBuilder();
for (int i = 0; i < namespaceList.length - 1; i++) {
str.append(namespaceList[i].toUpperCase());
str.append("_");
}
str.append(namespaceList[namespaceList.length - 1].toUpperCase());
return str.toString();
}
void writeNameSpaceBegin(Writer w, String [] namespaceList) throws IOException {
w.write("namespace ");
w.write(getNestedNameSpace(namespaceList));
w.write(" {\n");
}
String getNestedNameSpace(String [] namespaceList) {
return Arrays.stream(namespaceList).map(String::toString).collect(Collectors.joining("::"));
}
void writeNameSpaceEnd(Writer w, String [] namespaceList) throws IOException {
w.write("}
w.write(getNestedNameSpace(namespaceList));
w.write("\n");
}
void writeBodyFile(Writer w, CNode root, String subdir, NormalizedDefinition nd) throws IOException {
writeBodyHeader(w, root, subdir);
writeStaticMemberDefinitions(w, root, nd);
writeDefinition(w, root, null);
writeBodyFooter(w, root);
}
void writeBodyHeader(Writer w, CNode root, String subdir) throws IOException {
if (subdir == null) {
w.write("
} else {
w.write("
}
w.write("\n");
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("
w.write("\n");
writeNameSpaceBegin(w, generateCppNameSpace(root));
w.write("\nnamespace internal {\n\n");
w.write("using ::config::ConfigParser;\n");
w.write("using ::config::InvalidConfigException;\n");
w.write("using ::config::ConfigInstance;\n");
w.write("using ::config::ConfigValue;\n");
w.write("using namespace vespalib::slime::convenience;\n");
w.write("\n");
}
void writeStaticMemberDefinitions(Writer w, CNode root, NormalizedDefinition nd) throws IOException {
String typeName = getInternalClassName(root);
w.write("const vespalib::string " + typeName + "::CONFIG_DEF_MD5(\"" + root.defMd5 + "\");\n"
+ "const vespalib::string " + typeName + "::CONFIG_DEF_VERSION(\"" + root.defVersion + "\");\n"
+ "const vespalib::string " + typeName + "::CONFIG_DEF_NAME(\"" + root.defName + "\");\n"
+ "const vespalib::string " + typeName + "::CONFIG_DEF_NAMESPACE(\"" + root.getNamespace() + "\");\n"
+ "const int64_t " + typeName + "::CONFIG_DEF_SERIALIZE_VERSION(1);\n");
w.write("const static vespalib::string __internalDefSchema[] = {\n");
for (String line : nd.getNormalizedContent()) {
w.write("\"" + line.replace("\"", "\\\"") + "\",\n");
}
w.write("};\n");
w.write("const std::vector<vespalib::string> " + typeName + "::CONFIG_DEF_SCHEMA(__internalDefSchema,\n");
w.write(" __internalDefSchema + (sizeof(__internalDefSchema) / \n");
w.write(" sizeof(__internalDefSchema[0])));\n");
w.write("\n");
}
void writeDefinition(Writer w, CNode node, String fullClassName) throws IOException {
boolean root = false;
if (fullClassName == null) {
fullClassName = getInternalClassName(node);
root = true;
}
final String parent = fullClassName + "::";
java.util.Set<String> declaredTypes = new java.util.HashSet<String>();
for (CNode child : node.getChildren()) {
boolean complexType = (child instanceof InnerCNode || child instanceof LeafCNode.EnumLeaf);
if (complexType && !declaredTypes.contains(child.getName())) {
String typeName = getTypeName(child, false);
declaredTypes.add(child.getName());
if (child instanceof LeafCNode.EnumLeaf) {
LeafCNode.EnumLeaf leaf = (LeafCNode.EnumLeaf) child;
w.write(parent + typeName + "\n"
+ parent + "get" + typeName + "(const vespalib::string& name)\n"
+ "{\n"
);
for (int i=0; i<leaf.getLegalValues().length; ++i) {
w.write(" " + (i != 0 ? "} else " : ""));
w.write("if (name == \"" + leaf.getLegalValues()[i] + "\") {\n"
+ " return " + leaf.getLegalValues()[i] + ";\n");
}
w.write(" } else {\n"
+ " throw InvalidConfigException(\"Illegal enum value '\" + name + \"'\");\n"
+ " }\n"
+ "}\n"
+ "\n"
);
w.write("vespalib::string\n"
+ parent + "get" + typeName + "Name(" + typeName + " t)\n"
+ "{\n"
+ " switch (t) {\n"
);
for (int i=0; i<leaf.getLegalValues().length; ++i) {
w.write(" case " + leaf.getLegalValues()[i] + ": return \"" + leaf.getLegalValues()[i] + "\";\n");
}
w.write(" default:\n"
+ " {\n"
+ " vespalib::asciistream ost;\n"
+ " ost << \"UNKNOWN(\" << t << \")\";\n"
+ " return ost.str();\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "\n"
);
w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::string & __fieldName, const ::vespalib::slime::Inspector & __inspector) {\n");
w.write(" if (__inspector.valid()) {\n");
w.write(" return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
w.write(" }\n");
w.write(" throw InvalidConfigException(\"Value for '\" + __fieldName + \"' required but not found\");\n");
w.write("}\n");
w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::slime::Inspector & __inspector) {\n");
w.write(" return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
w.write("}\n");
w.write(parent + typeName + " " + parent + "Internal" + typeName + "Converter::operator()(const ::vespalib::slime::Inspector & __inspector, " + typeName + " __eDefault) {\n");
w.write(" if (__inspector.valid()) {\n");
w.write(" return " + parent + "get" + typeName + "(__inspector.asString().make_string());\n");
w.write(" }\n");
w.write(" return __eDefault;\n");
w.write("}\n\n");
} else {
writeDefinition(w, child, parent + typeName);
}
}
}
String tmpName = getTypeName(node, false);
String typeName = root ? getInternalClassName(node) : tmpName;
w.write(parent + typeName + "()\n");
for (int i=0; i<node.getChildren().length; ++i) {
CNode child = node.getChildren()[i];
String childName = getIdentifier(child.getName());
if (i == 0) {
w.write(" : " + childName + "(");
} else {
w.write("),\n " + childName + "(");
}
if (child.isArray || child.isMap) {
} else if (child instanceof LeafCNode) {
LeafCNode leaf = (LeafCNode) child;
if (leaf.getDefaultValue() != null) {
w.write(getDefaultValue(leaf));
} else {
if (leaf.getType().equals("bool")) {
w.write("false");
} else if (leaf.getType().equals("int")) {
w.write("0");
} else if (leaf.getType().equals("double")) {
w.write("0");
} else if (leaf.getType().equals("string")) {
} else if (leaf.getType().equals("enum")) {
LeafCNode.EnumLeaf enumNode = (LeafCNode.EnumLeaf) leaf;
w.write(enumNode.getLegalValues()[0]);
} else if (leaf.getType().equals("reference")) {
} else if (leaf.getType().equals("file")) {
}
}
}
}
if (node.getChildren().length > 0)
w.write(")\n");
w.write(""
+ "{\n"
+ "}\n"
+ "\n"
);
if (root) {
writeConfigClassCopyConstructorDefinition(w, fullClassName, typeName);
writeConfigClassAssignmentOperatorDefinition(w, fullClassName, typeName);
} else {
writeClassCopyConstructorDefinition(w, fullClassName, node);
writeClassAssignmentOperatorDefinition(w, fullClassName, node);
}
writeDestructor(w, parent, typeName);
String indent = " ";
if (root) {
w.write(typeName + "::" + typeName + "(const ConfigValue & __value)\n"
+ "{\n"
+ indent + "try {\n");
indent = " ";
w.write(indent + "const std::vector<vespalib::string> & __lines(__value.getLines());\n");
} else {
w.write(parent + typeName + "(const std::vector<vespalib::string> & __lines)\n"
+ "{\n");
}
w.write(""
+ indent + "std::set<vespalib::string> __remainingValuesToParse("
+ "__lines.begin(), __lines.end());\n");
w.write(indent + "for(std::set<vespalib::string>::iterator __rVTPiter = __remainingValuesToParse.begin();\n"
+ indent + " __rVTPiter != __remainingValuesToParse.end();)\n"
+ indent + "{\n"
+ indent + " if (ConfigParser::stripWhitespace(*__rVTPiter).empty()) {\n"
+ indent + " std::set<vespalib::string>::iterator __rVTPiter2 = __rVTPiter++;\n"
+ indent + " __remainingValuesToParse.erase(__rVTPiter2);\n"
+ indent + " } else {\n"
+ indent + " ++__rVTPiter;\n"
+ indent + " }\n"
+ indent + "}\n");
for (CNode child : node.getChildren()) {
String childType = getTypeName(child, false);
String childName = getIdentifier(child.getName());
if (child instanceof LeafCNode.EnumLeaf) {
if (child.isArray) {
w.write(indent + "std::vector<vespalib::string> " + childName + "__ValueList(\n ");
} else if (child.isMap) {
w.write(indent + "std::map<vespalib::string, vespalib::string> " + childName + "__ValueMap(\n ");
} else {
w.write(indent + childName + " = get" + childType + "(");
}
childType = "vespalib::string";
} else {
w.write(indent + childName + " = ");
}
if (child.isArray) {
w.write("ConfigParser::parseArray<" + childType + ">(\""
+ child.getName() + "\", __lines)");
} else if (child.isMap) {
w.write("ConfigParser::parseMap<" + childType + ">(\""
+ child.getName() + "\", __lines)");
} else {
if (child instanceof LeafCNode) {
w.write("ConfigParser::parse<" + childType + ">(\""
+ child.getName() + "\", __lines");
} else {
w.write("ConfigParser::parseStruct<" + childType + ">(\""
+ child.getName() + "\", __lines");
}
if (child instanceof LeafCNode && ((LeafCNode) child).getDefaultValue() != null) {
LeafCNode leaf = (LeafCNode) child;
if (leaf.getDefaultValue().getValue() != null) {
String defaultVal = getDefaultValue(leaf);
if (leaf instanceof LeafCNode.EnumLeaf) {
defaultVal = '"' + defaultVal + '"';
}
w.write(", " + defaultVal);
}
}
w.write(")");
}
if (child instanceof LeafCNode.EnumLeaf) {
childType = getTypeName(child, false);
w.write(");\n");
if (child.isArray) {
w.write(indent + childName + ".reserve(" + childName + "__ValueList.size());\n"
+ indent + "for (std::vector<vespalib::string>::const_iterator __it\n"
+ indent + " = " + childName + "__ValueList.begin();\n"
+ indent + " __it != " + childName + "__ValueList.end(); ++__it)\n"
+ indent + "{\n"
+ indent + " " + childName + ".push_back(get" + childType + "(*__it));\n"
+ indent + "}\n"
);
} else if (child.isMap) {
w.write(indent + "typedef std::map<vespalib::string, vespalib::string> __ValueMap;\n");
w.write(indent + "for (__ValueMap::iterator __it(" + childName + "__ValueMap.begin()), __mt(" + childName + "__ValueMap.end()); __it != __mt; __it++) {\n"
+ " " + childName + "[__it->first] = get" + childType + "(__it->second);\n"
+ "}\n"
);
}
} else {
w.write(";\n");
}
w.write(indent + "ConfigParser::stripLinesForKey(\""
+ child.getName() + "\", "
+ "__remainingValuesToParse);\n");
}
if (root) {
indent = " ";
w.write(indent + "} catch (InvalidConfigException & __ice) {\n");
w.write(indent + " throw InvalidConfigException(\"Error parsing config '\" + CONFIG_DEF_NAME + \"' in namespace '\" + CONFIG_DEF_NAMESPACE + \"'"
+ ": \" + __ice.getMessage());\n"
+ indent + "}\n");
}
w.write("}\n"
+ "\n"
);
String lineBreak = (parent.length() + typeName.length() < 50 ? "" : "\n");
w.write("bool\n"
+ parent + lineBreak + "operator==(const " + typeName + "& __rhs) const\n"
+ "{\n"
+ " return ("
);
for (int i = 0; i<node.getChildren().length; ++i) {
CNode child = node.getChildren()[i];
String childName = getIdentifier(child.getName());
if (i != 0) {
w.write(" &&\n ");
}
w.write(childName + " == __rhs." + childName);
}
w.write(");\n"
+ "}\n"
+ "\n"
);
lineBreak = (parent.length() + typeName.length() < 50 ? "" : "\n");
w.write("bool\n"
+ parent + lineBreak + "operator!=(const " + typeName + "& __rhs) const\n"
+ "{\n"
+ " return !(operator==(__rhs));\n"
+ "}\n"
+ "\n"
);
writeSlimeEncoder(w, node, parent, root);
writeSlimeDecoder(w, node, parent, root);
writeSlimeConstructor(w, node, parent, root);
}
public void writeSlimeEncoder(Writer w, CNode node, String parent, boolean root) throws IOException
{
String indent = " ";
if (root) {
w.write("void\n"
+ parent + "serialize(::config::ConfigDataBuffer & __buffer) const\n"
+ "{\n");
w.write(indent + "vespalib::Slime & __slime(__buffer.slimeObject());\n");
w.write(indent + "vespalib::slime::Cursor & __croot = __slime.setObject();\n");
w.write(indent + "__croot.setDouble(\"version\", CONFIG_DEF_SERIALIZE_VERSION);\n");
w.write(indent + "vespalib::slime::Cursor & __key = __croot.setObject(\"configKey\");\n");
w.write(indent + "__key.setString(\"defName\", vespalib::Memory(CONFIG_DEF_NAME));\n");
w.write(indent + "__key.setString(\"defNamespace\", vespalib::Memory(CONFIG_DEF_NAMESPACE));\n");
w.write(indent + "__key.setString(\"defMd5\", vespalib::Memory(CONFIG_DEF_MD5));\n");
w.write(indent + "vespalib::slime::Cursor & __keySchema =__key.setArray(\"defSchema\");\n");
w.write(indent + "for (size_t i = 0; i < CONFIG_DEF_SCHEMA.size(); i++) {\n");
w.write(indent + " __keySchema.addString(vespalib::Memory(CONFIG_DEF_SCHEMA[i]));\n");
w.write(indent + "}\n");
w.write(indent + "vespalib::slime::Cursor & __cursor = __croot.setObject(\"configPayload\");\n");
} else {
w.write("void\n"
+ parent + "serialize(vespalib::slime::Cursor & __cursor) const\n"
+ "{\n");
}
for (CNode child : node.getChildren()) {
String childName = getIdentifier(child.getName());
String childType = getTypeName(child, false);
w.write(indent + "{\n");
indent = " ";
w.write(indent + "vespalib::slime::Cursor & __c = __cursor.setObject(\"" + child.getName() + "\");\n");
if (child.isArray) {
w.write(indent + "__c.setString(\"type\", \"array\");\n");
w.write(indent + "vespalib::slime::Cursor & __c2 = __c.setArray(\"value\");\n");
w.write(indent + "for (size_t __i = 0; __i < " + childName + ".size(); __i++) {\n");
w.write(indent + " vespalib::slime::Cursor & __c3 = __c2.addObject();\n");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write(indent + " __c3.setString(\"type\", \"enum\");\n");
w.write(indent + " __c3.set" + repType);
w.write("(\"value\", vespalib::Memory(get" + childType + "Name(" + childName + "[__i])));\n");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
w.write(indent + " __c3.setString(\"type\", \"" + type + "\");\n");
w.write(indent + " __c3.set" + repType);
if ("String".equals(repType)) {
w.write("(\"value\", vespalib::Memory(" + childName + "[__i]));\n");
} else {
w.write("(\"value\", " + childName + "[__i]);\n");
}
} else {
w.write(indent + " __c3.setString(\"type\", \"struct\");\n");
w.write(indent + " Cursor & __c4 = __c3.setObject(\"value\");\n");
w.write(indent + " " + childName + "[__i].serialize(__c4);\n");
}
w.write(indent + "}\n");
} else if (child.isMap) {
w.write(indent + "__c.setString(\"type\", \"map\");\n");
w.write(indent + "vespalib::slime::Cursor & __c2 = __c.setArray(\"value\");\n");
String childMapType = getTypeName(child, true);
w.write(indent + "for (" + childMapType + "::const_iterator it(" + childName + ".begin()), mt(" + childName + ".end()); it != mt; it++) {\n");
w.write(indent + " vespalib::slime::Cursor & __c3 = __c2.addObject();\n");
w.write(indent + " __c3.setString(\"key\", vespalib::Memory(it->first));\n");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write(indent + " __c3.setString(\"type\", \"enum\");\n");
w.write(indent + " __c3.set" + repType);
w.write("(\"value\", vespalib::Memory(get" + childType + "Name(it->second)));\n");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
w.write(indent + " __c3.setString(\"type\", \"" + type + "\");\n");
w.write(indent + " __c3.set" + repType);
if ("String".equals(repType)) {
w.write("(\"value\", vespalib::Memory(it->second));\n");
} else {
w.write("(\"value\", it->second);\n");
}
} else {
w.write(indent + " __c3.setString(\"type\", \"struct\");\n");
w.write(indent + " Cursor & __c4 = __c3.setObject(\"value\");\n");
w.write(indent + " it->second.serialize(__c4);\n");
}
w.write(indent + "}\n");
} else {
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write(indent + "__c.setString(\"type\", \"enum\");\n");
w.write(indent + "__c.set" + repType);
w.write("(\"value\", vespalib::Memory(get" + childType + "Name(" + childName + ")));\n");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
w.write(indent + "__c.setString(\"type\", \"" + type + "\");\n");
w.write(indent + "__c.set" + repType);
if ("String".equals(repType)) {
w.write("(\"value\", vespalib::Memory(" + childName + "));\n");
} else {
w.write("(\"value\", " + childName + ");\n");
}
} else {
w.write(indent + "__c.setString(\"type\", \"struct\");\n");
w.write(indent + "Cursor & __c2 = __c.setObject(\"value\");\n");
w.write(indent + childName + ".serialize(__c2);\n");
}
}
indent = " ";
w.write(indent + "}\n");
}
w.write("}\n\n");
}
public void writeSlimeDecoder(Writer w, CNode node, String parent, boolean root) throws IOException {
String tmpName = getTypeName(node, false);
String typeName = root ? getInternalClassName(node) : tmpName;
String indent = " ";
if (root) {
w.write(""
+ typeName + "::" + typeName + "(const ::config::ConfigDataBuffer & __buffer)\n"
+ "{\n");
w.write(indent + "const vespalib::Slime & __slime(__buffer.slimeObject());\n");
w.write(indent + "vespalib::slime::Inspector & __croot = __slime.get();\n");
w.write(indent + "vespalib::slime::Inspector & __inspector = __croot[\"configPayload\"];\n");
} else {
w.write(""
+ parent + typeName + "(const vespalib::slime::Inspector & __inspector)\n"
+ "{\n");
}
for (CNode child : node.getChildren()) {
String childName = getIdentifier(child.getName());
String childType = getTypeName(child, false);
String inspectorLine = "__inspector[\"" + child.getName() + "\"][\"value\"]";
if (child.isArray) {
w.write(indent + "for (size_t __i = 0; __i < " + inspectorLine + ".children(); __i++) {\n");
w.write(indent + " " + childName + ".push_back(");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write("get" + childType + "(" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string())");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
if ("String".equals(repType)) {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string()");
} else {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "()");
}
} else {
w.write(childType + "(" + inspectorLine + "[__i][\"value\"])");
}
w.write(");\n");
w.write(indent + "}\n");
} else if (child.isMap) {
w.write(indent + "for (size_t __i = 0; __i < " + inspectorLine + ".children(); __i++) {\n");
w.write(indent + " " + childName + "[" + inspectorLine + "[__i][\"key\"].asString().make_string()] = ");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write("get" + childType + "(" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string())");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
if ("String".equals(repType)) {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "().make_string()");
} else {
w.write("" + inspectorLine + "[__i][\"value\"].as" + repType + "()");
}
} else {
w.write(childType + "(" + inspectorLine + "[__i][\"value\"])");
}
w.write(";\n");
w.write(indent + "}\n");
} else {
w.write(indent + childName + " = ");
if (child instanceof LeafCNode.EnumLeaf) {
String repType = slimeTypeMap.get("enum");
w.write("get" + childType + "(" + inspectorLine + ".as" + repType + "().make_string())");
} else if (child instanceof LeafCNode) {
String type = ((LeafCNode) child).getType();
String repType = slimeTypeMap.get(type);
if ("String".equals(repType)) {
w.write("" + inspectorLine + ".as" + repType + "().make_string()");
} else {
w.write("" + inspectorLine + ".as" + repType + "()");
}
} else {
w.write(childType + "(" + inspectorLine + ")");
}
w.write(";\n");
}
}
w.write("}\n\n");
}
public void writeSlimeConstructor(Writer w, CNode node, String parent, boolean root) throws IOException {
String tmpName = getTypeName(node, false);
String typeName = root ? getInternalClassName(node) : tmpName;
String indent = " ";
if (root) {
w.write(""
+ typeName + "::" + typeName + "(const ::config::ConfigPayload & __payload)\n"
+ "{\n");
} else {
w.write(""
+ parent + typeName + "(const ::config::ConfigPayload & __payload)\n"
+ "{\n");
}
w.write(indent + "const vespalib::slime::Inspector & __inspector(__payload.get());\n");
for (CNode child : node.getChildren()) {
String childName = getIdentifier(child.getName());
String childType = getTypeName(child, false);
String childInspector = "__inspector[\"" + child.getName() + "\"]";
if (child.isArray) {
String inserterName = "__" + childName + "Inserter";
w.write(indent + "::config::internal::VectorInserter<" + childType);
if (child instanceof LeafCNode.EnumLeaf) {
w.write(", Internal" + childType + "Converter");
}
w.write("> " + inserterName + "(" + childName + ");\n");
w.write(indent + childInspector + ".traverse(" + inserterName + ");\n");
} else if (child.isMap) {
String inserterName = "__" + childName + "Inserter";
w.write(indent + "::config::internal::MapInserter<" + childType);
if (child instanceof LeafCNode.EnumLeaf) {
w.write(", Internal" + childType + "Converter");
}
w.write("> " + inserterName + "(" + childName + ");\n");
w.write(indent + childInspector + ".traverse(" + inserterName + ");\n");
} else {
w.write(indent + childName + " = ");
if (child instanceof LeafCNode.EnumLeaf) {
w.write("Internal" + childType + "Converter");
} else {
w.write("::config::internal::ValueConverter<" + childType + ">");
}
if (child instanceof LeafCNode && ((LeafCNode) child).getDefaultValue() != null) {
LeafCNode leaf = (LeafCNode) child;
String defaultValue = getDefaultValue(leaf);
w.write("()(" + childInspector + ", " + defaultValue + ");\n");
} else if (child instanceof InnerCNode) {
w.write("()(" + childInspector + ");\n");
} else {
w.write("()(\"" + child.getName() + "\", " + childInspector + ");\n");
}
}
}
w.write("}\n\n");
}
void writeBodyFooter(Writer w, CNode root) throws IOException {
w.write("}
writeNameSpaceEnd(w, generateCppNameSpace(root));
}
String getDefaultValue(LeafCNode leaf) {
String defaultVal = leaf.getDefaultValue().getStringRepresentation();
if (leaf.getType().equals("string") && defaultVal.equals("null"))
throw new CodegenRuntimeException("Default value null not allowed for C++ config");
if (leaf.getType().equals("long") && "-9223372036854775808".equals(defaultVal)) {
return "LONG_MIN";
} else if (leaf.getType().equals("int") && "-2147483648".equals(defaultVal)) {
return "INT_MIN";
} else {
return defaultVal;
}
}
} |
Nit: Variable with `_` 😄 | protected void maintain() {
List<NodeRepositoryNode> nodes = getNodes();
List<ResourceSnapshot> resourceSnapshots = getResourceSnapshots(nodes);
resourceSnapshotConsumer.consume(resourceSnapshots);
metric.set(metering_last_reported, clock.millis() / 1000, metric.createContext(Collections.emptyMap()));
metric.set(metering_total_reported, resourceSnapshots.stream()
.mapToDouble(r -> r.getCpuCores() + r.getMemoryGb() + r.getDiskGb())
.sum()
, metric.createContext(Collections.emptyMap()));
} | metric.set(metering_total_reported, resourceSnapshots.stream() | protected void maintain() {
List<NodeRepositoryNode> nodes = getNodes();
List<ResourceSnapshot> resourceSnapshots = getResourceSnapshots(nodes);
resourceSnapshotConsumer.consume(resourceSnapshots);
metric.set(METERING_LAST_REPORTED, clock.millis() / 1000, metric.createContext(Collections.emptyMap()));
metric.set(METERING_TOTAL_REPORTED, resourceSnapshots.stream()
.mapToDouble(r -> r.getCpuCores() + r.getMemoryGb() + r.getDiskGb())
.sum()
, metric.createContext(Collections.emptyMap()));
} | class ResourceMeterMaintainer extends Maintainer {
private final Clock clock;
private final Metric metric;
private final NodeRepository nodeRepository;
private final ResourceSnapshotConsumer resourceSnapshotConsumer;
private static final String metering_last_reported = "metering_last_reported";
private static final String metering_total_reported = "metering_total_reported";
@SuppressWarnings("WeakerAccess")
public ResourceMeterMaintainer(Controller controller,
Duration interval,
JobControl jobControl,
NodeRepository nodeRepository,
Clock clock,
Metric metric,
ResourceSnapshotConsumer resourceSnapshotConsumer) {
super(controller, interval, jobControl, null, SystemName.all());
this.clock = clock;
this.nodeRepository = nodeRepository;
this.metric = metric;
this.resourceSnapshotConsumer = resourceSnapshotConsumer;
}
@Override
private List<NodeRepositoryNode> getNodes() {
return controller().zoneRegistry().zones()
.ofCloud(CloudName.from("aws"))
.reachable().zones().stream()
.flatMap(zone -> nodeRepository.listNodes(zone.getId()).nodes().stream())
.filter(node -> node.getOwner() != null && !node.getOwner().getTenant().equals("hosted-vespa"))
.filter(node -> node.getState() == NodeState.active)
.collect(Collectors.toList());
}
private List<ResourceSnapshot> getResourceSnapshots(List<NodeRepositoryNode> nodes) {
return nodes.stream()
.collect(Collectors.groupingBy(
node -> applicationIdFromNodeOwner(node.getOwner()),
Collectors.collectingAndThen(Collectors.toList(), nodeList -> ResourceSnapshot.from(nodeList, clock.instant()))
)).values().stream().collect(Collectors.toList());
}
private ApplicationId applicationIdFromNodeOwner(NodeOwner owner) {
return ApplicationId.from(owner.getTenant(), owner.getApplication(), owner.getInstance());
}
} | class ResourceMeterMaintainer extends Maintainer {
private final Clock clock;
private final Metric metric;
private final NodeRepository nodeRepository;
private final ResourceSnapshotConsumer resourceSnapshotConsumer;
private static final String METERING_LAST_REPORTED = "metering_last_reported";
private static final String METERING_TOTAL_REPORTED = "metering_total_reported";
@SuppressWarnings("WeakerAccess")
public ResourceMeterMaintainer(Controller controller,
Duration interval,
JobControl jobControl,
NodeRepository nodeRepository,
Clock clock,
Metric metric,
ResourceSnapshotConsumer resourceSnapshotConsumer) {
super(controller, interval, jobControl, null, SystemName.all());
this.clock = clock;
this.nodeRepository = nodeRepository;
this.metric = metric;
this.resourceSnapshotConsumer = resourceSnapshotConsumer;
}
@Override
private List<NodeRepositoryNode> getNodes() {
return controller().zoneRegistry().zones()
.ofCloud(CloudName.from("aws"))
.reachable().zones().stream()
.flatMap(zone -> nodeRepository.listNodes(zone.getId()).nodes().stream())
.filter(node -> node.getOwner() != null && !node.getOwner().getTenant().equals("hosted-vespa"))
.filter(node -> node.getState() == NodeState.active)
.collect(Collectors.toList());
}
private List<ResourceSnapshot> getResourceSnapshots(List<NodeRepositoryNode> nodes) {
return nodes.stream()
.collect(Collectors.groupingBy(
node -> applicationIdFromNodeOwner(node.getOwner()),
Collectors.collectingAndThen(Collectors.toList(), nodeList -> ResourceSnapshot.from(nodeList, clock.instant()))
)).values().stream().collect(Collectors.toList());
}
private ApplicationId applicationIdFromNodeOwner(NodeOwner owner) {
return ApplicationId.from(owner.getTenant(), owner.getApplication(), owner.getInstance());
}
} |
`Failed to retrieve metrics from` | private void getHostMetrics(URI hostURI, MetricsAggregator metrics) {
Slime responseBody = doMetricsRequest(hostURI);
var parseError = responseBody.get().field("error_message").asString();
if (! parseError.isEmpty()) {
log.info("Failed to retrieve logs from " + hostURI + ": " + parseError);
}
Inspector services = responseBody.get().field("services");
services.traverse((ArrayTraverser) (i, servicesInspector) -> {
parseService(servicesInspector, metrics);
});
} | log.info("Failed to retrieve logs from " + hostURI + ": " + parseError); | private void getHostMetrics(URI hostURI, MetricsAggregator metrics) {
Slime responseBody = doMetricsRequest(hostURI);
var parseError = responseBody.get().field("error_message");
if (parseError.valid()) {
log.info("Failed to retrieve metrics from " + hostURI + ": " + parseError.asString());
}
Inspector services = responseBody.get().field("services");
services.traverse((ArrayTraverser) (i, servicesInspector) -> {
parseService(servicesInspector, metrics);
});
} | class MetricsRetriever {
private static final Logger log = Logger.getLogger(MetricsRetriever.class.getName());
private final HttpClient httpClient = HttpClientBuilder.create().build();
/**
* Call the metrics API on each host in the cluster and aggregate the metrics
* into a single value.
*/
public MetricsAggregator requestMetricsForCluster(ClusterInfo clusterInfo) {
var aggregator = new MetricsAggregator();
clusterInfo.getHostnames().forEach(host -> getHostMetrics(host, aggregator));
return aggregator;
}
private Slime doMetricsRequest(URI hostURI) {
HttpGet get = new HttpGet(hostURI);
try {
HttpResponse response = httpClient.execute(get);
InputStream is = response.getEntity().getContent();
Slime slime = SlimeUtils.jsonToSlime(is.readAllBytes());
is.close();
return slime;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private void parseService(Inspector service, MetricsAggregator metrics) {
String serviceName = service.field("name").asString();
Instant timestamp = Instant.ofEpochSecond(service.field("timestamp").asLong());
metrics.setTimestamp(timestamp);
service.field("metrics").traverse((ArrayTraverser) (i, m) -> {
Inspector values = m.field("values");
switch (serviceName) {
case "container":
metrics.addContainerLatency(
values.field("query_latency.sum").asDouble(),
values.field("query_latency.count").asDouble());
metrics.addFeedLatency(
values.field("feed_latency.sum").asDouble(),
values.field("feed_latency.count").asDouble());
break;
case "qrserver":
metrics.addQrLatency(
values.field("query_latency.sum").asDouble(),
values.field("query_latency.count").asDouble());
break;
case "distributor":
metrics.addDocumentCount(values.field("vds.distributor.docsstored.average").asDouble());
break;
}
});
}
} | class MetricsRetriever {
private static final Logger log = Logger.getLogger(MetricsRetriever.class.getName());
private final HttpClient httpClient = HttpClientBuilder.create().build();
/**
* Call the metrics API on each host in the cluster and aggregate the metrics
* into a single value.
*/
public MetricsAggregator requestMetricsForCluster(ClusterInfo clusterInfo) {
var aggregator = new MetricsAggregator();
clusterInfo.getHostnames().forEach(host -> getHostMetrics(host, aggregator));
return aggregator;
}
private Slime doMetricsRequest(URI hostURI) {
HttpGet get = new HttpGet(hostURI);
try {
HttpResponse response = httpClient.execute(get);
InputStream is = response.getEntity().getContent();
Slime slime = SlimeUtils.jsonToSlime(is.readAllBytes());
is.close();
return slime;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private void parseService(Inspector service, MetricsAggregator metrics) {
String serviceName = service.field("name").asString();
Instant timestamp = Instant.ofEpochSecond(service.field("timestamp").asLong());
metrics.setTimestamp(timestamp);
service.field("metrics").traverse((ArrayTraverser) (i, m) -> {
Inspector values = m.field("values");
switch (serviceName) {
case "container":
metrics.addContainerLatency(
values.field("query_latency.sum").asDouble(),
values.field("query_latency.count").asDouble());
metrics.addFeedLatency(
values.field("feed_latency.sum").asDouble(),
values.field("feed_latency.count").asDouble());
break;
case "qrserver":
metrics.addQrLatency(
values.field("query_latency.sum").asDouble(),
values.field("query_latency.count").asDouble());
break;
case "distributor":
metrics.addDocumentCount(values.field("vds.distributor.docsstored.average").asDouble());
break;
}
});
}
} |
Looks like the order has changed here. logtp/rpc should be first? | public void allocatePorts(int start, PortAllocBridge from) {
int port = (start == 0) ? getWantedPort() : start;
from.requirePort(port++, "unused");
from.requirePort(port++, "logtp");
from.requirePort(port++, "last.errors");
from.requirePort(port++, "replicator");
} | from.requirePort(port++, "unused"); | public void allocatePorts(int start, PortAllocBridge from) {
int port = (start == 0) ? getWantedPort() : start;
from.requirePort(port++, "rpc");
from.requirePort(port++, "unused/1");
from.requirePort(port++, "unused/2");
from.requirePort(port++, "unused/3");
} | class Logserver extends AbstractService {
private static final long serialVersionUID = 1L;
private static final String logArchiveDir = "$ROOT/logs/vespa/logarchive";
public Logserver(AbstractConfigProducer parent) {
super(parent, "logserver");
portsMeta.on(0).tag("logtp").tag("rpc");
portsMeta.on(1).tag("unused");
portsMeta.on(2).tag("unused");
portsMeta.on(3).tag("unused");
setProp("clustertype", "admin");
setProp("clustername", "admin");
}
/**
* @return the startup command for the logserver
*/
public String getStartupCommand() {
return "exec $ROOT/bin/vespa-logserver-start " + getMyJVMArgs() + " " + getJvmOptions();
}
/**
* @return the jvm args to be used by the logserver.
*/
private String getMyJVMArgs() {
StringBuilder sb = new StringBuilder();
sb.append("-Dlogserver.rpcListenPort=").append(getRelativePort(0));
sb.append(" ");
sb.append("-Dlogserver.logarchive.dir=" + logArchiveDir);
return sb.toString();
}
/**
* Returns the desired base port for this service.
*/
public int getWantedPort() {
return 19080;
}
/**
* The desired base port is the only allowed base port.
*
* @return 'true' always
*/
public boolean requiresWantedPort() {
return true;
}
/**
* @return the number of ports needed by the logserver.
*/
public int getPortCount() {
return 4;
}
@Override
} | class Logserver extends AbstractService {
private static final long serialVersionUID = 1L;
private static final String logArchiveDir = "$ROOT/logs/vespa/logarchive";
public Logserver(AbstractConfigProducer parent) {
super(parent, "logserver");
portsMeta.on(0).tag("logtp").tag("rpc");
portsMeta.on(1).tag("unused");
portsMeta.on(2).tag("unused");
portsMeta.on(3).tag("unused");
setProp("clustertype", "admin");
setProp("clustername", "admin");
}
/**
* @return the startup command for the logserver
*/
public String getStartupCommand() {
return "exec $ROOT/bin/vespa-logserver-start " + getMyJVMArgs() + " " + getJvmOptions();
}
/**
* @return the jvm args to be used by the logserver.
*/
private String getMyJVMArgs() {
StringBuilder sb = new StringBuilder();
sb.append("-Dlogserver.rpcListenPort=").append(getRelativePort(0));
sb.append(" ");
sb.append("-Dlogserver.logarchive.dir=" + logArchiveDir);
return sb.toString();
}
/**
* Returns the desired base port for this service.
*/
public int getWantedPort() {
return 19080;
}
/**
* The desired base port is the only allowed base port.
*
* @return 'true' always
*/
public boolean requiresWantedPort() {
return true;
}
/**
* @return the number of ports needed by the logserver.
*/
public int getPortCount() {
return 4;
}
@Override
} |
Is this dependent of a default in proton.def? If so, consider doing the same as here: https://github.com/vespa-engine/vespa/blob/35283b454dfdaa9acbcea24274e01e65d128b7b6/config-model/src/main/java/com/yahoo/vespa/model/search/NodeFlavorTuning.java#L111 | public void getConfig(ProtonConfig.Builder builder) {
double visibilityDelay = hasIndexedCluster() ? getIndexed().getVisibilityDelay() : 0.0;
builder.feeding.concurrency(0.35);
boolean hasAnyNonIndexedCluster = false;
for (NewDocumentType type : TopologicalDocumentTypeSorter.sort(documentDefinitions.values())) {
ProtonConfig.Documentdb.Builder ddbB = new ProtonConfig.Documentdb.Builder();
String docTypeName = type.getFullName().getName();
boolean globalDocType = isGloballyDistributed(type);
ddbB.inputdoctypename(docTypeName)
.configid(getConfigId())
.visibilitydelay(visibilityDelay)
.global(globalDocType);
Optional<StreamingSearchCluster> ssc = findStreamingCluster(docTypeName);
if (ssc.isPresent()) {
hasAnyNonIndexedCluster = true;
ddbB.inputdoctypename(type.getFullName().getName())
.configid(ssc.get().getDocumentDBConfigId())
.mode(ProtonConfig.Documentdb.Mode.Enum.STREAMING)
.feeding.concurrency(0.0);
} else if (hasIndexedCluster()) {
if (getIndexed().hasDocumentDB(type.getFullName().getName())) {
getIndexed().fillDocumentDBConfig(type.getFullName().getName(), ddbB);
if (tuning != null && tuning.searchNode != null && tuning.searchNode.feeding != null) {
ddbB.feeding.concurrency(tuning.searchNode.feeding.concurrency / 2);
} else {
ddbB.feeding.concurrency(builder.feeding.build().concurrency());
}
} else {
hasAnyNonIndexedCluster = true;
ddbB.feeding.concurrency(0.0);
ddbB.mode(ProtonConfig.Documentdb.Mode.Enum.STORE_ONLY);
}
} else {
hasAnyNonIndexedCluster = true;
ddbB.feeding.concurrency(0.0);
ddbB.mode(ProtonConfig.Documentdb.Mode.Enum.STORE_ONLY);
}
if (globalDocType) {
ddbB.visibilitydelay(0.0);
}
builder.documentdb(ddbB);
}
int numDocumentDbs = builder.documentdb.size();
builder.initialize(new ProtonConfig.Initialize.Builder().threads(numDocumentDbs + 1));
if (resourceLimits.isPresent()) {
resourceLimits.get().getConfig(builder);
}
if (tuning != null) {
tuning.getConfig(builder);
}
if (redundancy != null) {
redundancy.getConfig(builder);
}
if (hasAnyNonIndexedCluster) {
builder.feeding.concurrency(builder.feeding.build().concurrency() * 2);
}
} | builder.feeding.concurrency(0.35); | public void getConfig(ProtonConfig.Builder builder) {
double visibilityDelay = hasIndexedCluster() ? getIndexed().getVisibilityDelay() : 0.0;
builder.feeding.concurrency(0.35);
boolean hasAnyNonIndexedCluster = false;
for (NewDocumentType type : TopologicalDocumentTypeSorter.sort(documentDefinitions.values())) {
ProtonConfig.Documentdb.Builder ddbB = new ProtonConfig.Documentdb.Builder();
String docTypeName = type.getFullName().getName();
boolean globalDocType = isGloballyDistributed(type);
ddbB.inputdoctypename(docTypeName)
.configid(getConfigId())
.visibilitydelay(visibilityDelay)
.global(globalDocType);
Optional<StreamingSearchCluster> ssc = findStreamingCluster(docTypeName);
if (ssc.isPresent()) {
hasAnyNonIndexedCluster = true;
ddbB.inputdoctypename(type.getFullName().getName())
.configid(ssc.get().getDocumentDBConfigId())
.mode(ProtonConfig.Documentdb.Mode.Enum.STREAMING)
.feeding.concurrency(0.0);
} else if (hasIndexedCluster()) {
if (getIndexed().hasDocumentDB(type.getFullName().getName())) {
getIndexed().fillDocumentDBConfig(type.getFullName().getName(), ddbB);
if (tuning != null && tuning.searchNode != null && tuning.searchNode.feeding != null) {
ddbB.feeding.concurrency(tuning.searchNode.feeding.concurrency / 2);
} else {
ddbB.feeding.concurrency(builder.feeding.build().concurrency());
}
} else {
hasAnyNonIndexedCluster = true;
ddbB.feeding.concurrency(0.0);
ddbB.mode(ProtonConfig.Documentdb.Mode.Enum.STORE_ONLY);
}
} else {
hasAnyNonIndexedCluster = true;
ddbB.feeding.concurrency(0.0);
ddbB.mode(ProtonConfig.Documentdb.Mode.Enum.STORE_ONLY);
}
if (globalDocType) {
ddbB.visibilitydelay(0.0);
}
builder.documentdb(ddbB);
}
int numDocumentDbs = builder.documentdb.size();
builder.initialize(new ProtonConfig.Initialize.Builder().threads(numDocumentDbs + 1));
if (resourceLimits.isPresent()) {
resourceLimits.get().getConfig(builder);
}
if (tuning != null) {
tuning.getConfig(builder);
}
if (redundancy != null) {
redundancy.getConfig(builder);
}
if (hasAnyNonIndexedCluster) {
builder.feeding.concurrency(builder.feeding.build().concurrency() * 2);
}
} | class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> {
private final Map<String, NewDocumentType> documentDefinitions;
private final Set<NewDocumentType> globallyDistributedDocuments;
public Builder(Map<String, NewDocumentType> documentDefinitions,
Set<NewDocumentType> globallyDistributedDocuments) {
this.documentDefinitions = documentDefinitions;
this.globallyDistributedDocuments = globallyDistributedDocuments;
}
@Override
protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
ModelElement clusterElem = new ModelElement(producerSpec);
String clusterName = ContentCluster.getClusterName(clusterElem);
Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown");
ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, documentDefinitions, globallyDistributedDocuments,
getFlushOnShutdown(flushOnShutdownElem, deployState));
ModelElement tuning = clusterElem.childByPath("engine.proton.tuning");
if (tuning != null) {
search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml()));
}
ModelElement protonElem = clusterElem.childByPath("engine.proton");
if (protonElem != null) {
search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem));
}
buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search);
buildIndexedSearchCluster(deployState, clusterElem, clusterName, search);
return search;
}
private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) {
if (flushOnShutdownElem != null) {
return flushOnShutdownElem;
}
return ! stateIsHosted(deployState);
}
private Double getQueryTimeout(ModelElement clusterElem) {
return clusterElem.childAsDouble("engine.proton.query-timeout");
}
private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) {
ModelElement docElem = clusterElem.child("documents");
if (docElem == null) {
return;
}
for (ModelElement docType : docElem.subElements("document")) {
String mode = docType.stringAttribute("mode");
if ("streaming".equals(mode)) {
buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType);
}
}
}
private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName,
ContentSearchCluster search, ModelElement docType) {
String docTypeName = docType.stringAttribute("type");
StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName);
search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType));
}
private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem,
String clusterName, ContentSearchCluster search) {
List<ModelElement> indexedDefs = getIndexedSearchDefinitions(clusterElem);
if (!indexedDefs.isEmpty()) {
IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState);
isc.setRoutingSelector(clusterElem.childAsString("documents.selection"));
Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay");
if (visibilityDelay != null) {
isc.setVisibilityDelay(visibilityDelay);
}
search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs);
}
}
private List<ModelElement> getIndexedSearchDefinitions(ModelElement clusterElem) {
List<ModelElement> indexedDefs = new ArrayList<>();
ModelElement docElem = clusterElem.child("documents");
if (docElem == null) {
return indexedDefs;
}
for (ModelElement docType : docElem.subElements("document")) {
String mode = docType.stringAttribute("mode");
if ("index".equals(mode)) {
indexedDefs.add(docType);
}
}
return indexedDefs;
}
} | class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> {
private final Map<String, NewDocumentType> documentDefinitions;
private final Set<NewDocumentType> globallyDistributedDocuments;
public Builder(Map<String, NewDocumentType> documentDefinitions,
Set<NewDocumentType> globallyDistributedDocuments) {
this.documentDefinitions = documentDefinitions;
this.globallyDistributedDocuments = globallyDistributedDocuments;
}
@Override
protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
ModelElement clusterElem = new ModelElement(producerSpec);
String clusterName = ContentCluster.getClusterName(clusterElem);
Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown");
ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, documentDefinitions, globallyDistributedDocuments,
getFlushOnShutdown(flushOnShutdownElem, deployState));
ModelElement tuning = clusterElem.childByPath("engine.proton.tuning");
if (tuning != null) {
search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml()));
}
ModelElement protonElem = clusterElem.childByPath("engine.proton");
if (protonElem != null) {
search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem));
}
buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search);
buildIndexedSearchCluster(deployState, clusterElem, clusterName, search);
return search;
}
private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) {
if (flushOnShutdownElem != null) {
return flushOnShutdownElem;
}
return ! stateIsHosted(deployState);
}
private Double getQueryTimeout(ModelElement clusterElem) {
return clusterElem.childAsDouble("engine.proton.query-timeout");
}
private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) {
ModelElement docElem = clusterElem.child("documents");
if (docElem == null) {
return;
}
for (ModelElement docType : docElem.subElements("document")) {
String mode = docType.stringAttribute("mode");
if ("streaming".equals(mode)) {
buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType);
}
}
}
private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName,
ContentSearchCluster search, ModelElement docType) {
String docTypeName = docType.stringAttribute("type");
StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName);
search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType));
}
private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem,
String clusterName, ContentSearchCluster search) {
List<ModelElement> indexedDefs = getIndexedSearchDefinitions(clusterElem);
if (!indexedDefs.isEmpty()) {
IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState);
isc.setRoutingSelector(clusterElem.childAsString("documents.selection"));
Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay");
if (visibilityDelay != null) {
isc.setVisibilityDelay(visibilityDelay);
}
search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs);
}
}
private List<ModelElement> getIndexedSearchDefinitions(ModelElement clusterElem) {
List<ModelElement> indexedDefs = new ArrayList<>();
ModelElement docElem = clusterElem.child("documents");
if (docElem == null) {
return indexedDefs;
}
for (ModelElement docType : docElem.subElements("document")) {
String mode = docType.stringAttribute("mode");
if ("index".equals(mode)) {
indexedDefs.add(docType);
}
}
return indexedDefs;
}
} |
No, it is not. This is itself a scaling factor, and does not rely on default in def file. I just did it as it was done earlier on. | public void getConfig(ProtonConfig.Builder builder) {
double visibilityDelay = hasIndexedCluster() ? getIndexed().getVisibilityDelay() : 0.0;
builder.feeding.concurrency(0.35);
boolean hasAnyNonIndexedCluster = false;
for (NewDocumentType type : TopologicalDocumentTypeSorter.sort(documentDefinitions.values())) {
ProtonConfig.Documentdb.Builder ddbB = new ProtonConfig.Documentdb.Builder();
String docTypeName = type.getFullName().getName();
boolean globalDocType = isGloballyDistributed(type);
ddbB.inputdoctypename(docTypeName)
.configid(getConfigId())
.visibilitydelay(visibilityDelay)
.global(globalDocType);
Optional<StreamingSearchCluster> ssc = findStreamingCluster(docTypeName);
if (ssc.isPresent()) {
hasAnyNonIndexedCluster = true;
ddbB.inputdoctypename(type.getFullName().getName())
.configid(ssc.get().getDocumentDBConfigId())
.mode(ProtonConfig.Documentdb.Mode.Enum.STREAMING)
.feeding.concurrency(0.0);
} else if (hasIndexedCluster()) {
if (getIndexed().hasDocumentDB(type.getFullName().getName())) {
getIndexed().fillDocumentDBConfig(type.getFullName().getName(), ddbB);
if (tuning != null && tuning.searchNode != null && tuning.searchNode.feeding != null) {
ddbB.feeding.concurrency(tuning.searchNode.feeding.concurrency / 2);
} else {
ddbB.feeding.concurrency(builder.feeding.build().concurrency());
}
} else {
hasAnyNonIndexedCluster = true;
ddbB.feeding.concurrency(0.0);
ddbB.mode(ProtonConfig.Documentdb.Mode.Enum.STORE_ONLY);
}
} else {
hasAnyNonIndexedCluster = true;
ddbB.feeding.concurrency(0.0);
ddbB.mode(ProtonConfig.Documentdb.Mode.Enum.STORE_ONLY);
}
if (globalDocType) {
ddbB.visibilitydelay(0.0);
}
builder.documentdb(ddbB);
}
int numDocumentDbs = builder.documentdb.size();
builder.initialize(new ProtonConfig.Initialize.Builder().threads(numDocumentDbs + 1));
if (resourceLimits.isPresent()) {
resourceLimits.get().getConfig(builder);
}
if (tuning != null) {
tuning.getConfig(builder);
}
if (redundancy != null) {
redundancy.getConfig(builder);
}
if (hasAnyNonIndexedCluster) {
builder.feeding.concurrency(builder.feeding.build().concurrency() * 2);
}
} | builder.feeding.concurrency(0.35); | public void getConfig(ProtonConfig.Builder builder) {
double visibilityDelay = hasIndexedCluster() ? getIndexed().getVisibilityDelay() : 0.0;
builder.feeding.concurrency(0.35);
boolean hasAnyNonIndexedCluster = false;
for (NewDocumentType type : TopologicalDocumentTypeSorter.sort(documentDefinitions.values())) {
ProtonConfig.Documentdb.Builder ddbB = new ProtonConfig.Documentdb.Builder();
String docTypeName = type.getFullName().getName();
boolean globalDocType = isGloballyDistributed(type);
ddbB.inputdoctypename(docTypeName)
.configid(getConfigId())
.visibilitydelay(visibilityDelay)
.global(globalDocType);
Optional<StreamingSearchCluster> ssc = findStreamingCluster(docTypeName);
if (ssc.isPresent()) {
hasAnyNonIndexedCluster = true;
ddbB.inputdoctypename(type.getFullName().getName())
.configid(ssc.get().getDocumentDBConfigId())
.mode(ProtonConfig.Documentdb.Mode.Enum.STREAMING)
.feeding.concurrency(0.0);
} else if (hasIndexedCluster()) {
if (getIndexed().hasDocumentDB(type.getFullName().getName())) {
getIndexed().fillDocumentDBConfig(type.getFullName().getName(), ddbB);
if (tuning != null && tuning.searchNode != null && tuning.searchNode.feeding != null) {
ddbB.feeding.concurrency(tuning.searchNode.feeding.concurrency / 2);
} else {
ddbB.feeding.concurrency(builder.feeding.build().concurrency());
}
} else {
hasAnyNonIndexedCluster = true;
ddbB.feeding.concurrency(0.0);
ddbB.mode(ProtonConfig.Documentdb.Mode.Enum.STORE_ONLY);
}
} else {
hasAnyNonIndexedCluster = true;
ddbB.feeding.concurrency(0.0);
ddbB.mode(ProtonConfig.Documentdb.Mode.Enum.STORE_ONLY);
}
if (globalDocType) {
ddbB.visibilitydelay(0.0);
}
builder.documentdb(ddbB);
}
int numDocumentDbs = builder.documentdb.size();
builder.initialize(new ProtonConfig.Initialize.Builder().threads(numDocumentDbs + 1));
if (resourceLimits.isPresent()) {
resourceLimits.get().getConfig(builder);
}
if (tuning != null) {
tuning.getConfig(builder);
}
if (redundancy != null) {
redundancy.getConfig(builder);
}
if (hasAnyNonIndexedCluster) {
builder.feeding.concurrency(builder.feeding.build().concurrency() * 2);
}
} | class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> {
private final Map<String, NewDocumentType> documentDefinitions;
private final Set<NewDocumentType> globallyDistributedDocuments;
public Builder(Map<String, NewDocumentType> documentDefinitions,
Set<NewDocumentType> globallyDistributedDocuments) {
this.documentDefinitions = documentDefinitions;
this.globallyDistributedDocuments = globallyDistributedDocuments;
}
@Override
protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
ModelElement clusterElem = new ModelElement(producerSpec);
String clusterName = ContentCluster.getClusterName(clusterElem);
Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown");
ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, documentDefinitions, globallyDistributedDocuments,
getFlushOnShutdown(flushOnShutdownElem, deployState));
ModelElement tuning = clusterElem.childByPath("engine.proton.tuning");
if (tuning != null) {
search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml()));
}
ModelElement protonElem = clusterElem.childByPath("engine.proton");
if (protonElem != null) {
search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem));
}
buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search);
buildIndexedSearchCluster(deployState, clusterElem, clusterName, search);
return search;
}
private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) {
if (flushOnShutdownElem != null) {
return flushOnShutdownElem;
}
return ! stateIsHosted(deployState);
}
private Double getQueryTimeout(ModelElement clusterElem) {
return clusterElem.childAsDouble("engine.proton.query-timeout");
}
private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) {
ModelElement docElem = clusterElem.child("documents");
if (docElem == null) {
return;
}
for (ModelElement docType : docElem.subElements("document")) {
String mode = docType.stringAttribute("mode");
if ("streaming".equals(mode)) {
buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType);
}
}
}
private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName,
ContentSearchCluster search, ModelElement docType) {
String docTypeName = docType.stringAttribute("type");
StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName);
search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType));
}
private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem,
String clusterName, ContentSearchCluster search) {
List<ModelElement> indexedDefs = getIndexedSearchDefinitions(clusterElem);
if (!indexedDefs.isEmpty()) {
IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState);
isc.setRoutingSelector(clusterElem.childAsString("documents.selection"));
Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay");
if (visibilityDelay != null) {
isc.setVisibilityDelay(visibilityDelay);
}
search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs);
}
}
private List<ModelElement> getIndexedSearchDefinitions(ModelElement clusterElem) {
List<ModelElement> indexedDefs = new ArrayList<>();
ModelElement docElem = clusterElem.child("documents");
if (docElem == null) {
return indexedDefs;
}
for (ModelElement docType : docElem.subElements("document")) {
String mode = docType.stringAttribute("mode");
if ("index".equals(mode)) {
indexedDefs.add(docType);
}
}
return indexedDefs;
}
} | class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> {
private final Map<String, NewDocumentType> documentDefinitions;
private final Set<NewDocumentType> globallyDistributedDocuments;
public Builder(Map<String, NewDocumentType> documentDefinitions,
Set<NewDocumentType> globallyDistributedDocuments) {
this.documentDefinitions = documentDefinitions;
this.globallyDistributedDocuments = globallyDistributedDocuments;
}
@Override
protected ContentSearchCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
ModelElement clusterElem = new ModelElement(producerSpec);
String clusterName = ContentCluster.getClusterName(clusterElem);
Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown");
ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, documentDefinitions, globallyDistributedDocuments,
getFlushOnShutdown(flushOnShutdownElem, deployState));
ModelElement tuning = clusterElem.childByPath("engine.proton.tuning");
if (tuning != null) {
search.setTuning(new DomSearchTuningBuilder().build(deployState, search, tuning.getXml()));
}
ModelElement protonElem = clusterElem.childByPath("engine.proton");
if (protonElem != null) {
search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem));
}
buildAllStreamingSearchClusters(deployState, clusterElem, clusterName, search);
buildIndexedSearchCluster(deployState, clusterElem, clusterName, search);
return search;
}
private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) {
if (flushOnShutdownElem != null) {
return flushOnShutdownElem;
}
return ! stateIsHosted(deployState);
}
private Double getQueryTimeout(ModelElement clusterElem) {
return clusterElem.childAsDouble("engine.proton.query-timeout");
}
private void buildAllStreamingSearchClusters(DeployState deployState, ModelElement clusterElem, String clusterName, ContentSearchCluster search) {
ModelElement docElem = clusterElem.child("documents");
if (docElem == null) {
return;
}
for (ModelElement docType : docElem.subElements("document")) {
String mode = docType.stringAttribute("mode");
if ("streaming".equals(mode)) {
buildStreamingSearchCluster(deployState, clusterElem, clusterName, search, docType);
}
}
}
private void buildStreamingSearchCluster(DeployState deployState, ModelElement clusterElem, String clusterName,
ContentSearchCluster search, ModelElement docType) {
String docTypeName = docType.stringAttribute("type");
StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName);
search.addSearchCluster(deployState, cluster, getQueryTimeout(clusterElem), Arrays.asList(docType));
}
private void buildIndexedSearchCluster(DeployState deployState, ModelElement clusterElem,
String clusterName, ContentSearchCluster search) {
List<ModelElement> indexedDefs = getIndexedSearchDefinitions(clusterElem);
if (!indexedDefs.isEmpty()) {
IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0, deployState);
isc.setRoutingSelector(clusterElem.childAsString("documents.selection"));
Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay");
if (visibilityDelay != null) {
isc.setVisibilityDelay(visibilityDelay);
}
search.addSearchCluster(deployState, isc, getQueryTimeout(clusterElem), indexedDefs);
}
}
private List<ModelElement> getIndexedSearchDefinitions(ModelElement clusterElem) {
List<ModelElement> indexedDefs = new ArrayList<>();
ModelElement docElem = clusterElem.child("documents");
if (docElem == null) {
return indexedDefs;
}
for (ModelElement docType : docElem.subElements("document")) {
String mode = docType.stringAttribute("mode");
if ("index".equals(mode)) {
indexedDefs.add(docType);
}
}
return indexedDefs;
}
} |
Replace "Endpoint responded with a HTTP response code we did not expect" with "Unexpected HTTP response code from endpoint" - there is no we ... | private ThreadState cycle(ThreadState threadState) {
switch(threadState) {
case DISCONNECTED:
try {
if (! client.connect()) {
log.log(Level.WARNING, "Could not connect to endpoint: '" + endpoint + "'. Will re-try.");
drainFirstDocumentsInQueueIfOld();
return ThreadState.DISCONNECTED;
}
return ThreadState.CONNECTED;
} catch (Throwable throwable1) {
drainFirstDocumentsInQueueIfOld();
log.log(Level.INFO, "Failed connecting to endpoint: '" + endpoint
+ "'. Will re-try connecting. Failed with '" + Exceptions.toMessageString(throwable1) + "'",throwable1);
executeProblemsCounter.incrementAndGet();
return ThreadState.DISCONNECTED;
}
case CONNECTED:
try {
client.handshake();
successfulHandshakes.getAndIncrement();
} catch (ServerResponseException ser) {
executeProblemsCounter.incrementAndGet();
log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint
+ "' failed. Will re-try handshake. Failed with '" + Exceptions.toMessageString(ser) + "'",ser);
drainFirstDocumentsInQueueIfOld();
resultQueue.onEndpointError(new FeedProtocolException(ser.getResponseCode(), ser.getResponseString(), ser, endpoint));
return ThreadState.CONNECTED;
} catch (Throwable throwable) {
executeProblemsCounter.incrementAndGet();
resultQueue.onEndpointError(new FeedConnectException(throwable, endpoint));
log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint
+ "' failed. Will re-try handshake. Failed with '" + Exceptions.toMessageString(throwable) + "'",throwable);
drainFirstDocumentsInQueueIfOld();
client.close();
return ThreadState.DISCONNECTED;
}
return ThreadState.SESSION_SYNCED;
case SESSION_SYNCED:
try {
ProcessResponse processResponse = pullAndProcessData(1);
gatewayThrottler.handleCall(processResponse.transitiveErrorCount);
}
catch (ServerResponseException ser) {
log.log(Level.INFO, "Problems while handing data over to endpoint '" + endpoint
+ "'. Will re-try. Endpoint responded with a HTTP response code we did not expect. '"
+ Exceptions.toMessageString(ser) + "'",ser);
return ThreadState.CONNECTED;
}
catch (Throwable e) {
log.log(Level.INFO, "Problems while handing data over to endpoint '" + endpoint
+ "'. Will re-try. Connection level error. Failed with '" + Exceptions.toMessageString(e) + "'", e);
client.close();
return ThreadState.DISCONNECTED;
}
return ThreadState.SESSION_SYNCED;
default: {
log.severe("Should never get here.");
client.close();
return ThreadState.DISCONNECTED;
}
}
} | + "'. Will re-try. Endpoint responded with a HTTP response code we did not expect. '" | private ThreadState cycle(ThreadState threadState) {
switch(threadState) {
case DISCONNECTED:
try {
if (! client.connect()) {
log.log(Level.WARNING, "Could not connect to endpoint: '" + endpoint + "'. Will re-try.");
drainFirstDocumentsInQueueIfOld();
return ThreadState.DISCONNECTED;
}
return ThreadState.CONNECTED;
} catch (Throwable throwable1) {
drainFirstDocumentsInQueueIfOld();
log.log(Level.INFO, "Failed connecting to endpoint: '" + endpoint
+ "'. Will re-try connecting. Failed with '" + Exceptions.toMessageString(throwable1) + "'",throwable1);
executeProblemsCounter.incrementAndGet();
return ThreadState.DISCONNECTED;
}
case CONNECTED:
try {
client.handshake();
successfulHandshakes.getAndIncrement();
} catch (ServerResponseException ser) {
executeProblemsCounter.incrementAndGet();
log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint
+ "' failed. Will re-try handshake. Failed with '" + Exceptions.toMessageString(ser) + "'",ser);
drainFirstDocumentsInQueueIfOld();
resultQueue.onEndpointError(new FeedProtocolException(ser.getResponseCode(), ser.getResponseString(), ser, endpoint));
return ThreadState.CONNECTED;
} catch (Throwable throwable) {
executeProblemsCounter.incrementAndGet();
resultQueue.onEndpointError(new FeedConnectException(throwable, endpoint));
log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint
+ "' failed. Will re-try handshake. Failed with '" + Exceptions.toMessageString(throwable) + "'",throwable);
drainFirstDocumentsInQueueIfOld();
client.close();
return ThreadState.DISCONNECTED;
}
return ThreadState.SESSION_SYNCED;
case SESSION_SYNCED:
try {
ProcessResponse processResponse = pullAndProcessData(1);
gatewayThrottler.handleCall(processResponse.transitiveErrorCount);
}
catch (ServerResponseException ser) {
log.log(Level.INFO, "Problems while handing data over to endpoint '" + endpoint
+ "'. Will re-try. Endpoint responded with an unexpected HTTP response code. '"
+ Exceptions.toMessageString(ser) + "'",ser);
return ThreadState.CONNECTED;
}
catch (Throwable e) {
log.log(Level.INFO, "Problems while handing data over to endpoint '" + endpoint
+ "'. Will re-try. Connection level error. Failed with '" + Exceptions.toMessageString(e) + "'", e);
client.close();
return ThreadState.DISCONNECTED;
}
return ThreadState.SESSION_SYNCED;
default: {
log.severe("Should never get here.");
client.close();
return ThreadState.DISCONNECTED;
}
}
} | class ProcessResponse {
private final int transitiveErrorCount;
private final int processResultsCount;
ProcessResponse(int transitiveErrorCount, int processResultsCount) {
this.transitiveErrorCount = transitiveErrorCount;
this.processResultsCount = processResultsCount;
}
} | class ProcessResponse {
private final int transitiveErrorCount;
private final int processResultsCount;
ProcessResponse(int transitiveErrorCount, int processResultsCount) {
this.transitiveErrorCount = transitiveErrorCount;
this.processResultsCount = processResultsCount;
}
} |
Should this have been `n=6789`? Doesn't seem to have broken anything, so maybe not tested at all... | public Progress process(Processing processing) {
ListIterator<DocumentOperation> it = processing.getDocumentOperations().listIterator();
while (it.hasNext()) {
DocumentOperation op = it.next();
String id = op.getId().toString();
if ("doc:nodocstatus:put:to:put".equals(id)) {
Document doc = ((DocumentPut)op).getDocument();
doc.setFieldValue("foostring", new StringFieldValue("banana"));
} else if ("doc:nodocstatus:put:to:remove".equals(id)) {
it.set(new DocumentRemove(new DocumentId(id)));
} else if ("doc:nodocstatus:put:to:update".equals(id)) {
it.set(new DocumentUpdate(getType(), id));
} else if ("doc:nodocstatus:put:to:nothing".equals(id)) {
it.remove();
} else if ("doc:nodocstatus:remove:to:put".equals(id)) {
it.set(new DocumentPut(getType(), op.getId()));
} else if ("doc:nodocstatus:remove:to:remove".equals(id)) {
} else if ("doc:nodocstatus:remove:to:update".equals(id)) {
it.set(new DocumentUpdate(getType(), id));
} else if ("doc:nodocstatus:remove:to:nothing".equals(id)) {
it.remove();
} else if ("doc:nodocstatus:update:to:put".equals(id)) {
it.set(new DocumentPut(getType(), op.getId()));
} else if ("doc:nodocstatus:update:to:remove".equals(id)) {
it.set(new DocumentRemove(new DocumentId(id)));
} else if ("doc:nodocstatus:update:to:update".equals(id)) {
} else if ("doc:nodocstatus:update:to:nothing".equals(id)) {
it.remove();
} else if ("id:12345:6789:multiop:nodocstatus:keep:this".equals(id)) {
}
}
return Progress.DONE;
} | } else if ("id:12345:6789:multiop:nodocstatus:keep:this".equals(id)) { | public Progress process(Processing processing) {
ListIterator<DocumentOperation> it = processing.getDocumentOperations().listIterator();
while (it.hasNext()) {
DocumentOperation op = it.next();
String id = op.getId().toString();
if ("doc:nodocstatus:put:to:put".equals(id)) {
Document doc = ((DocumentPut)op).getDocument();
doc.setFieldValue("foostring", new StringFieldValue("banana"));
} else if ("doc:nodocstatus:put:to:remove".equals(id)) {
it.set(new DocumentRemove(new DocumentId(id)));
} else if ("doc:nodocstatus:put:to:update".equals(id)) {
it.set(new DocumentUpdate(getType(), id));
} else if ("doc:nodocstatus:put:to:nothing".equals(id)) {
it.remove();
} else if ("doc:nodocstatus:remove:to:put".equals(id)) {
it.set(new DocumentPut(getType(), op.getId()));
} else if ("doc:nodocstatus:remove:to:remove".equals(id)) {
} else if ("doc:nodocstatus:remove:to:update".equals(id)) {
it.set(new DocumentUpdate(getType(), id));
} else if ("doc:nodocstatus:remove:to:nothing".equals(id)) {
it.remove();
} else if ("doc:nodocstatus:update:to:put".equals(id)) {
it.set(new DocumentPut(getType(), op.getId()));
} else if ("doc:nodocstatus:update:to:remove".equals(id)) {
it.set(new DocumentRemove(new DocumentId(id)));
} else if ("doc:nodocstatus:update:to:update".equals(id)) {
} else if ("doc:nodocstatus:update:to:nothing".equals(id)) {
it.remove();
} else if ("id:12345:6789:multiop:nodocstatus:keep:this".equals(id)) {
}
}
return Progress.DONE;
} | class TransformingDocumentProcessor extends DocumentProcessor {
@Override
} | class TransformingDocumentProcessor extends DocumentProcessor {
@Override
} |
Good point, will check. | public Progress process(Processing processing) {
ListIterator<DocumentOperation> it = processing.getDocumentOperations().listIterator();
while (it.hasNext()) {
DocumentOperation op = it.next();
String id = op.getId().toString();
if ("doc:nodocstatus:put:to:put".equals(id)) {
Document doc = ((DocumentPut)op).getDocument();
doc.setFieldValue("foostring", new StringFieldValue("banana"));
} else if ("doc:nodocstatus:put:to:remove".equals(id)) {
it.set(new DocumentRemove(new DocumentId(id)));
} else if ("doc:nodocstatus:put:to:update".equals(id)) {
it.set(new DocumentUpdate(getType(), id));
} else if ("doc:nodocstatus:put:to:nothing".equals(id)) {
it.remove();
} else if ("doc:nodocstatus:remove:to:put".equals(id)) {
it.set(new DocumentPut(getType(), op.getId()));
} else if ("doc:nodocstatus:remove:to:remove".equals(id)) {
} else if ("doc:nodocstatus:remove:to:update".equals(id)) {
it.set(new DocumentUpdate(getType(), id));
} else if ("doc:nodocstatus:remove:to:nothing".equals(id)) {
it.remove();
} else if ("doc:nodocstatus:update:to:put".equals(id)) {
it.set(new DocumentPut(getType(), op.getId()));
} else if ("doc:nodocstatus:update:to:remove".equals(id)) {
it.set(new DocumentRemove(new DocumentId(id)));
} else if ("doc:nodocstatus:update:to:update".equals(id)) {
} else if ("doc:nodocstatus:update:to:nothing".equals(id)) {
it.remove();
} else if ("id:12345:6789:multiop:nodocstatus:keep:this".equals(id)) {
}
}
return Progress.DONE;
} | } else if ("id:12345:6789:multiop:nodocstatus:keep:this".equals(id)) { | public Progress process(Processing processing) {
ListIterator<DocumentOperation> it = processing.getDocumentOperations().listIterator();
while (it.hasNext()) {
DocumentOperation op = it.next();
String id = op.getId().toString();
if ("doc:nodocstatus:put:to:put".equals(id)) {
Document doc = ((DocumentPut)op).getDocument();
doc.setFieldValue("foostring", new StringFieldValue("banana"));
} else if ("doc:nodocstatus:put:to:remove".equals(id)) {
it.set(new DocumentRemove(new DocumentId(id)));
} else if ("doc:nodocstatus:put:to:update".equals(id)) {
it.set(new DocumentUpdate(getType(), id));
} else if ("doc:nodocstatus:put:to:nothing".equals(id)) {
it.remove();
} else if ("doc:nodocstatus:remove:to:put".equals(id)) {
it.set(new DocumentPut(getType(), op.getId()));
} else if ("doc:nodocstatus:remove:to:remove".equals(id)) {
} else if ("doc:nodocstatus:remove:to:update".equals(id)) {
it.set(new DocumentUpdate(getType(), id));
} else if ("doc:nodocstatus:remove:to:nothing".equals(id)) {
it.remove();
} else if ("doc:nodocstatus:update:to:put".equals(id)) {
it.set(new DocumentPut(getType(), op.getId()));
} else if ("doc:nodocstatus:update:to:remove".equals(id)) {
it.set(new DocumentRemove(new DocumentId(id)));
} else if ("doc:nodocstatus:update:to:update".equals(id)) {
} else if ("doc:nodocstatus:update:to:nothing".equals(id)) {
it.remove();
} else if ("id:12345:6789:multiop:nodocstatus:keep:this".equals(id)) {
}
}
return Progress.DONE;
} | class TransformingDocumentProcessor extends DocumentProcessor {
@Override
} | class TransformingDocumentProcessor extends DocumentProcessor {
@Override
} |
Orderdoc support appears removed entirely from the `SelectParser.jj` grammar already; also remove this branch & comment? | public Object evaluate(Context context) {
DocumentId id = context.getDocumentOperation().getId();
if (id == null) {
throw new IllegalStateException("Document has no identifier.");
}
if (field == null) {
return id.toString();
} else if (field.equalsIgnoreCase("scheme")) {
return id.getScheme().getType().toString();
} else if (field.equalsIgnoreCase("namespace")) {
return id.getScheme().getNamespace();
} else if (field.equalsIgnoreCase("specific")) {
return id.getScheme().getNamespaceSpecific();
} else if (field.equalsIgnoreCase("group")) {
if (id.getScheme().hasGroup()) {
return id.getScheme().getGroup();
}
throw new IllegalStateException("Group identifier is null.");
} else if (field.equalsIgnoreCase("user")) {
if (id.getScheme().hasNumber()) {
return id.getScheme().getNumber();
}
throw new IllegalStateException("User identifier is null.");
} else if (field.equalsIgnoreCase("type")) {
if (id.getScheme().hasDocType()) {
return id.getScheme().getDocType();
}
throw new IllegalStateException("Document id doesn't have doc type.");
} else if (field.equalsIgnoreCase("order")) {
} else {
throw new IllegalStateException("Identifier field '" + field + "' is not supported.");
}
return null;
} | public Object evaluate(Context context) {
DocumentId id = context.getDocumentOperation().getId();
if (id == null) {
throw new IllegalStateException("Document has no identifier.");
}
if (field == null) {
return id.toString();
} else if (field.equalsIgnoreCase("scheme")) {
return id.getScheme().getType().toString();
} else if (field.equalsIgnoreCase("namespace")) {
return id.getScheme().getNamespace();
} else if (field.equalsIgnoreCase("specific")) {
return id.getScheme().getNamespaceSpecific();
} else if (field.equalsIgnoreCase("group")) {
if (id.getScheme().hasGroup()) {
return id.getScheme().getGroup();
}
throw new IllegalStateException("Group identifier is null.");
} else if (field.equalsIgnoreCase("user")) {
if (id.getScheme().hasNumber()) {
return id.getScheme().getNumber();
}
throw new IllegalStateException("User identifier is null.");
} else if (field.equalsIgnoreCase("type")) {
if (id.getScheme().hasDocType()) {
return id.getScheme().getDocType();
}
throw new IllegalStateException("Document id doesn't have doc type.");
} else {
throw new IllegalStateException("Identifier field '" + field + "' is not supported.");
}
} | class IdNode implements ExpressionNode {
private String field;
private short widthBits = -1;
private short divisionBits = -1;
public IdNode() {
}
public String getField() {
return field;
}
public IdNode setField(String field) {
this.field = field;
return this;
}
public IdNode setWidthBits(short widthBits) {
this.widthBits = widthBits;
return this;
}
public IdNode setDivisionBits(short divisionBits) {
this.divisionBits = divisionBits;
return this;
}
public short getWidthBits() {
return widthBits;
}
public short getDivisionBits() {
return divisionBits;
}
@Override
public BucketSet getBucketSet(BucketIdFactory factory) {
return null;
}
@Override
@Override
public void accept(Visitor visitor) {
visitor.visit(this);
}
@Override
public String toString() {
return "id" + (field != null ? "." + field : "") + (widthBits != -1 ? "(" + widthBits + "," + divisionBits + ")" : "");
}
} | class IdNode implements ExpressionNode {
private String field;
private short widthBits = -1;
private short divisionBits = -1;
public IdNode() {
}
public String getField() {
return field;
}
public IdNode setField(String field) {
this.field = field;
return this;
}
public IdNode setWidthBits(short widthBits) {
this.widthBits = widthBits;
return this;
}
public IdNode setDivisionBits(short divisionBits) {
this.divisionBits = divisionBits;
return this;
}
@Override
public BucketSet getBucketSet(BucketIdFactory factory) {
return null;
}
@Override
@Override
public void accept(Visitor visitor) {
visitor.visit(this);
}
@Override
public String toString() {
return "id" + (field != null ? "." + field : "") + (widthBits != -1 ? "(" + widthBits + "," + divisionBits + ")" : "");
}
} | |
If so we will have a bug we will be able to spot, instead of a silent one we will never see.. There are tests that inserts fail with wrong Item so I think keeping the error checking in one place is a lot cleaner. | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) { | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} |
Why is the location number changed here? Equivalent test in C++ tests with MSB set. | public void testToBucketId() {
verifyGidToBucketIdMapping("id:ns:mytype:n=1:abc");
verifyGidToBucketIdMapping("id:ns:mytype:n=1000:abc");
verifyGidToBucketIdMapping("id:hsgf:mytype:n=9146744073700000000:dfdfsdfg");
verifyGidToBucketIdMapping("id:ns:mytype:g=somegroup:hmm");
verifyGidToBucketIdMapping("doc:foo:test");
verifyGidToBucketIdMapping("doc:myns:http:
verifyGidToBucketIdMapping("doc:jsrthsdf:a234aleingzldkifvasdfgadf");
} | verifyGidToBucketIdMapping("id:hsgf:mytype:n=9146744073700000000:dfdfsdfg"); | public void testToBucketId() {
verifyGidToBucketIdMapping("id:ns:mytype:n=1:abc");
verifyGidToBucketIdMapping("id:ns:mytype:n=1000:abc");
verifyGidToBucketIdMapping("id:hsgf:mytype:n=9146744073700000000:dfdfsdfg");
verifyGidToBucketIdMapping("id:ns:mytype:g=somegroup:hmm");
verifyGidToBucketIdMapping("doc:foo:test");
verifyGidToBucketIdMapping("doc:myns:http:
verifyGidToBucketIdMapping("doc:jsrthsdf:a234aleingzldkifvasdfgadf");
} | class GlobalIdTestCase {
private final byte[] raw0 = new byte[0];
private final byte[] raw1_0 = new byte[]{(byte) 0};
private final byte[] raw2_11 = new byte[]{(byte) 1, (byte) 1};
private final byte[] raw2_minus1_1 = new byte[]{(byte) -1, (byte) 1};
private final byte[] raw12_1to12 = new byte[]{(byte) 1, (byte) 2, (byte) 3, (byte) 4, (byte) 5, (byte) 6, (byte) 7,
(byte) 8, (byte) 9, (byte) 10, (byte) 11, (byte) 12};
private final byte[] raw13 = new byte[]{(byte) 1, (byte) 2, (byte) 3, (byte) 4, (byte) 5, (byte) 6, (byte) 7,
(byte) 8, (byte) 9, (byte) 10, (byte) 11, (byte) 12, (byte) 13};
private final BucketIdFactory bucketIdFactory = new BucketIdFactory();
@Test
public void testRaw0() {
GlobalId gid = new GlobalId(raw0);
assertEquals(12, gid.getRawId().length);
byte[] raw = gid.getRawId();
for (byte b : raw) {
assertEquals((byte) 0, b);
}
GlobalId gid2 = new GlobalId(raw1_0);
assertEquals(12, gid2.getRawId().length);
byte[] raw2 = gid2.getRawId();
for (byte b : raw2) {
assertEquals((byte) 0, b);
}
assertEquals(gid, gid2);
assertTrue(Arrays.equals(raw, raw2));
assertEquals(gid.hashCode(), gid2.hashCode());
}
@Test
public void testLonger() {
GlobalId gid1 = new GlobalId(raw2_11);
GlobalId gid2 = new GlobalId(raw2_minus1_1);
assertFalse(gid1.equals(gid2));
assertFalse(gid1.hashCode() == gid2.hashCode());
GlobalId gid3 = new GlobalId(raw13);
GlobalId gid4 = new GlobalId(raw12_1to12);
assertEquals(gid3, gid4);
assertEquals(gid3.hashCode(), gid4.hashCode());
}
@Test
public void testCompareTo() {
GlobalId gid0 = new GlobalId(raw1_0);
GlobalId gid11 = new GlobalId(raw2_11);
GlobalId gidminus11 = new GlobalId(raw2_minus1_1);
assertEquals(-1, gid0.compareTo(gid11));
assertEquals(1, gid11.compareTo(gid0));
assertEquals(-1, gid0.compareTo(gidminus11));
assertEquals(1, gidminus11.compareTo(gid0));
assertEquals(-1, gid11.compareTo(gidminus11));
assertEquals(1, gidminus11.compareTo(gid11));
}
private void verifyGidToBucketIdMapping(String idString) {
DocumentId documentId = new DocumentId(idString);
GlobalId globalId = new GlobalId(documentId.getGlobalId());
BucketId bucketIdThroughGlobalId = globalId.toBucketId();
BucketId bucketIdThroughFactory = bucketIdFactory.getBucketId(documentId);
assertEquals(bucketIdThroughFactory, bucketIdThroughGlobalId);
}
@Test
} | class GlobalIdTestCase {
private final byte[] raw0 = new byte[0];
private final byte[] raw1_0 = new byte[]{(byte) 0};
private final byte[] raw2_11 = new byte[]{(byte) 1, (byte) 1};
private final byte[] raw2_minus1_1 = new byte[]{(byte) -1, (byte) 1};
private final byte[] raw12_1to12 = new byte[]{(byte) 1, (byte) 2, (byte) 3, (byte) 4, (byte) 5, (byte) 6, (byte) 7,
(byte) 8, (byte) 9, (byte) 10, (byte) 11, (byte) 12};
private final byte[] raw13 = new byte[]{(byte) 1, (byte) 2, (byte) 3, (byte) 4, (byte) 5, (byte) 6, (byte) 7,
(byte) 8, (byte) 9, (byte) 10, (byte) 11, (byte) 12, (byte) 13};
private final BucketIdFactory bucketIdFactory = new BucketIdFactory();
@Test
public void testRaw0() {
GlobalId gid = new GlobalId(raw0);
assertEquals(12, gid.getRawId().length);
byte[] raw = gid.getRawId();
for (byte b : raw) {
assertEquals((byte) 0, b);
}
GlobalId gid2 = new GlobalId(raw1_0);
assertEquals(12, gid2.getRawId().length);
byte[] raw2 = gid2.getRawId();
for (byte b : raw2) {
assertEquals((byte) 0, b);
}
assertEquals(gid, gid2);
assertTrue(Arrays.equals(raw, raw2));
assertEquals(gid.hashCode(), gid2.hashCode());
}
@Test
public void testLonger() {
GlobalId gid1 = new GlobalId(raw2_11);
GlobalId gid2 = new GlobalId(raw2_minus1_1);
assertFalse(gid1.equals(gid2));
assertFalse(gid1.hashCode() == gid2.hashCode());
GlobalId gid3 = new GlobalId(raw13);
GlobalId gid4 = new GlobalId(raw12_1to12);
assertEquals(gid3, gid4);
assertEquals(gid3.hashCode(), gid4.hashCode());
}
@Test
public void testCompareTo() {
GlobalId gid0 = new GlobalId(raw1_0);
GlobalId gid11 = new GlobalId(raw2_11);
GlobalId gidminus11 = new GlobalId(raw2_minus1_1);
assertEquals(-1, gid0.compareTo(gid11));
assertEquals(1, gid11.compareTo(gid0));
assertEquals(-1, gid0.compareTo(gidminus11));
assertEquals(1, gidminus11.compareTo(gid0));
assertEquals(-1, gid11.compareTo(gidminus11));
assertEquals(1, gidminus11.compareTo(gid11));
}
private void verifyGidToBucketIdMapping(String idString) {
DocumentId documentId = new DocumentId(idString);
GlobalId globalId = new GlobalId(documentId.getGlobalId());
BucketId bucketIdThroughGlobalId = globalId.toBucketId();
BucketId bucketIdThroughFactory = bucketIdFactory.getBucketId(documentId);
assertEquals(bucketIdThroughFactory, bucketIdThroughGlobalId);
}
@Test
} |
Consider adding a comment that this is an unused ordering spec, cf. the `getInt` above | protected boolean doEncode(DocumentMessage obj, DocumentSerializer buf) {
CreateVisitorMessage msg = (CreateVisitorMessage)obj;
encodeString(msg.getLibraryName(), buf);
encodeString(msg.getInstanceId(), buf);
encodeString(msg.getControlDestination(), buf);
encodeString(msg.getDataDestination(), buf);
encodeString(msg.getDocumentSelection(), buf);
buf.putInt(null, msg.getMaxPendingReplyCount());
buf.putInt(null, msg.getBuckets().size());
for (BucketId id : msg.getBuckets()) {
long rawid = id.getRawId();
long reversed = ((rawid >>> 56) & 0x00000000000000FFl) | ((rawid >>> 40) & 0x000000000000FF00l) |
((rawid >>> 24) & 0x0000000000FF0000l) | ((rawid >>> 8) & 0x00000000FF000000l) |
((rawid << 8) & 0x000000FF00000000l) | ((rawid << 24) & 0x0000FF0000000000l) |
((rawid << 40) & 0x00FF000000000000l) | ((rawid << 56) & 0xFF00000000000000l);
buf.putLong(null, reversed);
}
buf.putLong(null, msg.getFromTimestamp());
buf.putLong(null, msg.getToTimestamp());
buf.putByte(null, msg.getVisitRemoves() ? (byte)1 : (byte)0);
encodeString(msg.getFieldSet(), buf);
buf.putByte(null, msg.getVisitInconsistentBuckets() ? (byte)1 : (byte)0);
buf.putInt(null, msg.getParameters().size());
for (Map.Entry<String, byte[]> pairs : msg.getParameters().entrySet()) {
encodeString(pairs.getKey(), buf);
byte[] b = pairs.getValue();
buf.putInt(null, b.length);
buf.put(null, b);
}
buf.putInt(null, 0);
buf.putInt(null, msg.getMaxBucketsPerVisitor());
return encodeBucketSpace(msg.getBucketSpace(), buf);
} | buf.putInt(null, 0); | protected boolean doEncode(DocumentMessage obj, DocumentSerializer buf) {
CreateVisitorMessage msg = (CreateVisitorMessage)obj;
encodeString(msg.getLibraryName(), buf);
encodeString(msg.getInstanceId(), buf);
encodeString(msg.getControlDestination(), buf);
encodeString(msg.getDataDestination(), buf);
encodeString(msg.getDocumentSelection(), buf);
buf.putInt(null, msg.getMaxPendingReplyCount());
buf.putInt(null, msg.getBuckets().size());
for (BucketId id : msg.getBuckets()) {
long rawid = id.getRawId();
long reversed = ((rawid >>> 56) & 0x00000000000000FFl) | ((rawid >>> 40) & 0x000000000000FF00l) |
((rawid >>> 24) & 0x0000000000FF0000l) | ((rawid >>> 8) & 0x00000000FF000000l) |
((rawid << 8) & 0x000000FF00000000l) | ((rawid << 24) & 0x0000FF0000000000l) |
((rawid << 40) & 0x00FF000000000000l) | ((rawid << 56) & 0xFF00000000000000l);
buf.putLong(null, reversed);
}
buf.putLong(null, msg.getFromTimestamp());
buf.putLong(null, msg.getToTimestamp());
buf.putByte(null, msg.getVisitRemoves() ? (byte)1 : (byte)0);
encodeString(msg.getFieldSet(), buf);
buf.putByte(null, msg.getVisitInconsistentBuckets() ? (byte)1 : (byte)0);
buf.putInt(null, msg.getParameters().size());
for (Map.Entry<String, byte[]> pairs : msg.getParameters().entrySet()) {
encodeString(pairs.getKey(), buf);
byte[] b = pairs.getValue();
buf.putInt(null, b.length);
buf.put(null, b);
}
buf.putInt(null, 0);
buf.putInt(null, msg.getMaxBucketsPerVisitor());
return encodeBucketSpace(msg.getBucketSpace(), buf);
} | class CreateVisitorMessageFactory extends DocumentMessageFactory {
protected String decodeBucketSpace(Deserializer deserializer) {
return decodeString(deserializer);
}
@Override
protected DocumentMessage doDecode(DocumentDeserializer buf) {
CreateVisitorMessage msg = new CreateVisitorMessage();
msg.setLibraryName(decodeString(buf));
msg.setInstanceId(decodeString(buf));
msg.setControlDestination(decodeString(buf));
msg.setDataDestination(decodeString(buf));
msg.setDocumentSelection(decodeString(buf));
msg.setMaxPendingReplyCount(buf.getInt(null));
int size = buf.getInt(null);
for (int i = 0; i < size; i++) {
long reversed = buf.getLong(null);
long rawid = ((reversed >>> 56) & 0x00000000000000FFl) | ((reversed >>> 40) & 0x000000000000FF00l) |
((reversed >>> 24) & 0x0000000000FF0000l) | ((reversed >>> 8) & 0x00000000FF000000l) |
((reversed << 8) & 0x000000FF00000000l) | ((reversed << 24) & 0x0000FF0000000000l) |
((reversed << 40) & 0x00FF000000000000l) | ((reversed << 56) & 0xFF00000000000000l);
msg.getBuckets().add(new BucketId(rawid));
}
msg.setFromTimestamp(buf.getLong(null));
msg.setToTimestamp(buf.getLong(null));
msg.setVisitRemoves(buf.getByte(null) == (byte)1);
msg.setFieldSet(decodeString(buf));
msg.setVisitInconsistentBuckets(buf.getByte(null) == (byte)1);
size = buf.getInt(null);
for (int i = 0; i < size; i++) {
String key = decodeString(buf);
int sz = buf.getInt(null);
msg.getParameters().put(key, buf.getBytes(null, sz));
}
buf.getInt(null);
msg.setMaxBucketsPerVisitor(buf.getInt(null));
msg.setBucketSpace(decodeBucketSpace(buf));
return msg;
}
protected boolean encodeBucketSpace(String bucketSpace, DocumentSerializer buf) {
encodeString(bucketSpace, buf);
return true;
}
@Override
} | class CreateVisitorMessageFactory extends DocumentMessageFactory {
protected String decodeBucketSpace(Deserializer deserializer) {
return decodeString(deserializer);
}
@Override
protected DocumentMessage doDecode(DocumentDeserializer buf) {
CreateVisitorMessage msg = new CreateVisitorMessage();
msg.setLibraryName(decodeString(buf));
msg.setInstanceId(decodeString(buf));
msg.setControlDestination(decodeString(buf));
msg.setDataDestination(decodeString(buf));
msg.setDocumentSelection(decodeString(buf));
msg.setMaxPendingReplyCount(buf.getInt(null));
int size = buf.getInt(null);
for (int i = 0; i < size; i++) {
long reversed = buf.getLong(null);
long rawid = ((reversed >>> 56) & 0x00000000000000FFl) | ((reversed >>> 40) & 0x000000000000FF00l) |
((reversed >>> 24) & 0x0000000000FF0000l) | ((reversed >>> 8) & 0x00000000FF000000l) |
((reversed << 8) & 0x000000FF00000000l) | ((reversed << 24) & 0x0000FF0000000000l) |
((reversed << 40) & 0x00FF000000000000l) | ((reversed << 56) & 0xFF00000000000000l);
msg.getBuckets().add(new BucketId(rawid));
}
msg.setFromTimestamp(buf.getLong(null));
msg.setToTimestamp(buf.getLong(null));
msg.setVisitRemoves(buf.getByte(null) == (byte)1);
msg.setFieldSet(decodeString(buf));
msg.setVisitInconsistentBuckets(buf.getByte(null) == (byte)1);
size = buf.getInt(null);
for (int i = 0; i < size; i++) {
String key = decodeString(buf);
int sz = buf.getInt(null);
msg.getParameters().put(key, buf.getBytes(null, sz));
}
buf.getInt(null);
msg.setMaxBucketsPerVisitor(buf.getInt(null));
msg.setBucketSpace(decodeBucketSpace(buf));
return msg;
}
protected boolean encodeBucketSpace(String bucketSpace, DocumentSerializer buf) {
encodeString(bucketSpace, buf);
return true;
}
@Override
} |
Fixed | public Object evaluate(Context context) {
DocumentId id = context.getDocumentOperation().getId();
if (id == null) {
throw new IllegalStateException("Document has no identifier.");
}
if (field == null) {
return id.toString();
} else if (field.equalsIgnoreCase("scheme")) {
return id.getScheme().getType().toString();
} else if (field.equalsIgnoreCase("namespace")) {
return id.getScheme().getNamespace();
} else if (field.equalsIgnoreCase("specific")) {
return id.getScheme().getNamespaceSpecific();
} else if (field.equalsIgnoreCase("group")) {
if (id.getScheme().hasGroup()) {
return id.getScheme().getGroup();
}
throw new IllegalStateException("Group identifier is null.");
} else if (field.equalsIgnoreCase("user")) {
if (id.getScheme().hasNumber()) {
return id.getScheme().getNumber();
}
throw new IllegalStateException("User identifier is null.");
} else if (field.equalsIgnoreCase("type")) {
if (id.getScheme().hasDocType()) {
return id.getScheme().getDocType();
}
throw new IllegalStateException("Document id doesn't have doc type.");
} else if (field.equalsIgnoreCase("order")) {
} else {
throw new IllegalStateException("Identifier field '" + field + "' is not supported.");
}
return null;
} | public Object evaluate(Context context) {
DocumentId id = context.getDocumentOperation().getId();
if (id == null) {
throw new IllegalStateException("Document has no identifier.");
}
if (field == null) {
return id.toString();
} else if (field.equalsIgnoreCase("scheme")) {
return id.getScheme().getType().toString();
} else if (field.equalsIgnoreCase("namespace")) {
return id.getScheme().getNamespace();
} else if (field.equalsIgnoreCase("specific")) {
return id.getScheme().getNamespaceSpecific();
} else if (field.equalsIgnoreCase("group")) {
if (id.getScheme().hasGroup()) {
return id.getScheme().getGroup();
}
throw new IllegalStateException("Group identifier is null.");
} else if (field.equalsIgnoreCase("user")) {
if (id.getScheme().hasNumber()) {
return id.getScheme().getNumber();
}
throw new IllegalStateException("User identifier is null.");
} else if (field.equalsIgnoreCase("type")) {
if (id.getScheme().hasDocType()) {
return id.getScheme().getDocType();
}
throw new IllegalStateException("Document id doesn't have doc type.");
} else {
throw new IllegalStateException("Identifier field '" + field + "' is not supported.");
}
} | class IdNode implements ExpressionNode {
private String field;
private short widthBits = -1;
private short divisionBits = -1;
public IdNode() {
}
public String getField() {
return field;
}
public IdNode setField(String field) {
this.field = field;
return this;
}
public IdNode setWidthBits(short widthBits) {
this.widthBits = widthBits;
return this;
}
public IdNode setDivisionBits(short divisionBits) {
this.divisionBits = divisionBits;
return this;
}
public short getWidthBits() {
return widthBits;
}
public short getDivisionBits() {
return divisionBits;
}
@Override
public BucketSet getBucketSet(BucketIdFactory factory) {
return null;
}
@Override
@Override
public void accept(Visitor visitor) {
visitor.visit(this);
}
@Override
public String toString() {
return "id" + (field != null ? "." + field : "") + (widthBits != -1 ? "(" + widthBits + "," + divisionBits + ")" : "");
}
} | class IdNode implements ExpressionNode {
private String field;
private short widthBits = -1;
private short divisionBits = -1;
public IdNode() {
}
public String getField() {
return field;
}
public IdNode setField(String field) {
this.field = field;
return this;
}
public IdNode setWidthBits(short widthBits) {
this.widthBits = widthBits;
return this;
}
public IdNode setDivisionBits(short divisionBits) {
this.divisionBits = divisionBits;
return this;
}
@Override
public BucketSet getBucketSet(BucketIdFactory factory) {
return null;
}
@Override
@Override
public void accept(Visitor visitor) {
visitor.visit(this);
}
@Override
public String toString() {
return "id" + (field != null ? "." + field : "") + (widthBits != -1 ? "(" + widthBits + "," + divisionBits + ")" : "");
}
} | |
Fixed | protected boolean doEncode(DocumentMessage obj, DocumentSerializer buf) {
CreateVisitorMessage msg = (CreateVisitorMessage)obj;
encodeString(msg.getLibraryName(), buf);
encodeString(msg.getInstanceId(), buf);
encodeString(msg.getControlDestination(), buf);
encodeString(msg.getDataDestination(), buf);
encodeString(msg.getDocumentSelection(), buf);
buf.putInt(null, msg.getMaxPendingReplyCount());
buf.putInt(null, msg.getBuckets().size());
for (BucketId id : msg.getBuckets()) {
long rawid = id.getRawId();
long reversed = ((rawid >>> 56) & 0x00000000000000FFl) | ((rawid >>> 40) & 0x000000000000FF00l) |
((rawid >>> 24) & 0x0000000000FF0000l) | ((rawid >>> 8) & 0x00000000FF000000l) |
((rawid << 8) & 0x000000FF00000000l) | ((rawid << 24) & 0x0000FF0000000000l) |
((rawid << 40) & 0x00FF000000000000l) | ((rawid << 56) & 0xFF00000000000000l);
buf.putLong(null, reversed);
}
buf.putLong(null, msg.getFromTimestamp());
buf.putLong(null, msg.getToTimestamp());
buf.putByte(null, msg.getVisitRemoves() ? (byte)1 : (byte)0);
encodeString(msg.getFieldSet(), buf);
buf.putByte(null, msg.getVisitInconsistentBuckets() ? (byte)1 : (byte)0);
buf.putInt(null, msg.getParameters().size());
for (Map.Entry<String, byte[]> pairs : msg.getParameters().entrySet()) {
encodeString(pairs.getKey(), buf);
byte[] b = pairs.getValue();
buf.putInt(null, b.length);
buf.put(null, b);
}
buf.putInt(null, 0);
buf.putInt(null, msg.getMaxBucketsPerVisitor());
return encodeBucketSpace(msg.getBucketSpace(), buf);
} | buf.putInt(null, 0); | protected boolean doEncode(DocumentMessage obj, DocumentSerializer buf) {
CreateVisitorMessage msg = (CreateVisitorMessage)obj;
encodeString(msg.getLibraryName(), buf);
encodeString(msg.getInstanceId(), buf);
encodeString(msg.getControlDestination(), buf);
encodeString(msg.getDataDestination(), buf);
encodeString(msg.getDocumentSelection(), buf);
buf.putInt(null, msg.getMaxPendingReplyCount());
buf.putInt(null, msg.getBuckets().size());
for (BucketId id : msg.getBuckets()) {
long rawid = id.getRawId();
long reversed = ((rawid >>> 56) & 0x00000000000000FFl) | ((rawid >>> 40) & 0x000000000000FF00l) |
((rawid >>> 24) & 0x0000000000FF0000l) | ((rawid >>> 8) & 0x00000000FF000000l) |
((rawid << 8) & 0x000000FF00000000l) | ((rawid << 24) & 0x0000FF0000000000l) |
((rawid << 40) & 0x00FF000000000000l) | ((rawid << 56) & 0xFF00000000000000l);
buf.putLong(null, reversed);
}
buf.putLong(null, msg.getFromTimestamp());
buf.putLong(null, msg.getToTimestamp());
buf.putByte(null, msg.getVisitRemoves() ? (byte)1 : (byte)0);
encodeString(msg.getFieldSet(), buf);
buf.putByte(null, msg.getVisitInconsistentBuckets() ? (byte)1 : (byte)0);
buf.putInt(null, msg.getParameters().size());
for (Map.Entry<String, byte[]> pairs : msg.getParameters().entrySet()) {
encodeString(pairs.getKey(), buf);
byte[] b = pairs.getValue();
buf.putInt(null, b.length);
buf.put(null, b);
}
buf.putInt(null, 0);
buf.putInt(null, msg.getMaxBucketsPerVisitor());
return encodeBucketSpace(msg.getBucketSpace(), buf);
} | class CreateVisitorMessageFactory extends DocumentMessageFactory {
protected String decodeBucketSpace(Deserializer deserializer) {
return decodeString(deserializer);
}
@Override
protected DocumentMessage doDecode(DocumentDeserializer buf) {
CreateVisitorMessage msg = new CreateVisitorMessage();
msg.setLibraryName(decodeString(buf));
msg.setInstanceId(decodeString(buf));
msg.setControlDestination(decodeString(buf));
msg.setDataDestination(decodeString(buf));
msg.setDocumentSelection(decodeString(buf));
msg.setMaxPendingReplyCount(buf.getInt(null));
int size = buf.getInt(null);
for (int i = 0; i < size; i++) {
long reversed = buf.getLong(null);
long rawid = ((reversed >>> 56) & 0x00000000000000FFl) | ((reversed >>> 40) & 0x000000000000FF00l) |
((reversed >>> 24) & 0x0000000000FF0000l) | ((reversed >>> 8) & 0x00000000FF000000l) |
((reversed << 8) & 0x000000FF00000000l) | ((reversed << 24) & 0x0000FF0000000000l) |
((reversed << 40) & 0x00FF000000000000l) | ((reversed << 56) & 0xFF00000000000000l);
msg.getBuckets().add(new BucketId(rawid));
}
msg.setFromTimestamp(buf.getLong(null));
msg.setToTimestamp(buf.getLong(null));
msg.setVisitRemoves(buf.getByte(null) == (byte)1);
msg.setFieldSet(decodeString(buf));
msg.setVisitInconsistentBuckets(buf.getByte(null) == (byte)1);
size = buf.getInt(null);
for (int i = 0; i < size; i++) {
String key = decodeString(buf);
int sz = buf.getInt(null);
msg.getParameters().put(key, buf.getBytes(null, sz));
}
buf.getInt(null);
msg.setMaxBucketsPerVisitor(buf.getInt(null));
msg.setBucketSpace(decodeBucketSpace(buf));
return msg;
}
protected boolean encodeBucketSpace(String bucketSpace, DocumentSerializer buf) {
encodeString(bucketSpace, buf);
return true;
}
@Override
} | class CreateVisitorMessageFactory extends DocumentMessageFactory {
protected String decodeBucketSpace(Deserializer deserializer) {
return decodeString(deserializer);
}
@Override
protected DocumentMessage doDecode(DocumentDeserializer buf) {
CreateVisitorMessage msg = new CreateVisitorMessage();
msg.setLibraryName(decodeString(buf));
msg.setInstanceId(decodeString(buf));
msg.setControlDestination(decodeString(buf));
msg.setDataDestination(decodeString(buf));
msg.setDocumentSelection(decodeString(buf));
msg.setMaxPendingReplyCount(buf.getInt(null));
int size = buf.getInt(null);
for (int i = 0; i < size; i++) {
long reversed = buf.getLong(null);
long rawid = ((reversed >>> 56) & 0x00000000000000FFl) | ((reversed >>> 40) & 0x000000000000FF00l) |
((reversed >>> 24) & 0x0000000000FF0000l) | ((reversed >>> 8) & 0x00000000FF000000l) |
((reversed << 8) & 0x000000FF00000000l) | ((reversed << 24) & 0x0000FF0000000000l) |
((reversed << 40) & 0x00FF000000000000l) | ((reversed << 56) & 0xFF00000000000000l);
msg.getBuckets().add(new BucketId(rawid));
}
msg.setFromTimestamp(buf.getLong(null));
msg.setToTimestamp(buf.getLong(null));
msg.setVisitRemoves(buf.getByte(null) == (byte)1);
msg.setFieldSet(decodeString(buf));
msg.setVisitInconsistentBuckets(buf.getByte(null) == (byte)1);
size = buf.getInt(null);
for (int i = 0; i < size; i++) {
String key = decodeString(buf);
int sz = buf.getInt(null);
msg.getParameters().put(key, buf.getBytes(null, sz));
}
buf.getInt(null);
msg.setMaxBucketsPerVisitor(buf.getInt(null));
msg.setBucketSpace(decodeBucketSpace(buf));
return msg;
}
protected boolean encodeBucketSpace(String bucketSpace, DocumentSerializer buf) {
encodeString(bucketSpace, buf);
return true;
}
@Override
} |
Current limitations is that it must be parseable as Long. | public void testToBucketId() {
verifyGidToBucketIdMapping("id:ns:mytype:n=1:abc");
verifyGidToBucketIdMapping("id:ns:mytype:n=1000:abc");
verifyGidToBucketIdMapping("id:hsgf:mytype:n=9146744073700000000:dfdfsdfg");
verifyGidToBucketIdMapping("id:ns:mytype:g=somegroup:hmm");
verifyGidToBucketIdMapping("doc:foo:test");
verifyGidToBucketIdMapping("doc:myns:http:
verifyGidToBucketIdMapping("doc:jsrthsdf:a234aleingzldkifvasdfgadf");
} | verifyGidToBucketIdMapping("id:hsgf:mytype:n=9146744073700000000:dfdfsdfg"); | public void testToBucketId() {
verifyGidToBucketIdMapping("id:ns:mytype:n=1:abc");
verifyGidToBucketIdMapping("id:ns:mytype:n=1000:abc");
verifyGidToBucketIdMapping("id:hsgf:mytype:n=9146744073700000000:dfdfsdfg");
verifyGidToBucketIdMapping("id:ns:mytype:g=somegroup:hmm");
verifyGidToBucketIdMapping("doc:foo:test");
verifyGidToBucketIdMapping("doc:myns:http:
verifyGidToBucketIdMapping("doc:jsrthsdf:a234aleingzldkifvasdfgadf");
} | class GlobalIdTestCase {
private final byte[] raw0 = new byte[0];
private final byte[] raw1_0 = new byte[]{(byte) 0};
private final byte[] raw2_11 = new byte[]{(byte) 1, (byte) 1};
private final byte[] raw2_minus1_1 = new byte[]{(byte) -1, (byte) 1};
private final byte[] raw12_1to12 = new byte[]{(byte) 1, (byte) 2, (byte) 3, (byte) 4, (byte) 5, (byte) 6, (byte) 7,
(byte) 8, (byte) 9, (byte) 10, (byte) 11, (byte) 12};
private final byte[] raw13 = new byte[]{(byte) 1, (byte) 2, (byte) 3, (byte) 4, (byte) 5, (byte) 6, (byte) 7,
(byte) 8, (byte) 9, (byte) 10, (byte) 11, (byte) 12, (byte) 13};
private final BucketIdFactory bucketIdFactory = new BucketIdFactory();
@Test
public void testRaw0() {
GlobalId gid = new GlobalId(raw0);
assertEquals(12, gid.getRawId().length);
byte[] raw = gid.getRawId();
for (byte b : raw) {
assertEquals((byte) 0, b);
}
GlobalId gid2 = new GlobalId(raw1_0);
assertEquals(12, gid2.getRawId().length);
byte[] raw2 = gid2.getRawId();
for (byte b : raw2) {
assertEquals((byte) 0, b);
}
assertEquals(gid, gid2);
assertTrue(Arrays.equals(raw, raw2));
assertEquals(gid.hashCode(), gid2.hashCode());
}
@Test
public void testLonger() {
GlobalId gid1 = new GlobalId(raw2_11);
GlobalId gid2 = new GlobalId(raw2_minus1_1);
assertFalse(gid1.equals(gid2));
assertFalse(gid1.hashCode() == gid2.hashCode());
GlobalId gid3 = new GlobalId(raw13);
GlobalId gid4 = new GlobalId(raw12_1to12);
assertEquals(gid3, gid4);
assertEquals(gid3.hashCode(), gid4.hashCode());
}
@Test
public void testCompareTo() {
GlobalId gid0 = new GlobalId(raw1_0);
GlobalId gid11 = new GlobalId(raw2_11);
GlobalId gidminus11 = new GlobalId(raw2_minus1_1);
assertEquals(-1, gid0.compareTo(gid11));
assertEquals(1, gid11.compareTo(gid0));
assertEquals(-1, gid0.compareTo(gidminus11));
assertEquals(1, gidminus11.compareTo(gid0));
assertEquals(-1, gid11.compareTo(gidminus11));
assertEquals(1, gidminus11.compareTo(gid11));
}
private void verifyGidToBucketIdMapping(String idString) {
DocumentId documentId = new DocumentId(idString);
GlobalId globalId = new GlobalId(documentId.getGlobalId());
BucketId bucketIdThroughGlobalId = globalId.toBucketId();
BucketId bucketIdThroughFactory = bucketIdFactory.getBucketId(documentId);
assertEquals(bucketIdThroughFactory, bucketIdThroughGlobalId);
}
@Test
} | class GlobalIdTestCase {
private final byte[] raw0 = new byte[0];
private final byte[] raw1_0 = new byte[]{(byte) 0};
private final byte[] raw2_11 = new byte[]{(byte) 1, (byte) 1};
private final byte[] raw2_minus1_1 = new byte[]{(byte) -1, (byte) 1};
private final byte[] raw12_1to12 = new byte[]{(byte) 1, (byte) 2, (byte) 3, (byte) 4, (byte) 5, (byte) 6, (byte) 7,
(byte) 8, (byte) 9, (byte) 10, (byte) 11, (byte) 12};
private final byte[] raw13 = new byte[]{(byte) 1, (byte) 2, (byte) 3, (byte) 4, (byte) 5, (byte) 6, (byte) 7,
(byte) 8, (byte) 9, (byte) 10, (byte) 11, (byte) 12, (byte) 13};
private final BucketIdFactory bucketIdFactory = new BucketIdFactory();
@Test
public void testRaw0() {
GlobalId gid = new GlobalId(raw0);
assertEquals(12, gid.getRawId().length);
byte[] raw = gid.getRawId();
for (byte b : raw) {
assertEquals((byte) 0, b);
}
GlobalId gid2 = new GlobalId(raw1_0);
assertEquals(12, gid2.getRawId().length);
byte[] raw2 = gid2.getRawId();
for (byte b : raw2) {
assertEquals((byte) 0, b);
}
assertEquals(gid, gid2);
assertTrue(Arrays.equals(raw, raw2));
assertEquals(gid.hashCode(), gid2.hashCode());
}
@Test
public void testLonger() {
GlobalId gid1 = new GlobalId(raw2_11);
GlobalId gid2 = new GlobalId(raw2_minus1_1);
assertFalse(gid1.equals(gid2));
assertFalse(gid1.hashCode() == gid2.hashCode());
GlobalId gid3 = new GlobalId(raw13);
GlobalId gid4 = new GlobalId(raw12_1to12);
assertEquals(gid3, gid4);
assertEquals(gid3.hashCode(), gid4.hashCode());
}
@Test
public void testCompareTo() {
GlobalId gid0 = new GlobalId(raw1_0);
GlobalId gid11 = new GlobalId(raw2_11);
GlobalId gidminus11 = new GlobalId(raw2_minus1_1);
assertEquals(-1, gid0.compareTo(gid11));
assertEquals(1, gid11.compareTo(gid0));
assertEquals(-1, gid0.compareTo(gidminus11));
assertEquals(1, gidminus11.compareTo(gid0));
assertEquals(-1, gid11.compareTo(gidminus11));
assertEquals(1, gidminus11.compareTo(gid11));
}
private void verifyGidToBucketIdMapping(String idString) {
DocumentId documentId = new DocumentId(idString);
GlobalId globalId = new GlobalId(documentId.getGlobalId());
BucketId bucketIdThroughGlobalId = globalId.toBucketId();
BucketId bucketIdThroughFactory = bucketIdFactory.getBucketId(documentId);
assertEquals(bucketIdThroughFactory, bucketIdThroughGlobalId);
}
@Test
} |
No need to check for SimpleIndexedItem here as that is required to be a TermItem insertion time. | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) { | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} |
I am not sure why it was limited to TermItem initially. I remember we had some discussions about it. I will redo them and see if we can relax requirement. | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) { | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} |
Yes but if that ever changes we have a new latent bug ... It's close in the code though, so I don't feel strongly about it. | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) { | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} |
We'd only have a bug if sameElement with one child doesn't work in the backend. Anyway, fine :-) | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) { | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} |
You will get a silent performance degradation.... Which was the reason for introducing the extractSingleChild :) I checked with the backend and saw that only TermItems were handled. So before allowing anything else I need to verify with @havardpe if could allow at least weghted set. | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | if (getItemCount() == 1 && getItem(0) instanceof SimpleIndexedItem) { | public Optional<Item> extractSingleChild() {
if (getItemCount() == 1) {
SimpleIndexedItem child = (SimpleIndexedItem)getItem(0);
child.setIndexName(getFieldName() + "." + child.getIndexName());
return Optional.of(child);
}
return Optional.empty();
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} | class SameElementItem extends NonReducibleCompositeItem {
private final String fieldName;
public SameElementItem(String commonPath) {
Validator.ensureNonEmpty("Field name", commonPath);
this.fieldName = commonPath;
}
@Override
protected void encodeThis(ByteBuffer buffer) {
super.encodeThis(buffer);
putString(fieldName, buffer);
}
@Override
protected void appendHeadingString(StringBuilder buffer) { }
@Override
protected void appendBodyString(StringBuilder buffer) {
buffer.append(fieldName).append(':');
buffer.append('{');
for (Iterator<Item> i = getItemIterator(); i.hasNext();) {
TermItem term = (TermItem) i.next();
buffer.append(term.getIndexName()).append(':').append(term.getIndexedString());
if (i.hasNext()) {
buffer.append(' ');
}
}
buffer.append('}');
}
@Override
protected void adding(Item item) {
super.adding(item);
Validator.ensureInstanceOf("Child item", item, TermItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
@Override
@Override
public ItemType getItemType() {
return ItemType.SAME_ELEMENT;
}
@Override
public String getName() {
return getItemType().toString();
}
public String getFieldName() { return fieldName; }
} |
s/bale/able/ | public void testMappedDocAPI() {
Document doc = getDoc();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("a", "artist");
ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
assertEquals(mapped.getFieldValue("title"), doc.getFieldValue("title"));
assertEquals(mapped.getFieldValue(new com.yahoo.document.Field("title")), doc.getFieldValue((new com.yahoo.document.Field("title"))));
mapped.setFieldValue("title", "foo");
assertEquals(doc.getFieldValue("title").getWrappedValue(), "foo");
assertEquals(mapped.getWrappedDocumentOperation().getId().toString(), "id:map:album::1");
assertEquals(doc, mapped);
assertEquals(doc.toString(), mapped.toString());
assertEquals(doc.hashCode(), mapped.hashCode());
assertEquals(doc.clone(), mapped.clone());
assertEquals(doc.iterator().hasNext(), mapped.iterator().hasNext());
assertEquals(doc.getId(), mapped.getId());
assertEquals(doc.getDataType(), mapped.getDataType());
mapped.setLastModified(56l);
assertEquals(doc.getLastModified(), (Long)56l);
assertEquals(mapped.getLastModified(), (Long)56l);
mapped.setId(new DocumentId("id:map:album::2"));
assertEquals(mapped.getId().toString(), "id:map:album::2");
assertEquals(doc.getId().toString(), "id:map:album::2");
assertEquals(doc.getHeader(), mapped.getHeader());
assertEquals(doc.getBody(), mapped.getBody());
assertEquals(doc.getSerializedSize(), mapped.getSerializedSize());
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ByteArrayOutputStream bos2 = new ByteArrayOutputStream();
mapped.serialize(bos);
doc.serialize(bos2);
assertEquals(bos.toString(), bos2.toString());
assertEquals(mapped.toXml(), doc.toXml());
assertEquals(mapped.getFieldCount(), doc.getFieldCount());
assertTrue(mapped.getDocument()==doc);
mapped.clear();
assertNull(mapped.getFieldValue("title"));
assertNull(doc.getFieldValue("title"));
try {
mapped.setDataType(new DocumentType("newType"));
fail("Should not be bale to set new type");
} catch (IllegalArgumentException e) {
assertEquals("Trying to set a document type (newType) that doesn't match the document id (id:map:album::2).", e.getMessage());
}
assertEquals(doc.getDataType().getName(), "album");
} | fail("Should not be bale to set new type"); | public void testMappedDocAPI() {
Document doc = getDoc();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("a", "artist");
ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
assertEquals(mapped.getFieldValue("title"), doc.getFieldValue("title"));
assertEquals(mapped.getFieldValue(new com.yahoo.document.Field("title")), doc.getFieldValue((new com.yahoo.document.Field("title"))));
mapped.setFieldValue("title", "foo");
assertEquals(doc.getFieldValue("title").getWrappedValue(), "foo");
assertEquals(mapped.getWrappedDocumentOperation().getId().toString(), "id:map:album::1");
assertEquals(doc, mapped);
assertEquals(doc.toString(), mapped.toString());
assertEquals(doc.hashCode(), mapped.hashCode());
assertEquals(doc.clone(), mapped.clone());
assertEquals(doc.iterator().hasNext(), mapped.iterator().hasNext());
assertEquals(doc.getId(), mapped.getId());
assertEquals(doc.getDataType(), mapped.getDataType());
mapped.setLastModified(56l);
assertEquals(doc.getLastModified(), (Long)56l);
assertEquals(mapped.getLastModified(), (Long)56l);
mapped.setId(new DocumentId("id:map:album::2"));
assertEquals(mapped.getId().toString(), "id:map:album::2");
assertEquals(doc.getId().toString(), "id:map:album::2");
assertEquals(doc.getHeader(), mapped.getHeader());
assertEquals(doc.getBody(), mapped.getBody());
assertEquals(doc.getSerializedSize(), mapped.getSerializedSize());
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ByteArrayOutputStream bos2 = new ByteArrayOutputStream();
mapped.serialize(bos);
doc.serialize(bos2);
assertEquals(bos.toString(), bos2.toString());
assertEquals(mapped.toXml(), doc.toXml());
assertEquals(mapped.getFieldCount(), doc.getFieldCount());
assertTrue(mapped.getDocument()==doc);
mapped.clear();
assertNull(mapped.getFieldValue("title"));
assertNull(doc.getFieldValue("title"));
try {
mapped.setDataType(new DocumentType("newType"));
fail("Should not be able to set new type");
} catch (IllegalArgumentException e) {
assertEquals("Trying to set a document type (newType) that doesn't match the document id (id:map:album::2).", e.getMessage());
}
assertEquals(doc.getDataType().getName(), "album");
} | class SchemaMappingAndAccessesTest {
private Document getDoc() {
DocumentType type = new DocumentType("album");
AnnotationType personType = new AnnotationType("person");
Annotation person = new Annotation(personType);
type.addField("title", DataType.STRING);
type.addField("artist", DataType.STRING);
type.addField("guitarist", DataType.STRING);
type.addField("year", DataType.INT);
type.addField("labels", DataType.getArray(DataType.STRING));
Document doc = new Document(type, new DocumentId("id:map:album::1"));
doc.setFieldValue("title", new StringFieldValue("Black Rock"));
StringFieldValue joe = new StringFieldValue("Joe Bonamassa");
joe.setSpanTree(new SpanTree("mytree").annotate(person));
doc.setFieldValue("artist", joe);
doc.setFieldValue("year", new IntegerFieldValue(2010));
Array<StringFieldValue> labels = new Array<>(type.getField("labels").getDataType());
labels.add(new StringFieldValue("audun"));
labels.add(new StringFieldValue("tylden"));
doc.setFieldValue("labels", labels);
StructDataType personStructType = new StructDataType("artist");
personStructType.addField(new com.yahoo.document.Field("firstname", DataType.STRING));
personStructType.addField(new com.yahoo.document.Field("lastname", DataType.STRING));
type.addField("listeners", DataType.getArray(personStructType));
Array<Struct> listeners = new Array<>(type.getField("listeners").getDataType());
Struct listenerOne = new Struct(personStructType);
listenerOne.setFieldValue("firstname", new StringFieldValue("per"));
listenerOne.setFieldValue("lastname", new StringFieldValue("olsen"));
Struct listenerTwo = new Struct(personStructType);
listenerTwo.setFieldValue("firstname", new StringFieldValue("anders"));
listenerTwo.setFieldValue("lastname", new StringFieldValue("and"));
listeners.add(listenerOne);
listeners.add(listenerTwo);
doc.setFieldValue("listeners", listeners);
return doc;
}
// Verifies that a schema mapping to an array element ("labels[0]") writes through
// to the backing document, and that an out-of-bounds index ("labels[2]") fails
// without modifying the array.
@Test
public void testMappingArrays() {
    Document doc = getDoc();
    DocumentProcessor proc = new TestMappingArrayProcessor();
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("label", "labels[0]");
    ProxyDocument mapped = new ProxyDocument(proc, doc, fieldMap);
    Processing p = Processing.of(new DocumentPut(mapped));
    proc.process(p);
    // processor wrote "EMI" via the mapped name; element 0 replaced, element 1 untouched
    assertEquals(2, ((Array<StringFieldValue>) doc.getFieldValue("labels")).size());
    assertEquals(new StringFieldValue("EMI"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(0));
    assertEquals(new StringFieldValue("tylden"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(1));
    fieldMap.clear();
    fieldMap.put("label", "labels[2]");
    mapped = new ProxyDocument(proc, doc, fieldMap);
    p = Processing.of(new DocumentPut(mapped));
    try {
        proc.process(p);
        fail("Should not have worked");
    } catch (IllegalArgumentException iae) {
        // expected: index 2 is out of bounds for the two-element array
    }
    // array unchanged by the failed write
    assertEquals(2, ((Array<StringFieldValue>) doc.getFieldValue("labels")).size());
    assertEquals(new StringFieldValue("EMI"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(0));
    assertEquals(new StringFieldValue("tylden"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(1));
}
// Verifies schema mappings that reach into a struct inside an array
// ("listeners[i].firstname"): in-bounds writes and removals go through to the
// struct field; out-of-bounds indices throw and leave the data untouched.
@Test
public void testMappingStructsInArrays() {
    Document doc = getDoc();
    DocumentProcessor proc = new TestMappingStructInArrayProcessor();
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("name", "listeners[0].firstname");
    ProxyDocument mapped = new ProxyDocument(proc, doc, fieldMap);
    Processing p = Processing.of(new DocumentPut(mapped));
    proc.process(p);
    // processor wrote "peter" into listeners[0].firstname; everything else untouched
    assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
    assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
    assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
    assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
    assertEquals("and", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname")).getString()));
    fieldMap.clear();
    fieldMap.put("name", "listeners[2].firstname");
    mapped = new ProxyDocument(proc, doc, fieldMap);
    p = Processing.of(new DocumentPut(mapped));
    try {
        proc.process(p);
        fail("Should not have worked");
    } catch (IllegalArgumentException iae) {
        // expected: index 2 is out of bounds for the two-element listeners array
    }
    assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
    assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
    assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
    assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
    assertEquals("and", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname")).getString()));
    // now test removal through the mapping
    proc = new TestRemovingMappingStructInArrayProcessor();
    fieldMap.clear();
    fieldMap.put("name", "listeners[1].lastname");
    mapped = new ProxyDocument(proc, doc, fieldMap);
    p = Processing.of(new DocumentPut(mapped));
    proc.process(p);
    assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
    assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
    assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
    assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
    // removal through the mapping cleared listeners[1].lastname
    assertNull(((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname"));
    fieldMap.clear();
    fieldMap.put("name", "listeners[2].lastname");
    mapped = new ProxyDocument(proc, doc, fieldMap);
    p = Processing.of(new DocumentPut(mapped));
    try {
        proc.process(p);
        fail("Should not have worked");
    } catch (IllegalArgumentException iae) {
        // expected: out-of-bounds removal target
    }
    assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
    assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
    assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
    assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
    assertNull(((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname"));
}
// Verifies that span trees attached to string field values are visible identically
// through the mapped ("a") and unmapped ("artist") views, and that setting a new
// annotated value through the proxy replaces the tree in the backing document.
@Test
public void testMappingSpanTrees() {
    Document doc = getDoc();
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("t", "title");
    fieldMap.put("a", "artist");
    fieldMap.put("g", "guitarist");
    ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
    Iterator<SpanTree> itSpanTreesDoc = ((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator();
    Iterator<Annotation> itAnnotDoc = itSpanTreesDoc.next().iterator();
    Iterator<SpanTree> itSpanTreesMapped = ((StringFieldValue) mapped.getFieldValue("artist")).getSpanTrees().iterator();
    Iterator<Annotation> itAnnotMapped = itSpanTreesMapped.next().iterator();
    // both views see the single "person" annotation from the fixture
    assertEquals(itAnnotDoc.next().getType().getName(), "person");
    assertFalse(itAnnotDoc.hasNext());
    assertEquals(itAnnotMapped.next().getType().getName(), "person");
    assertFalse(itAnnotMapped.hasNext());
    AnnotationType guitaristType = new AnnotationType("guitarist");
    Annotation guitarist = new Annotation(guitaristType);
    StringFieldValue bona = new StringFieldValue("Bonamassa");
    bona.setSpanTree(new SpanTree("mytree").annotate(guitarist));
    StringFieldValue clapton = new StringFieldValue("Clapton");
    // write through the mapped names; "a" maps to "artist", "g" to "guitarist"
    mapped.setFieldValue("a", bona);
    mapped.setFieldValue("g", clapton);
    itSpanTreesDoc = ((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator();
    itAnnotDoc = itSpanTreesDoc.next().iterator();
    itSpanTreesMapped = ((StringFieldValue) mapped.getFieldValue("artist")).getSpanTrees().iterator();
    itAnnotMapped = itSpanTreesMapped.next().iterator();
    // the new value's "guitarist" annotation replaced the old "person" one in both views
    assertEquals(itAnnotDoc.next().getType().getName(), "guitarist");
    assertFalse(itAnnotDoc.hasNext());
    assertEquals(itAnnotMapped.next().getType().getName(), "guitarist");
    assertFalse(itAnnotMapped.hasNext());
    // both views share the same underlying SpanTree instance
    assertSame(((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator().next(), ((StringFieldValue) mapped.getFieldValue("a")).getSpanTrees().iterator().next());
}
// Verifies basic read/write/remove through a ProxyDocument mapping "t"->"title"
// and "a"->"artist": writes through the proxy are visible in the backing document
// and vice versa, repeated reads return the same instance, and removals clear the
// backing field.
// Review: replaced assertTrue(a==b) with assertSame and assertEquals(x, null)
// with assertNull — same checks, idiomatic JUnit with better failure messages.
@Test
public void testMappedDoc() {
    Document doc = getDoc();
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("t", "title");
    fieldMap.put("a", "artist");
    ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
    assertEquals(new StringFieldValue("Black Rock"), mapped.getFieldValue("t"));
    assertEquals(new StringFieldValue("Joe Bonamassa").getWrappedValue(), mapped.getFieldValue("a").getWrappedValue());
    // writes through the proxy are reflected in both views
    mapped.setFieldValue("t", new StringFieldValue("The Ballad Of John Henry"));
    StringFieldValue bona = new StringFieldValue("Bonamassa");
    mapped.setFieldValue("a", bona);
    assertEquals(new StringFieldValue("The Ballad Of John Henry"), doc.getFieldValue("title"));
    assertEquals(new StringFieldValue("The Ballad Of John Henry"), mapped.getFieldValue("t"));
    assertEquals(new StringFieldValue("Bonamassa"), doc.getFieldValue("artist"));
    assertEquals(new StringFieldValue("Bonamassa"), mapped.getFieldValue("a"));
    // String concatenation of the current value, set back through the proxy
    mapped.setFieldValue("a", mapped.getFieldValue("a") + "Hughes");
    assertEquals(new StringFieldValue("BonamassaHughes"), mapped.getFieldValue("a"));
    // repeated unmapped reads yield the same instance, so mutations are shared
    StringFieldValue unmapped1 = (StringFieldValue) doc.getFieldValue("artist");
    StringFieldValue unmapped2 = (StringFieldValue) doc.getFieldValue("artist");
    assertSame(unmapped1, unmapped2);
    unmapped1.setSpanTree(new SpanTree("test"));
    assertEquals(unmapped2.getSpanTree("test").getName(), "test");
    // same instance-identity guarantee for mapped reads
    StringFieldValue mapped1 = (StringFieldValue) mapped.getFieldValue("a");
    mapped1.setSpanTree(new SpanTree("test2"));
    StringFieldValue mapped2 = (StringFieldValue) mapped.getFieldValue("a");
    assertSame(mapped1, mapped2);
    assertEquals(mapped2.getSpanTree("test2").getName(), "test2");
    // removal through the mapped name and through the mapped Field object
    mapped.removeFieldValue("a");
    assertNull(mapped.getFieldValue("a"));
    mapped.removeFieldValue(mapped.getField("t"));
    assertNull(mapped.getFieldValue("t"));
    mapped.setFieldValue("a", new StringFieldValue("Bonamassa"));
    assertEquals(new StringFieldValue("Bonamassa"), doc.getFieldValue("artist"));
    mapped.removeFieldValue("a");
    assertNull(mapped.getFieldValue("a"));
}
// Verifies that ProxyDocumentUpdate delegates the whole DocumentUpdate API
// (field updates, identity, equality, hashCode, toString, applyTo) to the
// wrapped update.
// Review fix: the method was annotated with @Test twice; @Test is not
// @Repeatable, so the duplicate annotation is a compile error. One removed.
@Test
@SuppressWarnings("deprecation")
public void testMappedDocUpdateAPI() {
    Document doc = getDoc();
    DocumentType type = doc.getDataType();
    DocumentUpdate dud = new DocumentUpdate(type, new DocumentId("id:map:album::1"));
    com.yahoo.document.Field title = type.getField("title");
    FieldUpdate assignSingle = FieldUpdate.createAssign(title, new StringFieldValue("something"));
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("t", "title");
    fieldMap.put("a", "artist");
    ProxyDocumentUpdate pup = new ProxyDocumentUpdate(dud, fieldMap);
    pup.addFieldUpdate(assignSingle);
    // every accessor on the proxy must agree with the wrapped update
    assertEquals(pup.fieldUpdates().toString(), dud.fieldUpdates().toString());
    assertEquals(pup.getDocumentType(), dud.getDocumentType());
    assertEquals(pup.getFieldUpdate(title).size(), 1);
    assertEquals(pup.getFieldUpdate(title), dud.fieldUpdates().iterator().next());
    assertEquals(pup.getFieldUpdate("title"), dud.getFieldUpdate("title"));
    assertEquals(pup.getId(), dud.getId());
    assertEquals(pup.getType(), dud.getType());
    // applying through the proxy mutates the document like the wrapped update would
    assertEquals(pup.applyTo(doc), dud);
    assertEquals(doc.getFieldValue("title").getWrappedValue(), "something");
    assertEquals(pup, dud);
    assertEquals(pup.hashCode(), dud.hashCode());
    assertEquals(pup.toString(), dud.toString());
    assertEquals(pup.size(), dud.size());
    assertEquals(pup.getWrappedDocumentOperation().getId().toString(), "id:map:album::1");
}
// Verifies schema mappings that reach into nested structs ("store.city",
// "store.materials.walls"): reads and writes through the proxy go all the way
// down to the innermost struct instance.
@Test
public void testMappedDocStruct() {
    StructDataType materialsStructType = new StructDataType("materialstype");
    materialsStructType.addField(new com.yahoo.document.Field("ceiling", DataType.STRING));
    materialsStructType.addField(new com.yahoo.document.Field("walls", DataType.STRING));
    DocumentType docType = new DocumentType("album");
    docType.addField("title", DataType.STRING);
    docType.addField("artist", DataType.STRING);
    StructDataType storeStructType = new StructDataType("storetype");
    storeStructType.addField(new com.yahoo.document.Field("name", DataType.STRING));
    storeStructType.addField(new com.yahoo.document.Field("city", DataType.STRING));
    // materials is a struct nested inside the store struct (two levels deep)
    storeStructType.addField(new com.yahoo.document.Field("materials", materialsStructType));
    docType.addField("store", storeStructType);
    Document doc = new Document(docType, new DocumentId("id:map:album::1"));
    doc.setFieldValue("title", new StringFieldValue("Black Rock"));
    doc.setFieldValue("artist", new StringFieldValue("Joe Bonamassa"));
    Struct material = new Struct(materialsStructType);
    material.setFieldValue("ceiling", new StringFieldValue("wood"));
    material.setFieldValue("walls", new StringFieldValue("brick"));
    Struct store = new Struct(storeStructType);
    store.setFieldValue("name", new StringFieldValue("Platekompaniet"));
    store.setFieldValue("city", new StringFieldValue("Trondheim"));
    store.setFieldValue(storeStructType.getField("materials"), material);
    doc.setFieldValue(docType.getField("store"), store);
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("t", "title");
    fieldMap.put("c", "store.city");
    fieldMap.put("w", "store.materials.walls");
    ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
    // reads resolve through one and two levels of struct nesting
    assertEquals(new StringFieldValue("Trondheim"), mapped.getFieldValue("c"));
    assertEquals(new StringFieldValue("Black Rock"), mapped.getFieldValue("t"));
    assertEquals(new StringFieldValue("brick"), mapped.getFieldValue("w"));
    assertEquals(new StringFieldValue("brick"), material.getFieldValue("walls"));
    // writes propagate down to the backing struct instances
    mapped.setFieldValue("c", new StringFieldValue("Steinkjer"));
    mapped.setFieldValue("w", new StringFieldValue("plaster"));
    assertEquals(new StringFieldValue("plaster"), mapped.getFieldValue("w"));
    assertEquals(new StringFieldValue("plaster"), material.getFieldValue("walls"));
    assertEquals(new StringFieldValue("Steinkjer"), store.getFieldValue("city"));
    assertEquals(new StringFieldValue("Steinkjer"), mapped.getFieldValue("c"));
    // repeated read yields the same value (deliberately asserted twice)
    assertEquals(new StringFieldValue("Steinkjer"), mapped.getFieldValue("c"));
    mapped.setFieldValue("c", new StringFieldValue("Levanger"));
    assertEquals(new StringFieldValue("Levanger"), store.getFieldValue("city"));
    assertEquals(new StringFieldValue("Levanger"), mapped.getFieldValue("c"));
    // String concatenation of the current value, set back through the proxy
    mapped.setFieldValue("c", mapped.getFieldValue("c") + "Kommune");
    assertEquals(new StringFieldValue("LevangerKommune"), mapped.getFieldValue("c"));
}
// Verifies SchemaMap lookup: chainMap() keyed by (chain, docproc) returns the
// (doctype, in-processor-field) -> in-document-field mapping, unknown keys yield
// empty maps, and a processor resolves its per-doctype map from the chain map.
@Test
public void testSchemaMap() {
    SchemaMap map = new SchemaMap();
    map.addMapping("mychain", "com.yahoo.MyDocProc", "mydoctype", "inDoc1", "inProc1");
    map.addMapping("mychain", "com.yahoo.MyDocProc", "mydoctype", "inDoc2", "inProc2");
    Map<Pair<String, String>, String> cMap = map.chainMap("mychain", "com.yahoo.MyDocProc");
    assertEquals("inDoc1", cMap.get(new Pair<>("mydoctype", "inProc1")));
    assertEquals("inDoc2", cMap.get(new Pair<>("mydoctype", "inProc2")));
    // unknown doctype is not mapped
    assertNull(cMap.get(new Pair<>("invalidtype", "inProc2")));
    // unknown chain or docproc yields an empty (not null) map
    Map<Pair<String, String>, String> noMap = map.chainMap("invalidchain", "com.yahoo.MyDocProc");
    Map<Pair<String, String>, String> noMap2 = map.chainMap("mychain", "com.yahoo.MyInvalidDocProc");
    assertTrue(noMap.isEmpty());
    assertTrue(noMap2.isEmpty());
    DocumentProcessor proc = new TestDocumentProcessor1();
    proc.setFieldMap(cMap);
    Map<String, String> dMap = proc.getDocMap("mydoctype");
    assertEquals("inDoc1", dMap.get("inProc1"));
    assertEquals("inDoc2", dMap.get("inProc2"));
}
// Verifies SchemaMapKey equality: equal for identical (chain, docproc, doctype,
// from) tuples, unequal when any component (here the doctype) differs.
@Test
public void testSchemaMapKey() {
    SchemaMap map = new SchemaMap();
    // SchemaMapKey is a non-static inner class, hence the map.new syntax
    SchemaMap.SchemaMapKey key1 = map.new SchemaMapKey("chain", "docproc", "doctype", "from");
    SchemaMap.SchemaMapKey key1_1 = map.new SchemaMapKey("chain", "docproc", "doctype", "from");
    SchemaMap.SchemaMapKey key2 = map.new SchemaMapKey("chain", "docproc", "doctype2", "from");
    assertTrue(key1.equals(key1_1));
    assertFalse(key1.equals(key2));
}
// Verifies that SchemaMap.configure() consumes a SchemamappingConfig and exposes
// the configured field mapping through chainMap().
@Test
public void testSchemaMapConfig() {
    SchemaMap map = new SchemaMap();
    SchemamappingConfig.Builder scb = new SchemamappingConfig.Builder();
    scb.fieldmapping(new SchemamappingConfig.Fieldmapping.Builder().chain("mychain").docproc("mydocproc").doctype("mydoctype").
            indocument("myindoc").inprocessor("myinprocessor"));
    map.configure(new SchemamappingConfig(scb));
    assertEquals(map.chainMap("mychain", "mydocproc").get(new Pair<>("mydoctype", "myinprocessor")), "myindoc");
}
// Verifies that mappings registered with a null doctype act as wildcards:
// getDocMap() for any concrete doctype still resolves them.
@Test
public void testSchemaMapNoDocType() {
    SchemaMap map = new SchemaMap();
    // null doctype = applies to all document types
    map.addMapping("mychain", "com.yahoo.MyDocProc", null, "inDoc1", "inProc1");
    map.addMapping("mychain", "com.yahoo.MyDocProc", null, "inDoc2", "inProc2");
    Map<Pair<String, String>, String> cMap = map.chainMap("mychain", "com.yahoo.MyDocProc");
    DocumentProcessor proc = new TestDocumentProcessor1();
    proc.setFieldMap(cMap);
    Map<String, String> dMap = proc.getDocMap("mydoctype");
    assertEquals("inDoc1", dMap.get("inProc1"));
    assertEquals("inDoc2", dMap.get("inProc2"));
}
// Verifies the happy path of @Accesses enforcement: a processor that declares
// access to "titleMapped" (mapped to "title") may read and write it through the
// proxy document produced by Call.configDoc().
@Test
public void testProxyAndSecure() {
    DocumentProcessor procOK = new TestDPSecure();
    Map<Pair<String, String>, String> fieldMap = new HashMap<>();
    fieldMap.put(new Pair<>("album", "titleMapped"), "title");
    procOK.setFieldMap(fieldMap);
    DocumentPut put = new DocumentPut(getDoc());
    Document proxyDoc = new Call(procOK).configDoc(procOK, put).getDocument();
    procOK.process(Processing.of(new DocumentPut(proxyDoc)));
    // TestDPSecure writes "MyTitle" then appends " MyTitle" via the mapped name
    assertEquals(proxyDoc.getFieldValue("title").toString(), "MyTitle MyTitle");
}
// Verifies @Accesses enforcement rejects access: TestDPInsecure declares
// "titleMappedFoo" but touches "titleMapped", so processing must fail with a
// message mentioning what is allowed.
@Test
public void testProxyAndSecureSecureFailing() {
    DocumentProcessor procInsecure = new TestDPInsecure();
    Map<Pair<String, String>, String> fieldMap = new HashMap<>();
    fieldMap.put(new Pair<>("album", "titleMapped"), "title");
    procInsecure.setFieldMap(fieldMap);
    DocumentPut put = new DocumentPut(getDoc());
    Document doc = new Call(procInsecure).configDoc(procInsecure, put).getDocument();
    try {
        procInsecure.process(Processing.of(new DocumentPut(doc)));
        fail("Insecure docproc went through");
    } catch (Exception e) {
        // the access-control error message must mention the allowed fields
        assertTrue(e.getMessage().matches(".*allowed.*"));
    }
}
/**
 * To make it less likely to break schema mapping, we enforce that ProxyDocument does wrap every public
 * non-static, non-final method on Document and StructuredFieldValue
 */
@Test
public void testVerifyProxyDocumentOverridesEverything() {
    // collect every public method ProxyDocument itself declares
    List<Method> allPublicFromProxyDocument = new ArrayList<>();
    for (Method m : ProxyDocument.class.getDeclaredMethods()) {
        if (Modifier.isPublic(m.getModifiers())) {
            allPublicFromProxyDocument.add(m);
        }
    }
    // collect every overridable method on Document and StructuredFieldValue
    List<Method> allPublicFromDoc = new ArrayList<>();
    for (Method m : Document.class.getDeclaredMethods()) {
        if (mustBeOverriddenInProxyDocument(m)) {
            allPublicFromDoc.add(m);
        }
    }
    for (Method m : StructuredFieldValue.class.getDeclaredMethods()) {
        if (mustBeOverriddenInProxyDocument(m)) {
            allPublicFromDoc.add(m);
        }
    }
    // every overridable method must have a same-signature override in ProxyDocument
    for (Method m : allPublicFromDoc) {
        boolean thisOneOk=false;
        for (Method pdM : allPublicFromProxyDocument) {
            if (sameNameAndParams(m, pdM)) thisOneOk=true;
        }
        if (!thisOneOk) {
            throw new RuntimeException("ProxyDocument must override all public methods from Document. " +
                    "Missing: '"+m+"'. If the method doesn't need field mapping or @Accesses check, just " +
                    "override it and delegate the call to 'doc'.");
        }
    }
}
// A method must be overridden by ProxyDocument iff it is public and still
// overridable, i.e. neither static nor final.
private boolean mustBeOverriddenInProxyDocument(Method m) {
    int mods = m.getModifiers();
    return Modifier.isPublic(mods) && !Modifier.isStatic(mods) && !Modifier.isFinal(mods);
}
// True when the two methods have the same name and identical parameter type
// lists, i.e. the signature (ignoring return type) matches.
private boolean sameNameAndParams(Method m1, Method m2) {
    if (!m1.getName().equals(m2.getName())) return false;
    Class<?>[] params1 = m1.getParameterTypes();
    Class<?>[] params2 = m2.getParameterTypes();
    if (params1.length != params2.length) return false;
    for (int i = 0; i < params1.length; i++) {
        // Class objects are canonical per loader, so equals == identity here
        if (!params1[i].equals(params2[i])) return false;
    }
    return true;
}
// Processor that declares (via @Accesses) the "titleMapped" field it touches;
// used to verify that declared access passes the security check.
@Accesses(value = { @Field(dataType = "String", description = "", name = "titleMapped") })
public static class TestDPSecure extends DocumentProcessor {
    public Progress process(Processing processing) {
        Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
        // writes "MyTitle" then appends " MyTitle" — the test asserts "MyTitle MyTitle"
        document.setFieldValue("titleMapped", new StringFieldValue("MyTitle"));
        document.setFieldValue("titleMapped", new StringFieldValue(document.getFieldValue("titleMapped").toString() + " MyTitle"));
        return Progress.DONE;
    }
}
// Processor that declares access to "titleMappedFoo" but actually touches
// "titleMapped"; used to verify that undeclared access is rejected.
@Accesses(value = { @Field(dataType = "String", description = "", name = "titleMappedFoo") })
public static class TestDPInsecure extends DocumentProcessor {
    public Progress process(Processing processing) {
        Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
        // intentionally touches a field not listed in @Accesses above
        document.setFieldValue("titleMapped", new StringFieldValue("MyTitle"));
        document.setFieldValue("titleMapped", new StringFieldValue(document.getFieldValue("titleMapped").toString() + " MyTitle"));
        return Progress.DONE;
    }
}
// Processor that writes "EMI" to the mapped field "label"; the tests map
// "label" to an array element such as "labels[0]".
public static class TestMappingArrayProcessor extends DocumentProcessor {
    public Progress process(Processing processing) {
        Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
        document.setFieldValue("label", new StringFieldValue("EMI"));
        return Progress.DONE;
    }
}
// Processor that writes "peter" to the mapped field "name"; the tests map
// "name" to a struct member inside an array, e.g. "listeners[0].firstname".
// Review fix: removed a stray double semicolon (empty statement) after the
// getDocument() call.
public static class TestMappingStructInArrayProcessor extends DocumentProcessor {
    public Progress process(Processing processing) {
        Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
        document.setFieldValue("name", new StringFieldValue("peter"));
        return Progress.DONE;
    }
}
// Processor that removes the mapped field "name"; the tests map "name" to a
// struct member inside an array to verify removal through the mapping.
public static class TestRemovingMappingStructInArrayProcessor extends DocumentProcessor {
    public Progress process(Processing processing) {
        Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
        document.removeFieldValue("name");
        return Progress.DONE;
    }
}
} | class SchemaMappingAndAccessesTest {
// Builds the fixture "album" document (duplicate of the earlier getDoc in this
// file): plain fields, a string array, an annotated "artist" value, and an
// array of person structs ("listeners").
private Document getDoc() {
    DocumentType type = new DocumentType("album");
    AnnotationType personType = new AnnotationType("person");
    Annotation person = new Annotation(personType);
    type.addField("title", DataType.STRING);
    type.addField("artist", DataType.STRING);
    type.addField("guitarist", DataType.STRING);
    type.addField("year", DataType.INT);
    type.addField("labels", DataType.getArray(DataType.STRING));
    Document doc = new Document(type, new DocumentId("id:map:album::1"));
    doc.setFieldValue("title", new StringFieldValue("Black Rock"));
    StringFieldValue joe = new StringFieldValue("Joe Bonamassa");
    // span tree on "artist" feeds the span-tree mapping tests
    joe.setSpanTree(new SpanTree("mytree").annotate(person));
    doc.setFieldValue("artist", joe);
    doc.setFieldValue("year", new IntegerFieldValue(2010));
    Array<StringFieldValue> labels = new Array<>(type.getField("labels").getDataType());
    labels.add(new StringFieldValue("audun"));
    labels.add(new StringFieldValue("tylden"));
    doc.setFieldValue("labels", labels);
    StructDataType personStructType = new StructDataType("artist");
    personStructType.addField(new com.yahoo.document.Field("firstname", DataType.STRING));
    personStructType.addField(new com.yahoo.document.Field("lastname", DataType.STRING));
    type.addField("listeners", DataType.getArray(personStructType));
    Array<Struct> listeners = new Array<>(type.getField("listeners").getDataType());
    Struct listenerOne = new Struct(personStructType);
    listenerOne.setFieldValue("firstname", new StringFieldValue("per"));
    listenerOne.setFieldValue("lastname", new StringFieldValue("olsen"));
    Struct listenerTwo = new Struct(personStructType);
    listenerTwo.setFieldValue("firstname", new StringFieldValue("anders"));
    listenerTwo.setFieldValue("lastname", new StringFieldValue("and"));
    listeners.add(listenerOne);
    listeners.add(listenerTwo);
    doc.setFieldValue("listeners", listeners);
    return doc;
}
// Verifies mapping to an array element ("labels[0]") writes through, and an
// out-of-bounds index ("labels[2]") fails without modifying the array.
@Test
public void testMappingArrays() {
    Document doc = getDoc();
    DocumentProcessor proc = new TestMappingArrayProcessor();
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("label", "labels[0]");
    ProxyDocument mapped = new ProxyDocument(proc, doc, fieldMap);
    Processing p = Processing.of(new DocumentPut(mapped));
    proc.process(p);
    assertEquals(2, ((Array<StringFieldValue>) doc.getFieldValue("labels")).size());
    assertEquals(new StringFieldValue("EMI"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(0));
    assertEquals(new StringFieldValue("tylden"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(1));
    fieldMap.clear();
    fieldMap.put("label", "labels[2]");
    mapped = new ProxyDocument(proc, doc, fieldMap);
    p = Processing.of(new DocumentPut(mapped));
    try {
        proc.process(p);
        fail("Should not have worked");
    } catch (IllegalArgumentException iae) {
        // expected: index 2 is out of bounds
    }
    // array unchanged by the failed write
    assertEquals(2, ((Array<StringFieldValue>) doc.getFieldValue("labels")).size());
    assertEquals(new StringFieldValue("EMI"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(0));
    assertEquals(new StringFieldValue("tylden"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(1));
}
// Verifies mappings that reach into a struct inside an array
// ("listeners[i].firstname"): in-bounds writes/removals succeed, out-of-bounds
// indices throw and leave the data untouched.
@Test
public void testMappingStructsInArrays() {
    Document doc = getDoc();
    DocumentProcessor proc = new TestMappingStructInArrayProcessor();
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("name", "listeners[0].firstname");
    ProxyDocument mapped = new ProxyDocument(proc, doc, fieldMap);
    Processing p = Processing.of(new DocumentPut(mapped));
    proc.process(p);
    // processor wrote "peter" into listeners[0].firstname
    assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
    assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
    assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
    assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
    assertEquals("and", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname")).getString()));
    fieldMap.clear();
    fieldMap.put("name", "listeners[2].firstname");
    mapped = new ProxyDocument(proc, doc, fieldMap);
    p = Processing.of(new DocumentPut(mapped));
    try {
        proc.process(p);
        fail("Should not have worked");
    } catch (IllegalArgumentException iae) {
        // expected: index 2 is out of bounds
    }
    assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
    assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
    assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
    assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
    assertEquals("and", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname")).getString()));
    // now test removal through the mapping
    proc = new TestRemovingMappingStructInArrayProcessor();
    fieldMap.clear();
    fieldMap.put("name", "listeners[1].lastname");
    mapped = new ProxyDocument(proc, doc, fieldMap);
    p = Processing.of(new DocumentPut(mapped));
    proc.process(p);
    assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
    assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
    assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
    assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
    // removal cleared listeners[1].lastname
    assertNull(((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname"));
    fieldMap.clear();
    fieldMap.put("name", "listeners[2].lastname");
    mapped = new ProxyDocument(proc, doc, fieldMap);
    p = Processing.of(new DocumentPut(mapped));
    try {
        proc.process(p);
        fail("Should not have worked");
    } catch (IllegalArgumentException iae) {
        // expected: out-of-bounds removal target
    }
    assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
    assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
    assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
    assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
    assertNull(((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname"));
}
// Verifies span trees are visible identically through mapped and unmapped views,
// and that setting a new annotated value through the proxy replaces the tree in
// the backing document.
@Test
public void testMappingSpanTrees() {
    Document doc = getDoc();
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("t", "title");
    fieldMap.put("a", "artist");
    fieldMap.put("g", "guitarist");
    ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
    Iterator<SpanTree> itSpanTreesDoc = ((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator();
    Iterator<Annotation> itAnnotDoc = itSpanTreesDoc.next().iterator();
    Iterator<SpanTree> itSpanTreesMapped = ((StringFieldValue) mapped.getFieldValue("artist")).getSpanTrees().iterator();
    Iterator<Annotation> itAnnotMapped = itSpanTreesMapped.next().iterator();
    // both views see the single "person" annotation from the fixture
    assertEquals(itAnnotDoc.next().getType().getName(), "person");
    assertFalse(itAnnotDoc.hasNext());
    assertEquals(itAnnotMapped.next().getType().getName(), "person");
    assertFalse(itAnnotMapped.hasNext());
    AnnotationType guitaristType = new AnnotationType("guitarist");
    Annotation guitarist = new Annotation(guitaristType);
    StringFieldValue bona = new StringFieldValue("Bonamassa");
    bona.setSpanTree(new SpanTree("mytree").annotate(guitarist));
    StringFieldValue clapton = new StringFieldValue("Clapton");
    mapped.setFieldValue("a", bona);
    mapped.setFieldValue("g", clapton);
    itSpanTreesDoc = ((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator();
    itAnnotDoc = itSpanTreesDoc.next().iterator();
    itSpanTreesMapped = ((StringFieldValue) mapped.getFieldValue("artist")).getSpanTrees().iterator();
    itAnnotMapped = itSpanTreesMapped.next().iterator();
    // the "guitarist" annotation replaced the old "person" one in both views
    assertEquals(itAnnotDoc.next().getType().getName(), "guitarist");
    assertFalse(itAnnotDoc.hasNext());
    assertEquals(itAnnotMapped.next().getType().getName(), "guitarist");
    assertFalse(itAnnotMapped.hasNext());
    // both views share the same underlying SpanTree instance
    assertSame(((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator().next(), ((StringFieldValue) mapped.getFieldValue("a")).getSpanTrees().iterator().next());
}
// Verifies basic read/write/remove through a ProxyDocument mapping "t"->"title"
// and "a"->"artist": writes through the proxy are visible in the backing
// document and vice versa; repeated reads return the same instance; removals
// clear the backing field.
@Test
public void testMappedDoc() {
    Document doc = getDoc();
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("t", "title");
    fieldMap.put("a", "artist");
    ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
    assertEquals(new StringFieldValue("Black Rock"), mapped.getFieldValue("t"));
    assertEquals(new StringFieldValue("Joe Bonamassa").getWrappedValue(), mapped.getFieldValue("a").getWrappedValue());
    mapped.setFieldValue("t", new StringFieldValue("The Ballad Of John Henry"));
    StringFieldValue bona = new StringFieldValue("Bonamassa");
    mapped.setFieldValue("a", bona);
    // writes through the proxy are reflected in both views
    assertEquals(new StringFieldValue("The Ballad Of John Henry"), doc.getFieldValue("title"));
    assertEquals(new StringFieldValue("The Ballad Of John Henry"), mapped.getFieldValue("t"));
    assertEquals(new StringFieldValue("Bonamassa"), doc.getFieldValue("artist"));
    assertEquals(new StringFieldValue("Bonamassa"), mapped.getFieldValue("a"));
    // String concatenation of the current value, set back through the proxy
    mapped.setFieldValue("a", mapped.getFieldValue("a") + "Hughes");
    assertEquals(new StringFieldValue("BonamassaHughes"), mapped.getFieldValue("a"));
    // repeated unmapped reads yield the same instance, so mutations are shared
    StringFieldValue unmapped1 = (StringFieldValue) doc.getFieldValue("artist");
    StringFieldValue unmapped2 = (StringFieldValue) doc.getFieldValue("artist");
    assertTrue(unmapped1==unmapped2);
    unmapped1.setSpanTree(new SpanTree("test"));
    assertEquals(unmapped2.getSpanTree("test").getName(), "test");
    // same instance-identity guarantee for mapped reads
    StringFieldValue mapped1 = (StringFieldValue) mapped.getFieldValue("a");
    mapped1.setSpanTree(new SpanTree("test2"));
    StringFieldValue mapped2 = (StringFieldValue) mapped.getFieldValue("a");
    assertTrue(mapped1==mapped2);
    assertEquals(mapped2.getSpanTree("test2").getName(), "test2");
    // removal through the mapped name and through the mapped Field object
    mapped.removeFieldValue("a");
    assertEquals(mapped.getFieldValue("a"), null);
    mapped.removeFieldValue(mapped.getField("t"));
    assertEquals(mapped.getFieldValue("t"), null);
    mapped.setFieldValue("a", new StringFieldValue("Bonamassa"));
    assertEquals(new StringFieldValue("Bonamassa"), doc.getFieldValue("artist"));
    mapped.removeFieldValue("a");
    assertEquals(mapped.getFieldValue("a"), null);
}
// Verifies that ProxyDocumentUpdate delegates the whole DocumentUpdate API
// (field updates, identity, equality, hashCode, toString, applyTo) to the
// wrapped update.
// Review fix: the method was annotated with @Test twice; @Test is not
// @Repeatable, so the duplicate annotation is a compile error. One removed.
@Test
@SuppressWarnings("deprecation")
public void testMappedDocUpdateAPI() {
    Document doc = getDoc();
    DocumentType type = doc.getDataType();
    DocumentUpdate dud = new DocumentUpdate(type, new DocumentId("id:map:album::1"));
    com.yahoo.document.Field title = type.getField("title");
    FieldUpdate assignSingle = FieldUpdate.createAssign(title, new StringFieldValue("something"));
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("t", "title");
    fieldMap.put("a", "artist");
    ProxyDocumentUpdate pup = new ProxyDocumentUpdate(dud, fieldMap);
    pup.addFieldUpdate(assignSingle);
    // every accessor on the proxy must agree with the wrapped update
    assertEquals(pup.fieldUpdates().toString(), dud.fieldUpdates().toString());
    assertEquals(pup.getDocumentType(), dud.getDocumentType());
    assertEquals(pup.getFieldUpdate(title).size(), 1);
    assertEquals(pup.getFieldUpdate(title), dud.fieldUpdates().iterator().next());
    assertEquals(pup.getFieldUpdate("title"), dud.getFieldUpdate("title"));
    assertEquals(pup.getId(), dud.getId());
    assertEquals(pup.getType(), dud.getType());
    // applying through the proxy mutates the document like the wrapped update would
    assertEquals(pup.applyTo(doc), dud);
    assertEquals(doc.getFieldValue("title").getWrappedValue(), "something");
    assertEquals(pup, dud);
    assertEquals(pup.hashCode(), dud.hashCode());
    assertEquals(pup.toString(), dud.toString());
    assertEquals(pup.size(), dud.size());
    assertEquals(pup.getWrappedDocumentOperation().getId().toString(), "id:map:album::1");
}
/**
 * Verifies schema mapping into nested struct fields: the map keys 'c' and 'w'
 * resolve through dotted paths ("store.city", "store.materials.walls"), and writes
 * through the proxy are visible in the underlying structs and vice versa.
 */
@Test
public void testMappedDocStruct() {
// Build the type hierarchy: album.store (struct) containing materials (struct).
StructDataType materialsStructType = new StructDataType("materialstype");
materialsStructType.addField(new com.yahoo.document.Field("ceiling", DataType.STRING));
materialsStructType.addField(new com.yahoo.document.Field("walls", DataType.STRING));
DocumentType docType = new DocumentType("album");
docType.addField("title", DataType.STRING);
docType.addField("artist", DataType.STRING);
StructDataType storeStructType = new StructDataType("storetype");
storeStructType.addField(new com.yahoo.document.Field("name", DataType.STRING));
storeStructType.addField(new com.yahoo.document.Field("city", DataType.STRING));
storeStructType.addField(new com.yahoo.document.Field("materials", materialsStructType));
docType.addField("store", storeStructType);
Document doc = new Document(docType, new DocumentId("id:map:album::1"));
doc.setFieldValue("title", new StringFieldValue("Black Rock"));
doc.setFieldValue("artist", new StringFieldValue("Joe Bonamassa"));
Struct material = new Struct(materialsStructType);
material.setFieldValue("ceiling", new StringFieldValue("wood"));
material.setFieldValue("walls", new StringFieldValue("brick"));
Struct store = new Struct(storeStructType);
store.setFieldValue("name", new StringFieldValue("Platekompaniet"));
store.setFieldValue("city", new StringFieldValue("Trondheim"));
store.setFieldValue(storeStructType.getField("materials"), material);
doc.setFieldValue(docType.getField("store"), store);
// 't' maps to a top-level field, 'c' and 'w' map through struct paths.
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("c", "store.city");
fieldMap.put("w", "store.materials.walls");
ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
// Reads through the proxy resolve to the nested values.
assertEquals(new StringFieldValue("Trondheim"), mapped.getFieldValue("c"));
assertEquals(new StringFieldValue("Black Rock"), mapped.getFieldValue("t"));
assertEquals(new StringFieldValue("brick"), mapped.getFieldValue("w"));
assertEquals(new StringFieldValue("brick"), material.getFieldValue("walls"));
// Writes through the proxy land in the underlying structs.
mapped.setFieldValue("c", new StringFieldValue("Steinkjer"));
mapped.setFieldValue("w", new StringFieldValue("plaster"));
assertEquals(new StringFieldValue("plaster"), mapped.getFieldValue("w"));
assertEquals(new StringFieldValue("plaster"), material.getFieldValue("walls"));
assertEquals(new StringFieldValue("Steinkjer"), store.getFieldValue("city"));
assertEquals(new StringFieldValue("Steinkjer"), mapped.getFieldValue("c"));
assertEquals(new StringFieldValue("Steinkjer"), mapped.getFieldValue("c"));
mapped.setFieldValue("c", new StringFieldValue("Levanger"));
assertEquals(new StringFieldValue("Levanger"), store.getFieldValue("city"));
assertEquals(new StringFieldValue("Levanger"), mapped.getFieldValue("c"));
// String concatenation on a read-back value round-trips through the mapping.
mapped.setFieldValue("c", mapped.getFieldValue("c") + "Kommune");
assertEquals(new StringFieldValue("LevangerKommune"), mapped.getFieldValue("c"));
}
/**
 * Exercises SchemaMap lookups: mappings are keyed by (chain, docproc) and then by
 * (doctype, processor-side name); unknown chains/docprocs yield empty maps, and a
 * DocumentProcessor exposes the per-doctype view via getDocMap().
 */
@Test
public void testSchemaMap() {
SchemaMap map = new SchemaMap();
map.addMapping("mychain", "com.yahoo.MyDocProc", "mydoctype", "inDoc1", "inProc1");
map.addMapping("mychain", "com.yahoo.MyDocProc", "mydoctype", "inDoc2", "inProc2");
Map<Pair<String, String>, String> cMap = map.chainMap("mychain", "com.yahoo.MyDocProc");
assertEquals("inDoc1", cMap.get(new Pair<>("mydoctype", "inProc1")));
assertEquals("inDoc2", cMap.get(new Pair<>("mydoctype", "inProc2")));
// Unknown doctype has no mapping.
assertNull(cMap.get(new Pair<>("invalidtype", "inProc2")));
// Unknown chain or docproc yields an empty (not null) map.
Map<Pair<String, String>, String> noMap = map.chainMap("invalidchain", "com.yahoo.MyDocProc");
Map<Pair<String, String>, String> noMap2 = map.chainMap("mychain", "com.yahoo.MyInvalidDocProc");
assertTrue(noMap.isEmpty());
assertTrue(noMap2.isEmpty());
DocumentProcessor proc = new TestDocumentProcessor1();
proc.setFieldMap(cMap);
Map<String, String> dMap = proc.getDocMap("mydoctype");
assertEquals("inDoc1", dMap.get("inProc1"));
assertEquals("inDoc2", dMap.get("inProc2"));
}
/**
 * Verifies SchemaMapKey equality: keys are equal iff all four components
 * (chain, docproc, doctype, from) match.
 */
@Test
public void testSchemaMapKey() {
SchemaMap map = new SchemaMap();
SchemaMap.SchemaMapKey key1 = map.new SchemaMapKey("chain", "docproc", "doctype", "from");
SchemaMap.SchemaMapKey key1_1 = map.new SchemaMapKey("chain", "docproc", "doctype", "from");
SchemaMap.SchemaMapKey key2 = map.new SchemaMapKey("chain", "docproc", "doctype2", "from");
// assertEquals reports both values on failure, unlike assertTrue(a.equals(b)).
assertEquals(key1, key1_1);
assertFalse(key1.equals(key2));
}
/**
 * Verifies that SchemaMap can be populated from a generated SchemamappingConfig
 * instead of programmatic addMapping() calls.
 */
@Test
public void testSchemaMapConfig() {
SchemaMap map = new SchemaMap();
SchemamappingConfig.Builder scb = new SchemamappingConfig.Builder();
scb.fieldmapping(new SchemamappingConfig.Fieldmapping.Builder().chain("mychain").docproc("mydocproc").doctype("mydoctype").
indocument("myindoc").inprocessor("myinprocessor"));
map.configure(new SchemamappingConfig(scb));
// The configured mapping must be retrievable through the normal chainMap lookup.
assertEquals(map.chainMap("mychain", "mydocproc").get(new Pair<>("mydoctype", "myinprocessor")), "myindoc");
}
/**
 * Verifies that mappings registered with a null doctype still resolve when the
 * processor asks for a concrete doctype ("mydoctype") — i.e. null acts as a wildcard here.
 */
@Test
public void testSchemaMapNoDocType() {
SchemaMap map = new SchemaMap();
map.addMapping("mychain", "com.yahoo.MyDocProc", null, "inDoc1", "inProc1");
map.addMapping("mychain", "com.yahoo.MyDocProc", null, "inDoc2", "inProc2");
Map<Pair<String, String>, String> cMap = map.chainMap("mychain", "com.yahoo.MyDocProc");
DocumentProcessor proc = new TestDocumentProcessor1();
proc.setFieldMap(cMap);
Map<String, String> dMap = proc.getDocMap("mydoctype");
assertEquals("inDoc1", dMap.get("inProc1"));
assertEquals("inDoc2", dMap.get("inProc2"));
}
/**
 * Happy path for the @Accesses check: TestDPSecure declares access to 'titleMapped'
 * and only touches that (mapped) field, so processing succeeds and the writes land
 * in the underlying 'title' field.
 */
@Test
public void testProxyAndSecure() {
DocumentProcessor procOK = new TestDPSecure();
// Map the processor-side name 'titleMapped' to document field 'title' for doctype 'album'.
Map<Pair<String, String>, String> fieldMap = new HashMap<>();
fieldMap.put(new Pair<>("album", "titleMapped"), "title");
procOK.setFieldMap(fieldMap);
DocumentPut put = new DocumentPut(getDoc());
Document proxyDoc = new Call(procOK).configDoc(procOK, put).getDocument();
procOK.process(Processing.of(new DocumentPut(proxyDoc)));
// TestDPSecure writes "MyTitle" and then appends " MyTitle" through the mapped name.
assertEquals(proxyDoc.getFieldValue("title").toString(), "MyTitle MyTitle");
}
/**
 * Failure path for the @Accesses check: TestDPInsecure declares access only to
 * 'titleMappedFoo' but writes 'titleMapped', so processing must be rejected.
 */
@Test
public void testProxyAndSecureSecureFailing() {
DocumentProcessor procInsecure = new TestDPInsecure();
Map<Pair<String, String>, String> fieldMap = new HashMap<>();
fieldMap.put(new Pair<>("album", "titleMapped"), "title");
procInsecure.setFieldMap(fieldMap);
DocumentPut put = new DocumentPut(getDoc());
Document doc = new Call(procInsecure).configDoc(procInsecure, put).getDocument();
try {
procInsecure.process(Processing.of(new DocumentPut(doc)));
fail("Insecure docproc went through");
} catch (Exception e) {
// The access-violation message is expected to mention what is (not) allowed.
assertTrue(e.getMessage().matches(".*allowed.*"));
}
}
/**
 * To make it less likely to break schema mapping, we enforce that ProxyDocument wraps every public
 * non-static, non-final method on Document and StructuredFieldValue.
 */
@Test
public void testVerifyProxyDocumentOverridesEverything() {
// All public methods ProxyDocument declares itself.
List<Method> allPublicFromProxyDocument = new ArrayList<>();
for (Method m : ProxyDocument.class.getDeclaredMethods()) {
if (Modifier.isPublic(m.getModifiers())) {
allPublicFromProxyDocument.add(m);
}
}
// All methods of Document and StructuredFieldValue that must be wrapped.
List<Method> allPublicFromDoc = new ArrayList<>();
for (Method m : Document.class.getDeclaredMethods()) {
if (mustBeOverriddenInProxyDocument(m)) {
allPublicFromDoc.add(m);
}
}
for (Method m : StructuredFieldValue.class.getDeclaredMethods()) {
if (mustBeOverriddenInProxyDocument(m)) {
allPublicFromDoc.add(m);
}
}
// Every required method must have a name+parameter match in ProxyDocument.
for (Method m : allPublicFromDoc) {
boolean overridden = false;
for (Method pdM : allPublicFromProxyDocument) {
if (sameNameAndParams(m, pdM)) {
overridden = true;
break; // found the override; no need to scan further
}
}
if (!overridden) {
throw new RuntimeException("ProxyDocument must override all public methods from Document. " +
"Missing: '" + m + "'. If the method doesn't need field mapping or @Accesses check, just " +
"override it and delegate the call to 'doc'.");
}
}
}
/** Returns whether this method is public, non-static and non-final, i.e. must be wrapped by ProxyDocument. */
private boolean mustBeOverriddenInProxyDocument(Method m) {
int mods = m.getModifiers();
return Modifier.isPublic(mods) && !Modifier.isStatic(mods) && !Modifier.isFinal(mods);
}
/** Returns whether the two methods have the same name and identical parameter type lists. */
private boolean sameNameAndParams(Method m1, Method m2) {
// Arrays.equals does the length check plus element-wise equals in one call.
return m1.getName().equals(m2.getName())
&& java.util.Arrays.equals(m1.getParameterTypes(), m2.getParameterTypes());
}
@Accesses(value = { @Field(dataType = "String", description = "", name = "titleMapped") })
public static class TestDPSecure extends DocumentProcessor {
// Declares access to 'titleMapped' and only touches that field, so the @Accesses check passes.
public Progress process(Processing processing) {
Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
document.setFieldValue("titleMapped", new StringFieldValue("MyTitle"));
// Append through a read-back to exercise read + write on the mapped field.
document.setFieldValue("titleMapped", new StringFieldValue(document.getFieldValue("titleMapped").toString() + " MyTitle"));
return Progress.DONE;
}
}
@Accesses(value = { @Field(dataType = "String", description = "", name = "titleMappedFoo") })
public static class TestDPInsecure extends DocumentProcessor {
// Deliberately violates its own @Accesses declaration: it declares 'titleMappedFoo'
// but writes 'titleMapped', so the access check is expected to reject it.
public Progress process(Processing processing) {
Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
document.setFieldValue("titleMapped", new StringFieldValue("MyTitle"));
document.setFieldValue("titleMapped", new StringFieldValue(document.getFieldValue("titleMapped").toString() + " MyTitle"));
return Progress.DONE;
}
}
public static class TestMappingArrayProcessor extends DocumentProcessor {
// Writes "EMI" into the mapped name 'label'; tests map it to an array element such as labels[0].
public Progress process(Processing processing) {
Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
document.setFieldValue("label", new StringFieldValue("EMI"));
return Progress.DONE;
}
}
public static class TestMappingStructInArrayProcessor extends DocumentProcessor {
// Writes "peter" into the mapped name 'name'; tests map it to a struct member inside
// an array, e.g. listeners[0].firstname.
public Progress process(Processing processing) {
// (Fixed stray double semicolon after getDocument().)
Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
document.setFieldValue("name", new StringFieldValue("peter"));
return Progress.DONE;
}
}
public static class TestRemovingMappingStructInArrayProcessor extends DocumentProcessor {
// Removes the mapped name 'name'; tests map it to a struct member inside an array
// (e.g. listeners[1].lastname) to verify removal through the mapping.
public Progress process(Processing processing) {
Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
document.removeFieldValue("name");
return Progress.DONE;
}
}
} |
Use Objects.hash()? | public int hashCode() {
int result = totalCount;
result = 31 * result + instanceCounts.hashCode();
return result;
} | result = 31 * result + instanceCounts.hashCode(); | public int hashCode() {
return Objects.hash(totalCount, instanceCounts);
} | class Ec2InstanceCounts {
private final int totalCount;
private final Map<String, Integer> instanceCounts;
private Ec2InstanceCounts(int totalCount, Map<String, Integer> instanceCounts) {
this.totalCount = totalCount;
this.instanceCounts = Map.copyOf(instanceCounts);
}
public int getTotalCount() {
return totalCount;
}
public Map<String, Integer> getInstanceCounts() {
return instanceCounts;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Ec2InstanceCounts that = (Ec2InstanceCounts) o;
if (totalCount != that.totalCount) return false;
return instanceCounts.equals(that.instanceCounts);
}
@Override
public String toString() {
// NOTE(review): the literal says "Ec2InstanceLimits"/"totalLimit" while the class is
// Ec2InstanceCounts — confirm nothing parses this string before renaming it.
return "Ec2InstanceLimits{" +
"totalLimit=" + totalCount +
", instanceCounts=" + instanceCounts +
'}';
}
} | class Ec2InstanceCounts {
private final int totalCount;
private final Map<String, Integer> instanceCounts;
private Ec2InstanceCounts(int totalCount, Map<String, Integer> instanceCounts) {
this.totalCount = totalCount;
this.instanceCounts = Map.copyOf(instanceCounts);
}
public int getTotalCount() {
return totalCount;
}
/** Returns map of counts by instance type, e.g. 'r5.2xlarge' */
public Map<String, Integer> getInstanceCounts() {
return instanceCounts;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Ec2InstanceCounts that = (Ec2InstanceCounts) o;
return totalCount == that.totalCount &&
instanceCounts.equals(that.instanceCounts);
}
@Override
public String toString() {
// NOTE(review): the literal says "Ec2InstanceLimits"/"totalLimit" while the class is
// Ec2InstanceCounts — confirm nothing parses this string before renaming it.
return "Ec2InstanceLimits{" +
"totalLimit=" + totalCount +
", instanceCounts=" + instanceCounts +
'}';
}
} |
this seems a bit strange. The target port is likely https only, but scheme can be http in cases when the sslcontextfactory is missing? | CloseableHttpResponse requestStatusHtml() throws IOException {
String scheme = sslContextFactory != null ? "https" : "http";
HttpGet request = new HttpGet(scheme + ":
request.setHeader("Connection", "Close");
return client().execute(request);
} | String scheme = sslContextFactory != null ? "https" : "http"; | CloseableHttpResponse requestStatusHtml() throws IOException {
HttpGet request = new HttpGet("https:
request.setHeader("Connection", "Close");
return client().execute(request);
} | class ProxyTarget implements AutoCloseable {
final int port;
final SslContextFactory sslContextFactory;
volatile CloseableHttpClient client;
ProxyTarget(int port, SslContextFactory sslContextFactory) {
this.port = port;
this.sslContextFactory = sslContextFactory;
}
/**
 * Lazily builds the shared HTTP client using double-checked locking on the
 * volatile 'client' field, so at most one instance is created.
 */
private CloseableHttpClient client() {
if (client == null) {
synchronized (this) {
if (client == null) {
client = HttpClientBuilder.create()
// No retries and no connection reuse: each request opens a fresh connection.
.disableAutomaticRetries()
.setConnectionReuseStrategy(NoConnectionReuseStrategy.INSTANCE)
// Falls back to no SSL context when no factory was supplied.
.setSslcontext(sslContextFactory != null ? sslContextFactory.getSslContext() : null)
// Hostname verification disabled — acceptable for a test client only.
.setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE)
.setUserTokenHandler(context -> null)
.build();
}
}
}
return client;
}
@Override
public void close() throws IOException {
// Synchronizes on the same monitor as client() so close cannot race lazy creation.
synchronized (this) {
if (client != null) {
client.close();
}
}
}
} | class ProxyTarget implements AutoCloseable {
final int port;
final SslContextFactory sslContextFactory;
volatile CloseableHttpClient client;
ProxyTarget(int port, SslContextFactory sslContextFactory) {
this.port = port;
this.sslContextFactory = sslContextFactory;
}
/**
 * Lazily builds the shared HTTP client using double-checked locking on the
 * volatile 'client' field, so at most one instance is created.
 */
private CloseableHttpClient client() {
if (client == null) {
synchronized (this) {
if (client == null) {
client = HttpClientBuilder.create()
// No retries and no connection reuse: each request opens a fresh connection.
.disableAutomaticRetries()
.setConnectionReuseStrategy(NoConnectionReuseStrategy.INSTANCE)
// NOTE(review): no null check here (unlike an earlier variant), so a non-null
// sslContextFactory is required — confirm callers always supply one.
.setSslcontext(sslContextFactory.getSslContext())
// Hostname verification disabled — acceptable for a test client only.
.setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE)
.setUserTokenHandler(context -> null)
.build();
}
}
}
return client;
}
@Override
public void close() throws IOException {
// Synchronizes on the same monitor as client() so close cannot race lazy creation.
synchronized (this) {
if (client != null) {
client.close();
}
}
}
} |
Yes, `sslContextFactory` can be null and it's handled correctly. I have removed http support anyway as it will not be tested on factory, and it does not really make sense to use in production. | CloseableHttpResponse requestStatusHtml() throws IOException {
String scheme = sslContextFactory != null ? "https" : "http";
HttpGet request = new HttpGet(scheme + ":
request.setHeader("Connection", "Close");
return client().execute(request);
} | String scheme = sslContextFactory != null ? "https" : "http"; | CloseableHttpResponse requestStatusHtml() throws IOException {
HttpGet request = new HttpGet("https:
request.setHeader("Connection", "Close");
return client().execute(request);
} | class ProxyTarget implements AutoCloseable {
final int port;
final SslContextFactory sslContextFactory;
volatile CloseableHttpClient client;
ProxyTarget(int port, SslContextFactory sslContextFactory) {
this.port = port;
this.sslContextFactory = sslContextFactory;
}
private CloseableHttpClient client() {
if (client == null) {
synchronized (this) {
if (client == null) {
client = HttpClientBuilder.create()
.disableAutomaticRetries()
.setConnectionReuseStrategy(NoConnectionReuseStrategy.INSTANCE)
.setSslcontext(sslContextFactory != null ? sslContextFactory.getSslContext() : null)
.setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE)
.setUserTokenHandler(context -> null)
.build();
}
}
}
return client;
}
@Override
public void close() throws IOException {
synchronized (this) {
if (client != null) {
client.close();
}
}
}
} | class ProxyTarget implements AutoCloseable {
final int port;
final SslContextFactory sslContextFactory;
volatile CloseableHttpClient client;
ProxyTarget(int port, SslContextFactory sslContextFactory) {
this.port = port;
this.sslContextFactory = sslContextFactory;
}
private CloseableHttpClient client() {
if (client == null) {
synchronized (this) {
if (client == null) {
client = HttpClientBuilder.create()
.disableAutomaticRetries()
.setConnectionReuseStrategy(NoConnectionReuseStrategy.INSTANCE)
.setSslcontext(sslContextFactory.getSslContext())
.setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE)
.setUserTokenHandler(context -> null)
.build();
}
}
}
return client;
}
@Override
public void close() throws IOException {
synchronized (this) {
if (client != null) {
client.close();
}
}
}
} |
Fixed | public void testMappedDocAPI() {
// Verifies that ProxyDocument delegates the whole Document API (field access, id,
// clone/equals/hashCode, serialization, XML, clear) to the wrapped document.
Document doc = getDoc();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("a", "artist");
ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
assertEquals(mapped.getFieldValue("title"), doc.getFieldValue("title"));
assertEquals(mapped.getFieldValue(new com.yahoo.document.Field("title")), doc.getFieldValue((new com.yahoo.document.Field("title"))));
mapped.setFieldValue("title", "foo");
assertEquals(doc.getFieldValue("title").getWrappedValue(), "foo");
assertEquals(mapped.getWrappedDocumentOperation().getId().toString(), "id:map:album::1");
assertEquals(doc, mapped);
assertEquals(doc.toString(), mapped.toString());
assertEquals(doc.hashCode(), mapped.hashCode());
assertEquals(doc.clone(), mapped.clone());
assertEquals(doc.iterator().hasNext(), mapped.iterator().hasNext());
assertEquals(doc.getId(), mapped.getId());
assertEquals(doc.getDataType(), mapped.getDataType());
// Mutations through the proxy must be visible on the wrapped document and vice versa.
mapped.setLastModified(56l);
assertEquals(doc.getLastModified(), (Long)56l);
assertEquals(mapped.getLastModified(), (Long)56l);
mapped.setId(new DocumentId("id:map:album::2"));
assertEquals(mapped.getId().toString(), "id:map:album::2");
assertEquals(doc.getId().toString(), "id:map:album::2");
assertEquals(doc.getHeader(), mapped.getHeader());
assertEquals(doc.getBody(), mapped.getBody());
assertEquals(doc.getSerializedSize(), mapped.getSerializedSize());
// Serializing through the proxy must yield exactly the wrapped document's bytes.
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ByteArrayOutputStream bos2 = new ByteArrayOutputStream();
mapped.serialize(bos);
doc.serialize(bos2);
assertEquals(bos.toString(), bos2.toString());
assertEquals(mapped.toXml(), doc.toXml());
assertEquals(mapped.getFieldCount(), doc.getFieldCount());
assertTrue(mapped.getDocument()==doc);
mapped.clear();
assertNull(mapped.getFieldValue("title"));
assertNull(doc.getFieldValue("title"));
try {
mapped.setDataType(new DocumentType("newType"));
// Typo fixed: "bale" -> "able".
fail("Should not be able to set new type");
} catch (IllegalArgumentException e) {
assertEquals("Trying to set a document type (newType) that doesn't match the document id (id:map:album::2).", e.getMessage());
}
assertEquals(doc.getDataType().getName(), "album");
} | fail("Should not be bale to set new type"); | public void testMappedDocAPI() {
Document doc = getDoc();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("a", "artist");
ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
assertEquals(mapped.getFieldValue("title"), doc.getFieldValue("title"));
assertEquals(mapped.getFieldValue(new com.yahoo.document.Field("title")), doc.getFieldValue((new com.yahoo.document.Field("title"))));
mapped.setFieldValue("title", "foo");
assertEquals(doc.getFieldValue("title").getWrappedValue(), "foo");
assertEquals(mapped.getWrappedDocumentOperation().getId().toString(), "id:map:album::1");
assertEquals(doc, mapped);
assertEquals(doc.toString(), mapped.toString());
assertEquals(doc.hashCode(), mapped.hashCode());
assertEquals(doc.clone(), mapped.clone());
assertEquals(doc.iterator().hasNext(), mapped.iterator().hasNext());
assertEquals(doc.getId(), mapped.getId());
assertEquals(doc.getDataType(), mapped.getDataType());
mapped.setLastModified(56l);
assertEquals(doc.getLastModified(), (Long)56l);
assertEquals(mapped.getLastModified(), (Long)56l);
mapped.setId(new DocumentId("id:map:album::2"));
assertEquals(mapped.getId().toString(), "id:map:album::2");
assertEquals(doc.getId().toString(), "id:map:album::2");
assertEquals(doc.getHeader(), mapped.getHeader());
assertEquals(doc.getBody(), mapped.getBody());
assertEquals(doc.getSerializedSize(), mapped.getSerializedSize());
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ByteArrayOutputStream bos2 = new ByteArrayOutputStream();
mapped.serialize(bos);
doc.serialize(bos2);
assertEquals(bos.toString(), bos2.toString());
assertEquals(mapped.toXml(), doc.toXml());
assertEquals(mapped.getFieldCount(), doc.getFieldCount());
assertTrue(mapped.getDocument()==doc);
mapped.clear();
assertNull(mapped.getFieldValue("title"));
assertNull(doc.getFieldValue("title"));
try {
mapped.setDataType(new DocumentType("newType"));
fail("Should not be able to set new type");
} catch (IllegalArgumentException e) {
assertEquals("Trying to set a document type (newType) that doesn't match the document id (id:map:album::2).", e.getMessage());
}
assertEquals(doc.getDataType().getName(), "album");
} | class SchemaMappingAndAccessesTest {
/**
 * Builds the shared 'album' test document: simple string/int fields, an annotated
 * 'artist' value, a string array 'labels', and an array of 'artist' structs in 'listeners'.
 */
private Document getDoc() {
DocumentType type = new DocumentType("album");
AnnotationType personType = new AnnotationType("person");
Annotation person = new Annotation(personType);
type.addField("title", DataType.STRING);
type.addField("artist", DataType.STRING);
type.addField("guitarist", DataType.STRING);
type.addField("year", DataType.INT);
type.addField("labels", DataType.getArray(DataType.STRING));
Document doc = new Document(type, new DocumentId("id:map:album::1"));
doc.setFieldValue("title", new StringFieldValue("Black Rock"));
// 'artist' carries a span tree with a 'person' annotation for the annotation-mapping tests.
StringFieldValue joe = new StringFieldValue("Joe Bonamassa");
joe.setSpanTree(new SpanTree("mytree").annotate(person));
doc.setFieldValue("artist", joe);
doc.setFieldValue("year", new IntegerFieldValue(2010));
Array<StringFieldValue> labels = new Array<>(type.getField("labels").getDataType());
labels.add(new StringFieldValue("audun"));
labels.add(new StringFieldValue("tylden"));
doc.setFieldValue("labels", labels);
// 'listeners' is an array of structs, used by the struct-in-array mapping tests.
StructDataType personStructType = new StructDataType("artist");
personStructType.addField(new com.yahoo.document.Field("firstname", DataType.STRING));
personStructType.addField(new com.yahoo.document.Field("lastname", DataType.STRING));
type.addField("listeners", DataType.getArray(personStructType));
Array<Struct> listeners = new Array<>(type.getField("listeners").getDataType());
Struct listenerOne = new Struct(personStructType);
listenerOne.setFieldValue("firstname", new StringFieldValue("per"));
listenerOne.setFieldValue("lastname", new StringFieldValue("olsen"));
Struct listenerTwo = new Struct(personStructType);
listenerTwo.setFieldValue("firstname", new StringFieldValue("anders"));
listenerTwo.setFieldValue("lastname", new StringFieldValue("and"));
listeners.add(listenerOne);
listeners.add(listenerTwo);
doc.setFieldValue("listeners", listeners);
return doc;
}
@Test
public void testMappingArrays() {
Document doc = getDoc();
DocumentProcessor proc = new TestMappingArrayProcessor();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("label", "labels[0]");
ProxyDocument mapped = new ProxyDocument(proc, doc, fieldMap);
Processing p = Processing.of(new DocumentPut(mapped));
proc.process(p);
assertEquals(2, ((Array<StringFieldValue>) doc.getFieldValue("labels")).size());
assertEquals(new StringFieldValue("EMI"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(0));
assertEquals(new StringFieldValue("tylden"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(1));
fieldMap.clear();
fieldMap.put("label", "labels[2]");
mapped = new ProxyDocument(proc, doc, fieldMap);
p = Processing.of(new DocumentPut(mapped));
try {
proc.process(p);
fail("Should not have worked");
} catch (IllegalArgumentException iae) {
}
assertEquals(2, ((Array<StringFieldValue>) doc.getFieldValue("labels")).size());
assertEquals(new StringFieldValue("EMI"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(0));
assertEquals(new StringFieldValue("tylden"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(1));
}
@Test
public void testMappingStructsInArrays() {
Document doc = getDoc();
DocumentProcessor proc = new TestMappingStructInArrayProcessor();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("name", "listeners[0].firstname");
ProxyDocument mapped = new ProxyDocument(proc, doc, fieldMap);
Processing p = Processing.of(new DocumentPut(mapped));
proc.process(p);
assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
assertEquals("and", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname")).getString()));
fieldMap.clear();
fieldMap.put("name", "listeners[2].firstname");
mapped = new ProxyDocument(proc, doc, fieldMap);
p = Processing.of(new DocumentPut(mapped));
try {
proc.process(p);
fail("Should not have worked");
} catch (IllegalArgumentException iae) {
}
assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
assertEquals("and", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname")).getString()));
proc = new TestRemovingMappingStructInArrayProcessor();
fieldMap.clear();
fieldMap.put("name", "listeners[1].lastname");
mapped = new ProxyDocument(proc, doc, fieldMap);
p = Processing.of(new DocumentPut(mapped));
proc.process(p);
assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
assertNull(((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname"));
fieldMap.clear();
fieldMap.put("name", "listeners[2].lastname");
mapped = new ProxyDocument(proc, doc, fieldMap);
p = Processing.of(new DocumentPut(mapped));
try {
proc.process(p);
fail("Should not have worked");
} catch (IllegalArgumentException iae) {
}
assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
assertNull(((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname"));
}
@Test
public void testMappingSpanTrees() {
Document doc = getDoc();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("a", "artist");
fieldMap.put("g", "guitarist");
ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
Iterator<SpanTree> itSpanTreesDoc = ((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator();
Iterator<Annotation> itAnnotDoc = itSpanTreesDoc.next().iterator();
Iterator<SpanTree> itSpanTreesMapped = ((StringFieldValue) mapped.getFieldValue("artist")).getSpanTrees().iterator();
Iterator<Annotation> itAnnotMapped = itSpanTreesMapped.next().iterator();
assertEquals(itAnnotDoc.next().getType().getName(), "person");
assertFalse(itAnnotDoc.hasNext());
assertEquals(itAnnotMapped.next().getType().getName(), "person");
assertFalse(itAnnotMapped.hasNext());
AnnotationType guitaristType = new AnnotationType("guitarist");
Annotation guitarist = new Annotation(guitaristType);
StringFieldValue bona = new StringFieldValue("Bonamassa");
bona.setSpanTree(new SpanTree("mytree").annotate(guitarist));
StringFieldValue clapton = new StringFieldValue("Clapton");
mapped.setFieldValue("a", bona);
mapped.setFieldValue("g", clapton);
itSpanTreesDoc = ((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator();
itAnnotDoc = itSpanTreesDoc.next().iterator();
itSpanTreesMapped = ((StringFieldValue) mapped.getFieldValue("artist")).getSpanTrees().iterator();
itAnnotMapped = itSpanTreesMapped.next().iterator();
assertEquals(itAnnotDoc.next().getType().getName(), "guitarist");
assertFalse(itAnnotDoc.hasNext());
assertEquals(itAnnotMapped.next().getType().getName(), "guitarist");
assertFalse(itAnnotMapped.hasNext());
assertSame(((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator().next(), ((StringFieldValue) mapped.getFieldValue("a")).getSpanTrees().iterator().next());
}
@Test
public void testMappedDoc() {
Document doc = getDoc();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("a", "artist");
ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
assertEquals(new StringFieldValue("Black Rock"), mapped.getFieldValue("t"));
assertEquals(new StringFieldValue("Joe Bonamassa").getWrappedValue(), mapped.getFieldValue("a").getWrappedValue());
mapped.setFieldValue("t", new StringFieldValue("The Ballad Of John Henry"));
StringFieldValue bona = new StringFieldValue("Bonamassa");
mapped.setFieldValue("a", bona);
assertEquals(new StringFieldValue("The Ballad Of John Henry"), doc.getFieldValue("title"));
assertEquals(new StringFieldValue("The Ballad Of John Henry"), mapped.getFieldValue("t"));
assertEquals(new StringFieldValue("Bonamassa"), doc.getFieldValue("artist"));
assertEquals(new StringFieldValue("Bonamassa"), mapped.getFieldValue("a"));
mapped.setFieldValue("a", mapped.getFieldValue("a") + "Hughes");
assertEquals(new StringFieldValue("BonamassaHughes"), mapped.getFieldValue("a"));
StringFieldValue unmapped1 = (StringFieldValue) doc.getFieldValue("artist");
StringFieldValue unmapped2 = (StringFieldValue) doc.getFieldValue("artist");
assertTrue(unmapped1==unmapped2);
unmapped1.setSpanTree(new SpanTree("test"));
assertEquals(unmapped2.getSpanTree("test").getName(), "test");
StringFieldValue mapped1 = (StringFieldValue) mapped.getFieldValue("a");
mapped1.setSpanTree(new SpanTree("test2"));
StringFieldValue mapped2 = (StringFieldValue) mapped.getFieldValue("a");
assertTrue(mapped1==mapped2);
assertEquals(mapped2.getSpanTree("test2").getName(), "test2");
mapped.removeFieldValue("a");
assertEquals(mapped.getFieldValue("a"), null);
mapped.removeFieldValue(mapped.getField("t"));
assertEquals(mapped.getFieldValue("t"), null);
mapped.setFieldValue("a", new StringFieldValue("Bonamassa"));
assertEquals(new StringFieldValue("Bonamassa"), doc.getFieldValue("artist"));
mapped.removeFieldValue("a");
assertEquals(mapped.getFieldValue("a"), null);
}
/**
 * Checks that ProxyDocumentUpdate behaves exactly like the DocumentUpdate it wraps:
 * field updates, document type, id, applyTo, equals/hashCode/toString and size are all delegated.
 */
@Test
@SuppressWarnings("deprecation")
public void testMappedDocUpdateAPI() {
Document doc = getDoc();
DocumentType type = doc.getDataType();
DocumentUpdate dud = new DocumentUpdate(type, new DocumentId("id:map:album::1"));
com.yahoo.document.Field title = type.getField("title");
FieldUpdate assignSingle = FieldUpdate.createAssign(title, new StringFieldValue("something"));
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("a", "artist");
ProxyDocumentUpdate pup = new ProxyDocumentUpdate(dud, fieldMap);
pup.addFieldUpdate(assignSingle);
// Every accessor on the proxy must mirror the wrapped update.
assertEquals(pup.fieldUpdates().toString(), dud.fieldUpdates().toString());
assertEquals(pup.getDocumentType(), dud.getDocumentType());
assertEquals(pup.getFieldUpdate(title).size(), 1);
assertEquals(pup.getFieldUpdate(title), dud.fieldUpdates().iterator().next());
assertEquals(pup.getFieldUpdate("title"), dud.getFieldUpdate("title"));
assertEquals(pup.getId(), dud.getId());
assertEquals(pup.getType(), dud.getType());
// Applying through the proxy must mutate the real document.
assertEquals(pup.applyTo(doc), dud);
assertEquals(doc.getFieldValue("title").getWrappedValue(), "something");
assertEquals(pup, dud);
assertEquals(pup.hashCode(), dud.hashCode());
assertEquals(pup.toString(), dud.toString());
assertEquals(pup.size(), dud.size());
assertEquals(pup.getWrappedDocumentOperation().getId().toString(), "id:map:album::1");
}
@Test
// Schema mapping into nested structs: mapped names may address struct members
// via dotted paths ("store.city", "store.materials.walls"). Reads and writes
// through the proxy must be reflected in the underlying Struct instances.
public void testMappedDocStruct() {
StructDataType materialsStructType = new StructDataType("materialstype");
materialsStructType.addField(new com.yahoo.document.Field("ceiling", DataType.STRING));
materialsStructType.addField(new com.yahoo.document.Field("walls", DataType.STRING));
DocumentType docType = new DocumentType("album");
docType.addField("title", DataType.STRING);
docType.addField("artist", DataType.STRING);
StructDataType storeStructType = new StructDataType("storetype");
storeStructType.addField(new com.yahoo.document.Field("name", DataType.STRING));
storeStructType.addField(new com.yahoo.document.Field("city", DataType.STRING));
storeStructType.addField(new com.yahoo.document.Field("materials", materialsStructType));
docType.addField("store", storeStructType);
// Build a document with a struct ("store") containing a nested struct ("materials").
Document doc = new Document(docType, new DocumentId("id:map:album::1"));
doc.setFieldValue("title", new StringFieldValue("Black Rock"));
doc.setFieldValue("artist", new StringFieldValue("Joe Bonamassa"));
Struct material = new Struct(materialsStructType);
material.setFieldValue("ceiling", new StringFieldValue("wood"));
material.setFieldValue("walls", new StringFieldValue("brick"));
Struct store = new Struct(storeStructType);
store.setFieldValue("name", new StringFieldValue("Platekompaniet"));
store.setFieldValue("city", new StringFieldValue("Trondheim"));
store.setFieldValue(storeStructType.getField("materials"), material);
doc.setFieldValue(docType.getField("store"), store);
// Map short processor-side names to top-level and nested document fields.
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("c", "store.city");
fieldMap.put("w", "store.materials.walls");
ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
// Reads through the proxy resolve the dotted paths.
assertEquals(new StringFieldValue("Trondheim"), mapped.getFieldValue("c"));
assertEquals(new StringFieldValue("Black Rock"), mapped.getFieldValue("t"));
assertEquals(new StringFieldValue("brick"), mapped.getFieldValue("w"));
assertEquals(new StringFieldValue("brick"), material.getFieldValue("walls"));
// Writes through the proxy must be visible both via the proxy and in the
// underlying Struct instances.
mapped.setFieldValue("c", new StringFieldValue("Steinkjer"));
mapped.setFieldValue("w", new StringFieldValue("plaster"));
assertEquals(new StringFieldValue("plaster"), mapped.getFieldValue("w"));
assertEquals(new StringFieldValue("plaster"), material.getFieldValue("walls"));
assertEquals(new StringFieldValue("Steinkjer"), store.getFieldValue("city"));
assertEquals(new StringFieldValue("Steinkjer"), mapped.getFieldValue("c"));
assertEquals(new StringFieldValue("Steinkjer"), mapped.getFieldValue("c"));
mapped.setFieldValue("c", new StringFieldValue("Levanger"));
assertEquals(new StringFieldValue("Levanger"), store.getFieldValue("city"));
assertEquals(new StringFieldValue("Levanger"), mapped.getFieldValue("c"));
// String concatenation uses the FieldValue's toString(); the result is
// written back as a new value through the mapping.
mapped.setFieldValue("c", mapped.getFieldValue("c") + "Kommune");
assertEquals(new StringFieldValue("LevangerKommune"), mapped.getFieldValue("c"));
}
@Test
// Mappings registered per (chain, docproc) are retrievable via chainMap(),
// keyed on (doctype, in-processor name). Unknown chains or docprocs yield
// empty maps, and a processor exposes the per-doctype view via getDocMap().
public void testSchemaMap() {
SchemaMap map = new SchemaMap();
map.addMapping("mychain", "com.yahoo.MyDocProc", "mydoctype", "inDoc1", "inProc1");
map.addMapping("mychain", "com.yahoo.MyDocProc", "mydoctype", "inDoc2", "inProc2");
Map<Pair<String, String>, String> cMap = map.chainMap("mychain", "com.yahoo.MyDocProc");
assertEquals("inDoc1", cMap.get(new Pair<>("mydoctype", "inProc1")));
assertEquals("inDoc2", cMap.get(new Pair<>("mydoctype", "inProc2")));
// A doctype that was never registered resolves to nothing.
assertNull(cMap.get(new Pair<>("invalidtype", "inProc2")));
// Unknown chain or docproc: empty (not null) maps.
Map<Pair<String, String>, String> noMap = map.chainMap("invalidchain", "com.yahoo.MyDocProc");
Map<Pair<String, String>, String> noMap2 = map.chainMap("mychain", "com.yahoo.MyInvalidDocProc");
assertTrue(noMap.isEmpty());
assertTrue(noMap2.isEmpty());
// The processor-side view keys on the in-processor name only.
DocumentProcessor proc = new TestDocumentProcessor1();
proc.setFieldMap(cMap);
Map<String, String> dMap = proc.getDocMap("mydoctype");
assertEquals("inDoc1", dMap.get("inProc1"));
assertEquals("inDoc2", dMap.get("inProc2"));
}
@Test
// Keys built from identical (chain, docproc, doctype, from) tuples must be
// equal; differing in any component (here: doctype) must make them unequal.
public void testSchemaMapKey() {
    SchemaMap map = new SchemaMap();
    SchemaMap.SchemaMapKey key = map.new SchemaMapKey("chain", "docproc", "doctype", "from");
    SchemaMap.SchemaMapKey equalKey = map.new SchemaMapKey("chain", "docproc", "doctype", "from");
    SchemaMap.SchemaMapKey otherDocType = map.new SchemaMapKey("chain", "docproc", "doctype2", "from");
    assertTrue(key.equals(equalKey));
    assertFalse(key.equals(otherDocType));
}
@Test
// A SchemamappingConfig built via its Builder must populate the SchemaMap
// the same way direct addMapping() calls do.
public void testSchemaMapConfig() {
SchemaMap map = new SchemaMap();
SchemamappingConfig.Builder scb = new SchemamappingConfig.Builder();
scb.fieldmapping(new SchemamappingConfig.Fieldmapping.Builder().chain("mychain").docproc("mydocproc").doctype("mydoctype").
indocument("myindoc").inprocessor("myinprocessor"));
map.configure(new SchemamappingConfig(scb));
assertEquals(map.chainMap("mychain", "mydocproc").get(new Pair<>("mydoctype", "myinprocessor")), "myindoc");
}
@Test
// A mapping registered with a null doctype applies to any document type:
// getDocMap("mydoctype") must still resolve the processor-side names.
public void testSchemaMapNoDocType() {
SchemaMap map = new SchemaMap();
map.addMapping("mychain", "com.yahoo.MyDocProc", null, "inDoc1", "inProc1");
map.addMapping("mychain", "com.yahoo.MyDocProc", null, "inDoc2", "inProc2");
Map<Pair<String, String>, String> cMap = map.chainMap("mychain", "com.yahoo.MyDocProc");
DocumentProcessor proc = new TestDocumentProcessor1();
proc.setFieldMap(cMap);
Map<String, String> dMap = proc.getDocMap("mydoctype");
assertEquals("inDoc1", dMap.get("inProc1"));
assertEquals("inDoc2", dMap.get("inProc2"));
}
@Test
public void testProxyAndSecure() {
    // A processor whose @Accesses declaration covers the (mapped) field it
    // writes must be allowed to update the document through the proxy.
    DocumentProcessor procOK = new TestDPSecure();
    Map<Pair<String, String>, String> fieldMap = new HashMap<>();
    fieldMap.put(new Pair<>("album", "titleMapped"), "title");
    procOK.setFieldMap(fieldMap);
    DocumentPut put = new DocumentPut(getDoc());
    Document proxyDoc = new Call(procOK).configDoc(procOK, put).getDocument();
    procOK.process(Processing.of(new DocumentPut(proxyDoc)));
    // JUnit convention: expected value first, actual second (was reversed,
    // which produces confusing failure messages).
    assertEquals("MyTitle MyTitle", proxyDoc.getFieldValue("title").toString());
}
@Test
// A processor whose @Accesses declaration does not cover the field it writes
// ("titleMappedFoo" declared vs "titleMapped" written, see TestDPInsecure)
// must be rejected when processing through the access-checking proxy.
public void testProxyAndSecureSecureFailing() {
DocumentProcessor procInsecure = new TestDPInsecure();
Map<Pair<String, String>, String> fieldMap = new HashMap<>();
fieldMap.put(new Pair<>("album", "titleMapped"), "title");
procInsecure.setFieldMap(fieldMap);
DocumentPut put = new DocumentPut(getDoc());
Document doc = new Call(procInsecure).configDoc(procInsecure, put).getDocument();
try {
procInsecure.process(Processing.of(new DocumentPut(doc)));
fail("Insecure docproc went through");
} catch (Exception e) {
// The exact exception type is not pinned down; only the message is checked.
assertTrue(e.getMessage().matches(".*allowed.*"));
}
}
/**
 * To make it less likely to break schema mapping, we enforce that ProxyDocument does wrap every public
 * non-static, non-final method on Document and StructuredFieldValue
 */
@Test
public void testVerifyProxyDocumentOverridesEverything() {
    // Every public method ProxyDocument declares itself.
    List<Method> allPublicFromProxyDocument = new ArrayList<>();
    for (Method m : ProxyDocument.class.getDeclaredMethods()) {
        if (Modifier.isPublic(m.getModifiers())) {
            allPublicFromProxyDocument.add(m);
        }
    }
    // Every overridable public method of Document and StructuredFieldValue.
    List<Method> allPublicFromDoc = new ArrayList<>();
    for (Method m : Document.class.getDeclaredMethods()) {
        if (mustBeOverriddenInProxyDocument(m)) {
            allPublicFromDoc.add(m);
        }
    }
    for (Method m : StructuredFieldValue.class.getDeclaredMethods()) {
        if (mustBeOverriddenInProxyDocument(m)) {
            allPublicFromDoc.add(m);
        }
    }
    // Each collected method must have a matching override in ProxyDocument.
    for (Method m : allPublicFromDoc) {
        boolean thisOneOk = false;
        for (Method pdM : allPublicFromProxyDocument) {
            if (sameNameAndParams(m, pdM)) {
                thisOneOk = true;
                break; // one match is enough
            }
        }
        if (!thisOneOk) {
            // Use fail() (as the other tests in this class do) instead of a
            // bare RuntimeException, so this is reported as an assertion
            // failure rather than a test error.
            fail("ProxyDocument must override all public methods from Document. " +
                 "Missing: '" + m + "'. If the method doesn't need field mapping or @Accesses check, just " +
                 "override it and delegate the call to 'doc'.");
        }
    }
}
/** True for public, non-static, non-final methods: those must be wrapped by ProxyDocument. */
private boolean mustBeOverriddenInProxyDocument(Method m) {
    int modifiers = m.getModifiers();
    return Modifier.isPublic(modifiers)
            && !Modifier.isStatic(modifiers)
            && !Modifier.isFinal(modifiers);
}
/**
 * Returns true if the two methods have the same name and the exact same
 * parameter list (i.e. they would be override-equivalent).
 */
private boolean sameNameAndParams(Method m1, Method m2) {
    // Arrays.equals replaces the manual length check + element-wise loop.
    // Fully qualified to avoid depending on an import of java.util.Arrays.
    return m1.getName().equals(m2.getName())
            && java.util.Arrays.equals(m1.getParameterTypes(), m2.getParameterTypes());
}
// Declares (via @Accesses) that it touches "titleMapped" and writes only that
// field, so processing through the access-checking proxy must succeed
// (see testProxyAndSecure).
@Accesses(value = { @Field(dataType = "String", description = "", name = "titleMapped") })
public static class TestDPSecure extends DocumentProcessor {
public Progress process(Processing processing) {
Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
document.setFieldValue("titleMapped", new StringFieldValue("MyTitle"));
// Read-modify-write of the same field: ends up as "MyTitle MyTitle".
document.setFieldValue("titleMapped", new StringFieldValue(document.getFieldValue("titleMapped").toString() + " MyTitle"));
return Progress.DONE;
}
}
// Declares access to "titleMappedFoo" but writes "titleMapped" — the mismatch
// between the @Accesses declaration and the field actually written is what
// makes this processor "insecure" (see testProxyAndSecureSecureFailing).
@Accesses(value = { @Field(dataType = "String", description = "", name = "titleMappedFoo") })
public static class TestDPInsecure extends DocumentProcessor {
public Progress process(Processing processing) {
Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
document.setFieldValue("titleMapped", new StringFieldValue("MyTitle"));
document.setFieldValue("titleMapped", new StringFieldValue(document.getFieldValue("titleMapped").toString() + " MyTitle"));
return Progress.DONE;
}
}
public static class TestMappingArrayProcessor extends DocumentProcessor {
public Progress process(Processing processing) {
Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
document.setFieldValue("label", new StringFieldValue("EMI"));
return Progress.DONE;
}
}
/** Writes "peter" into the (possibly mapped) "name" field. */
public static class TestMappingStructInArrayProcessor extends DocumentProcessor {
    public Progress process(Processing processing) {
        // A stray empty statement (";;") was removed from this line.
        Document document = ((DocumentPut) processing.getDocumentOperations().get(0)).getDocument();
        document.setFieldValue("name", new StringFieldValue("peter"));
        return Progress.DONE;
    }
}
// Removes the (possibly mapped) "name" field from the processed document.
public static class TestRemovingMappingStructInArrayProcessor extends DocumentProcessor {
public Progress process(Processing processing) {
Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
document.removeFieldValue("name");
return Progress.DONE;
}
}
} | class SchemaMappingAndAccessesTest {
private Document getDoc() {
DocumentType type = new DocumentType("album");
AnnotationType personType = new AnnotationType("person");
Annotation person = new Annotation(personType);
type.addField("title", DataType.STRING);
type.addField("artist", DataType.STRING);
type.addField("guitarist", DataType.STRING);
type.addField("year", DataType.INT);
type.addField("labels", DataType.getArray(DataType.STRING));
Document doc = new Document(type, new DocumentId("id:map:album::1"));
doc.setFieldValue("title", new StringFieldValue("Black Rock"));
StringFieldValue joe = new StringFieldValue("Joe Bonamassa");
joe.setSpanTree(new SpanTree("mytree").annotate(person));
doc.setFieldValue("artist", joe);
doc.setFieldValue("year", new IntegerFieldValue(2010));
Array<StringFieldValue> labels = new Array<>(type.getField("labels").getDataType());
labels.add(new StringFieldValue("audun"));
labels.add(new StringFieldValue("tylden"));
doc.setFieldValue("labels", labels);
StructDataType personStructType = new StructDataType("artist");
personStructType.addField(new com.yahoo.document.Field("firstname", DataType.STRING));
personStructType.addField(new com.yahoo.document.Field("lastname", DataType.STRING));
type.addField("listeners", DataType.getArray(personStructType));
Array<Struct> listeners = new Array<>(type.getField("listeners").getDataType());
Struct listenerOne = new Struct(personStructType);
listenerOne.setFieldValue("firstname", new StringFieldValue("per"));
listenerOne.setFieldValue("lastname", new StringFieldValue("olsen"));
Struct listenerTwo = new Struct(personStructType);
listenerTwo.setFieldValue("firstname", new StringFieldValue("anders"));
listenerTwo.setFieldValue("lastname", new StringFieldValue("and"));
listeners.add(listenerOne);
listeners.add(listenerTwo);
doc.setFieldValue("listeners", listeners);
return doc;
}
@Test
public void testMappingArrays() {
Document doc = getDoc();
DocumentProcessor proc = new TestMappingArrayProcessor();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("label", "labels[0]");
ProxyDocument mapped = new ProxyDocument(proc, doc, fieldMap);
Processing p = Processing.of(new DocumentPut(mapped));
proc.process(p);
assertEquals(2, ((Array<StringFieldValue>) doc.getFieldValue("labels")).size());
assertEquals(new StringFieldValue("EMI"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(0));
assertEquals(new StringFieldValue("tylden"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(1));
fieldMap.clear();
fieldMap.put("label", "labels[2]");
mapped = new ProxyDocument(proc, doc, fieldMap);
p = Processing.of(new DocumentPut(mapped));
try {
proc.process(p);
fail("Should not have worked");
} catch (IllegalArgumentException iae) {
}
assertEquals(2, ((Array<StringFieldValue>) doc.getFieldValue("labels")).size());
assertEquals(new StringFieldValue("EMI"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(0));
assertEquals(new StringFieldValue("tylden"), ((Array<StringFieldValue>) doc.getFieldValue("labels")).get(1));
}
@Test
public void testMappingStructsInArrays() {
Document doc = getDoc();
DocumentProcessor proc = new TestMappingStructInArrayProcessor();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("name", "listeners[0].firstname");
ProxyDocument mapped = new ProxyDocument(proc, doc, fieldMap);
Processing p = Processing.of(new DocumentPut(mapped));
proc.process(p);
assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
assertEquals("and", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname")).getString()));
fieldMap.clear();
fieldMap.put("name", "listeners[2].firstname");
mapped = new ProxyDocument(proc, doc, fieldMap);
p = Processing.of(new DocumentPut(mapped));
try {
proc.process(p);
fail("Should not have worked");
} catch (IllegalArgumentException iae) {
}
assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
assertEquals("and", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname")).getString()));
proc = new TestRemovingMappingStructInArrayProcessor();
fieldMap.clear();
fieldMap.put("name", "listeners[1].lastname");
mapped = new ProxyDocument(proc, doc, fieldMap);
p = Processing.of(new DocumentPut(mapped));
proc.process(p);
assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
assertNull(((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname"));
fieldMap.clear();
fieldMap.put("name", "listeners[2].lastname");
mapped = new ProxyDocument(proc, doc, fieldMap);
p = Processing.of(new DocumentPut(mapped));
try {
proc.process(p);
fail("Should not have worked");
} catch (IllegalArgumentException iae) {
}
assertEquals(2, ((Array<Struct>) doc.getFieldValue("listeners")).size());
assertEquals("peter", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("firstname")).getString()));
assertEquals("olsen", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(0).getFieldValue("lastname")).getString()));
assertEquals("anders", (((StringFieldValue)((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("firstname")).getString()));
assertNull(((Array<Struct>) doc.getFieldValue("listeners")).get(1).getFieldValue("lastname"));
}
@Test
public void testMappingSpanTrees() {
Document doc = getDoc();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("a", "artist");
fieldMap.put("g", "guitarist");
ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
Iterator<SpanTree> itSpanTreesDoc = ((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator();
Iterator<Annotation> itAnnotDoc = itSpanTreesDoc.next().iterator();
Iterator<SpanTree> itSpanTreesMapped = ((StringFieldValue) mapped.getFieldValue("artist")).getSpanTrees().iterator();
Iterator<Annotation> itAnnotMapped = itSpanTreesMapped.next().iterator();
assertEquals(itAnnotDoc.next().getType().getName(), "person");
assertFalse(itAnnotDoc.hasNext());
assertEquals(itAnnotMapped.next().getType().getName(), "person");
assertFalse(itAnnotMapped.hasNext());
AnnotationType guitaristType = new AnnotationType("guitarist");
Annotation guitarist = new Annotation(guitaristType);
StringFieldValue bona = new StringFieldValue("Bonamassa");
bona.setSpanTree(new SpanTree("mytree").annotate(guitarist));
StringFieldValue clapton = new StringFieldValue("Clapton");
mapped.setFieldValue("a", bona);
mapped.setFieldValue("g", clapton);
itSpanTreesDoc = ((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator();
itAnnotDoc = itSpanTreesDoc.next().iterator();
itSpanTreesMapped = ((StringFieldValue) mapped.getFieldValue("artist")).getSpanTrees().iterator();
itAnnotMapped = itSpanTreesMapped.next().iterator();
assertEquals(itAnnotDoc.next().getType().getName(), "guitarist");
assertFalse(itAnnotDoc.hasNext());
assertEquals(itAnnotMapped.next().getType().getName(), "guitarist");
assertFalse(itAnnotMapped.hasNext());
assertSame(((StringFieldValue) doc.getFieldValue("artist")).getSpanTrees().iterator().next(), ((StringFieldValue) mapped.getFieldValue("a")).getSpanTrees().iterator().next());
}
@Test
public void testMappedDoc() {
Document doc = getDoc();
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("a", "artist");
ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
assertEquals(new StringFieldValue("Black Rock"), mapped.getFieldValue("t"));
assertEquals(new StringFieldValue("Joe Bonamassa").getWrappedValue(), mapped.getFieldValue("a").getWrappedValue());
mapped.setFieldValue("t", new StringFieldValue("The Ballad Of John Henry"));
StringFieldValue bona = new StringFieldValue("Bonamassa");
mapped.setFieldValue("a", bona);
assertEquals(new StringFieldValue("The Ballad Of John Henry"), doc.getFieldValue("title"));
assertEquals(new StringFieldValue("The Ballad Of John Henry"), mapped.getFieldValue("t"));
assertEquals(new StringFieldValue("Bonamassa"), doc.getFieldValue("artist"));
assertEquals(new StringFieldValue("Bonamassa"), mapped.getFieldValue("a"));
mapped.setFieldValue("a", mapped.getFieldValue("a") + "Hughes");
assertEquals(new StringFieldValue("BonamassaHughes"), mapped.getFieldValue("a"));
StringFieldValue unmapped1 = (StringFieldValue) doc.getFieldValue("artist");
StringFieldValue unmapped2 = (StringFieldValue) doc.getFieldValue("artist");
assertTrue(unmapped1==unmapped2);
unmapped1.setSpanTree(new SpanTree("test"));
assertEquals(unmapped2.getSpanTree("test").getName(), "test");
StringFieldValue mapped1 = (StringFieldValue) mapped.getFieldValue("a");
mapped1.setSpanTree(new SpanTree("test2"));
StringFieldValue mapped2 = (StringFieldValue) mapped.getFieldValue("a");
assertTrue(mapped1==mapped2);
assertEquals(mapped2.getSpanTree("test2").getName(), "test2");
mapped.removeFieldValue("a");
assertEquals(mapped.getFieldValue("a"), null);
mapped.removeFieldValue(mapped.getField("t"));
assertEquals(mapped.getFieldValue("t"), null);
mapped.setFieldValue("a", new StringFieldValue("Bonamassa"));
assertEquals(new StringFieldValue("Bonamassa"), doc.getFieldValue("artist"));
mapped.removeFieldValue("a");
assertEquals(mapped.getFieldValue("a"), null);
}
@Test
@SuppressWarnings("deprecation")
// Note: a duplicated @Test annotation was removed here. @Test is not a
// repeatable annotation, so declaring it twice is a compile error.
public void testMappedDocUpdateAPI() {
    // ProxyDocumentUpdate must transparently delegate the DocumentUpdate API
    // (field updates, identity, equality, hashing, applyTo) to the wrapped
    // update while honoring the schema field mapping.
    Document doc = getDoc();
    DocumentType type = doc.getDataType();
    DocumentUpdate dud = new DocumentUpdate(type, new DocumentId("id:map:album::1"));
    com.yahoo.document.Field title = type.getField("title");
    FieldUpdate assignSingle = FieldUpdate.createAssign(title, new StringFieldValue("something"));
    Map<String, String> fieldMap = new HashMap<>();
    fieldMap.put("t", "title");
    fieldMap.put("a", "artist");
    ProxyDocumentUpdate pup = new ProxyDocumentUpdate(dud, fieldMap);
    pup.addFieldUpdate(assignSingle);
    assertEquals(pup.fieldUpdates().toString(), dud.fieldUpdates().toString());
    assertEquals(pup.getDocumentType(), dud.getDocumentType());
    assertEquals(pup.getFieldUpdate(title).size(), 1);
    assertEquals(pup.getFieldUpdate(title), dud.fieldUpdates().iterator().next());
    assertEquals(pup.getFieldUpdate("title"), dud.getFieldUpdate("title"));
    assertEquals(pup.getId(), dud.getId());
    assertEquals(pup.getType(), dud.getType());
    // Applying the proxy applies the wrapped update to the real document.
    assertEquals(pup.applyTo(doc), dud);
    assertEquals(doc.getFieldValue("title").getWrappedValue(), "something");
    assertEquals(pup, dud);
    assertEquals(pup.hashCode(), dud.hashCode());
    assertEquals(pup.toString(), dud.toString());
    assertEquals(pup.size(), dud.size());
    assertEquals(pup.getWrappedDocumentOperation().getId().toString(), "id:map:album::1");
}
@Test
public void testMappedDocStruct() {
StructDataType materialsStructType = new StructDataType("materialstype");
materialsStructType.addField(new com.yahoo.document.Field("ceiling", DataType.STRING));
materialsStructType.addField(new com.yahoo.document.Field("walls", DataType.STRING));
DocumentType docType = new DocumentType("album");
docType.addField("title", DataType.STRING);
docType.addField("artist", DataType.STRING);
StructDataType storeStructType = new StructDataType("storetype");
storeStructType.addField(new com.yahoo.document.Field("name", DataType.STRING));
storeStructType.addField(new com.yahoo.document.Field("city", DataType.STRING));
storeStructType.addField(new com.yahoo.document.Field("materials", materialsStructType));
docType.addField("store", storeStructType);
Document doc = new Document(docType, new DocumentId("id:map:album::1"));
doc.setFieldValue("title", new StringFieldValue("Black Rock"));
doc.setFieldValue("artist", new StringFieldValue("Joe Bonamassa"));
Struct material = new Struct(materialsStructType);
material.setFieldValue("ceiling", new StringFieldValue("wood"));
material.setFieldValue("walls", new StringFieldValue("brick"));
Struct store = new Struct(storeStructType);
store.setFieldValue("name", new StringFieldValue("Platekompaniet"));
store.setFieldValue("city", new StringFieldValue("Trondheim"));
store.setFieldValue(storeStructType.getField("materials"), material);
doc.setFieldValue(docType.getField("store"), store);
Map<String, String> fieldMap = new HashMap<>();
fieldMap.put("t", "title");
fieldMap.put("c", "store.city");
fieldMap.put("w", "store.materials.walls");
ProxyDocument mapped = new ProxyDocument(new TestDocumentProcessor1(), doc, fieldMap);
assertEquals(new StringFieldValue("Trondheim"), mapped.getFieldValue("c"));
assertEquals(new StringFieldValue("Black Rock"), mapped.getFieldValue("t"));
assertEquals(new StringFieldValue("brick"), mapped.getFieldValue("w"));
assertEquals(new StringFieldValue("brick"), material.getFieldValue("walls"));
mapped.setFieldValue("c", new StringFieldValue("Steinkjer"));
mapped.setFieldValue("w", new StringFieldValue("plaster"));
assertEquals(new StringFieldValue("plaster"), mapped.getFieldValue("w"));
assertEquals(new StringFieldValue("plaster"), material.getFieldValue("walls"));
assertEquals(new StringFieldValue("Steinkjer"), store.getFieldValue("city"));
assertEquals(new StringFieldValue("Steinkjer"), mapped.getFieldValue("c"));
assertEquals(new StringFieldValue("Steinkjer"), mapped.getFieldValue("c"));
mapped.setFieldValue("c", new StringFieldValue("Levanger"));
assertEquals(new StringFieldValue("Levanger"), store.getFieldValue("city"));
assertEquals(new StringFieldValue("Levanger"), mapped.getFieldValue("c"));
mapped.setFieldValue("c", mapped.getFieldValue("c") + "Kommune");
assertEquals(new StringFieldValue("LevangerKommune"), mapped.getFieldValue("c"));
}
@Test
public void testSchemaMap() {
SchemaMap map = new SchemaMap();
map.addMapping("mychain", "com.yahoo.MyDocProc", "mydoctype", "inDoc1", "inProc1");
map.addMapping("mychain", "com.yahoo.MyDocProc", "mydoctype", "inDoc2", "inProc2");
Map<Pair<String, String>, String> cMap = map.chainMap("mychain", "com.yahoo.MyDocProc");
assertEquals("inDoc1", cMap.get(new Pair<>("mydoctype", "inProc1")));
assertEquals("inDoc2", cMap.get(new Pair<>("mydoctype", "inProc2")));
assertNull(cMap.get(new Pair<>("invalidtype", "inProc2")));
Map<Pair<String, String>, String> noMap = map.chainMap("invalidchain", "com.yahoo.MyDocProc");
Map<Pair<String, String>, String> noMap2 = map.chainMap("mychain", "com.yahoo.MyInvalidDocProc");
assertTrue(noMap.isEmpty());
assertTrue(noMap2.isEmpty());
DocumentProcessor proc = new TestDocumentProcessor1();
proc.setFieldMap(cMap);
Map<String, String> dMap = proc.getDocMap("mydoctype");
assertEquals("inDoc1", dMap.get("inProc1"));
assertEquals("inDoc2", dMap.get("inProc2"));
}
@Test
public void testSchemaMapKey() {
SchemaMap map = new SchemaMap();
SchemaMap.SchemaMapKey key1 = map.new SchemaMapKey("chain", "docproc", "doctype", "from");
SchemaMap.SchemaMapKey key1_1 = map.new SchemaMapKey("chain", "docproc", "doctype", "from");
SchemaMap.SchemaMapKey key2 = map.new SchemaMapKey("chain", "docproc", "doctype2", "from");
assertTrue(key1.equals(key1_1));
assertFalse(key1.equals(key2));
}
@Test
public void testSchemaMapConfig() {
SchemaMap map = new SchemaMap();
SchemamappingConfig.Builder scb = new SchemamappingConfig.Builder();
scb.fieldmapping(new SchemamappingConfig.Fieldmapping.Builder().chain("mychain").docproc("mydocproc").doctype("mydoctype").
indocument("myindoc").inprocessor("myinprocessor"));
map.configure(new SchemamappingConfig(scb));
assertEquals(map.chainMap("mychain", "mydocproc").get(new Pair<>("mydoctype", "myinprocessor")), "myindoc");
}
@Test
public void testSchemaMapNoDocType() {
SchemaMap map = new SchemaMap();
map.addMapping("mychain", "com.yahoo.MyDocProc", null, "inDoc1", "inProc1");
map.addMapping("mychain", "com.yahoo.MyDocProc", null, "inDoc2", "inProc2");
Map<Pair<String, String>, String> cMap = map.chainMap("mychain", "com.yahoo.MyDocProc");
DocumentProcessor proc = new TestDocumentProcessor1();
proc.setFieldMap(cMap);
Map<String, String> dMap = proc.getDocMap("mydoctype");
assertEquals("inDoc1", dMap.get("inProc1"));
assertEquals("inDoc2", dMap.get("inProc2"));
}
@Test
public void testProxyAndSecure() {
    // A processor whose @Accesses declaration covers the (mapped) field it
    // writes must be allowed to update the document through the proxy.
    DocumentProcessor procOK = new TestDPSecure();
    Map<Pair<String, String>, String> fieldMap = new HashMap<>();
    fieldMap.put(new Pair<>("album", "titleMapped"), "title");
    procOK.setFieldMap(fieldMap);
    DocumentPut put = new DocumentPut(getDoc());
    Document proxyDoc = new Call(procOK).configDoc(procOK, put).getDocument();
    procOK.process(Processing.of(new DocumentPut(proxyDoc)));
    // JUnit convention: expected value first, actual second (was reversed,
    // which produces confusing failure messages).
    assertEquals("MyTitle MyTitle", proxyDoc.getFieldValue("title").toString());
}
@Test
public void testProxyAndSecureSecureFailing() {
DocumentProcessor procInsecure = new TestDPInsecure();
Map<Pair<String, String>, String> fieldMap = new HashMap<>();
fieldMap.put(new Pair<>("album", "titleMapped"), "title");
procInsecure.setFieldMap(fieldMap);
DocumentPut put = new DocumentPut(getDoc());
Document doc = new Call(procInsecure).configDoc(procInsecure, put).getDocument();
try {
procInsecure.process(Processing.of(new DocumentPut(doc)));
fail("Insecure docproc went through");
} catch (Exception e) {
assertTrue(e.getMessage().matches(".*allowed.*"));
}
}
/**
* To make it less likely to break schema mapping, we enforce that ProxyDocument does wrap every public
* non-static, non-final method on Document and StructuredFieldValue
*/
@Test
public void testVerifyProxyDocumentOverridesEverything() {
List<Method> allPublicFromProxyDocument = new ArrayList<>();
for (Method m : ProxyDocument.class.getDeclaredMethods()) {
if (Modifier.isPublic(m.getModifiers())) {
allPublicFromProxyDocument.add(m);
}
}
List<Method> allPublicFromDoc = new ArrayList<>();
for (Method m : Document.class.getDeclaredMethods()) {
if (mustBeOverriddenInProxyDocument(m)) {
allPublicFromDoc.add(m);
}
}
for (Method m : StructuredFieldValue.class.getDeclaredMethods()) {
if (mustBeOverriddenInProxyDocument(m)) {
allPublicFromDoc.add(m);
}
}
for (Method m : allPublicFromDoc) {
boolean thisOneOk=false;
for (Method pdM : allPublicFromProxyDocument) {
if (sameNameAndParams(m, pdM)) thisOneOk=true;
}
if (!thisOneOk) {
throw new RuntimeException("ProxyDocument must override all public methods from Document. " +
"Missing: '"+m+"'. If the method doesn't need field mapping or @Accesses check, just " +
"override it and delegate the call to 'doc'.");
}
}
}
/** True for public, non-static, non-final methods: those must be wrapped by ProxyDocument. */
private boolean mustBeOverriddenInProxyDocument(Method m) {
    int modifiers = m.getModifiers();
    return Modifier.isPublic(modifiers)
            && !Modifier.isStatic(modifiers)
            && !Modifier.isFinal(modifiers);
}
/**
 * Returns true if the two methods have the same name and the exact same
 * parameter list (i.e. they would be override-equivalent).
 */
private boolean sameNameAndParams(Method m1, Method m2) {
    // Arrays.equals replaces the manual length check + element-wise loop.
    // Fully qualified to avoid depending on an import of java.util.Arrays.
    return m1.getName().equals(m2.getName())
            && java.util.Arrays.equals(m1.getParameterTypes(), m2.getParameterTypes());
}
@Accesses(value = { @Field(dataType = "String", description = "", name = "titleMapped") })
public static class TestDPSecure extends DocumentProcessor {
public Progress process(Processing processing) {
Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
document.setFieldValue("titleMapped", new StringFieldValue("MyTitle"));
document.setFieldValue("titleMapped", new StringFieldValue(document.getFieldValue("titleMapped").toString() + " MyTitle"));
return Progress.DONE;
}
}
// Fixture processor whose @Accesses annotation declares "titleMappedFoo" while the body
// accesses "titleMapped" — i.e. it deliberately touches a field it did not declare.
// Presumably used to verify that the access check rejects such a processor — confirm
// against the test that instantiates it.
@Accesses(value = { @Field(dataType = "String", description = "", name = "titleMappedFoo") })
public static class TestDPInsecure extends DocumentProcessor {
    public Progress process(Processing processing) {
        Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
        // These accesses target "titleMapped", which is NOT the declared field above.
        document.setFieldValue("titleMapped", new StringFieldValue("MyTitle"));
        document.setFieldValue("titleMapped", new StringFieldValue(document.getFieldValue("titleMapped").toString() + " MyTitle"));
        return Progress.DONE;
    }
}
// Fixture processor that writes a single string into the "label" field; the field name
// is presumably subject to field-name mapping in the test that uses this — confirm
// against the caller.
public static class TestMappingArrayProcessor extends DocumentProcessor {
    public Progress process(Processing processing) {
        Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
        document.setFieldValue("label", new StringFieldValue("EMI"));
        return Progress.DONE;
    }
}
// Fixture processor that writes a single string into the "name" field; presumably the
// field is mapped into a struct-in-array by the test that uses this — confirm against
// the caller.
public static class TestMappingStructInArrayProcessor extends DocumentProcessor {
    public Progress process(Processing processing) {
        // Fixed: removed a stray empty statement (";;") that trailed this declaration.
        Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
        document.setFieldValue("name", new StringFieldValue("peter"));
        return Progress.DONE;
    }
}
// Fixture processor that removes the "name" field value; the counterpart of
// TestMappingStructInArrayProcessor, exercising removal through the same (presumed)
// field mapping — confirm against the caller.
public static class TestRemovingMappingStructInArrayProcessor extends DocumentProcessor {
    public Progress process(Processing processing) {
        Document document = ((DocumentPut)processing.getDocumentOperations().get(0)).getDocument();
        document.removeFieldValue("name");
        return Progress.DONE;
    }
}
} |
Instead of updating the expectations, consider updating `getDocumentRouteSelectorRawConfig()` to return a config where both routes use a selection with `testdoc` so that merging can be explicitly tested. Otherwise I think this test can be removed entirely since we already test selection-based routing of Gets in other tests (`get_document_messages_are_sent_to_the_route_handling_the_given_document_type`) | public void multipleGetRepliesAreMergedToFoundDocument() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
// Hop using the raw DocumentRouteSelector config, with two recipients ("foo", "bar").
frame.setHop(new HopSpec("test", getDocumentRouteSelectorRawConfig())
.addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::yarn"), "[all]"));
// NOTE(review): only one routing node is expected here; with the raw config only the
// "foo" route's selector matches type "testdoc" — verify against
// getDocumentRouteSelectorRawConfig() (a variant of this test expects 2).
List<RoutingNode> selected = frame.select(1);
for (int i = 0, len = selected.size(); i < len; ++i) {
// The first selected node answers with a found document carrying a known
// lastModified; any remaining nodes answer with a null (not-found) document.
Document doc = null;
if (i == 0) {
doc = new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::yarn"));
doc.setLastModified(123456L);
}
GetDocumentReply reply = new GetDocumentReply(null);
reply.setDocument(doc);
selected.get(i).handleReply(reply);
}
// The merged reply must be a get-document reply carrying the lastModified of the
// document from the node that actually found it.
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_GETDOCUMENT, reply.getType());
assertEquals(123456L, ((GetDocumentReply)reply).getLastModified());
} | List<RoutingNode> selected = frame.select(1); | public void multipleGetRepliesAreMergedToFoundDocument() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
// Hop using the raw DocumentRouteSelector config, with two recipients ("foo", "bar").
frame.setHop(new HopSpec("test", getDocumentRouteSelectorRawConfig())
.addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::yarn"), "[all]"));
// Both recipients are expected to be selected so that reply merging can be exercised.
List<RoutingNode> selected = frame.select(2);
for (int i = 0, len = selected.size(); i < len; ++i) {
// The first selected node answers with a found document carrying a known
// lastModified; the other node answers with a null (not-found) document.
Document doc = null;
if (i == 0) {
doc = new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::yarn"));
doc.setLastModified(123456L);
}
GetDocumentReply reply = new GetDocumentReply(null);
reply.setDocument(doc);
selected.get(i).handleReply(reply);
}
// The merged reply must carry the found document's lastModified, proving that the
// merge picks the reply that contained a document.
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_GETDOCUMENT, reply.getType());
assertEquals(123456L, ((GetDocumentReply)reply).getLastModified());
} | class PolicyTestCase {
// Timeout used for all receptor waits and session sends in these tests.
private static final int TIMEOUT = 300;
private static final TimeUnit TIMEOUT_UNIT = TimeUnit.SECONDS;
private static final long TIMEOUT_MILLIS = TIMEOUT_UNIT.toMillis(TIMEOUT);
// Document type manager; configured from the test document config in setUp().
private final DocumentTypeManager manager = new DocumentTypeManager();

// Loads the "testdoc" (and related) document type definitions used by every test.
@Before
public void setUp() {
    DocumentTypeManagerConfigurer.configure(manager, "file:./test/cfg/testdoc.cfg");
}
// Verifies that DocumentProtocol.createPolicy() maps each supported policy name (given
// a valid parameter) to the expected RoutingPolicy implementation.
@Test
public void testProtocol() {
    DocumentProtocol protocol = new DocumentProtocol(manager);
    RoutingPolicy policy = protocol.createPolicy("AND", null);
    assertTrue(policy instanceof ANDPolicy);
    policy = new DocumentProtocol(manager).createPolicy("DocumentRouteSelector", "raw:route[0]\n");
    assertTrue(policy instanceof DocumentRouteSelectorPolicy);
    policy = new DocumentProtocol(manager).createPolicy("Extern", "foo;bar/baz");
    assertTrue(policy instanceof ExternPolicy);
    policy = new DocumentProtocol(manager).createPolicy("LocalService", null);
    assertTrue(policy instanceof LocalServicePolicy);
    policy = new DocumentProtocol(manager).createPolicy("RoundRobin", null);
    assertTrue(policy instanceof RoundRobinPolicy);
    policy = new DocumentProtocol(manager).createPolicy("SubsetService", null);
    assertTrue(policy instanceof SubsetServicePolicy);
    policy = new DocumentProtocol(manager).createPolicy("LoadBalancer", "cluster=docproc/cluster.default;session=chain.default");
    assertTrue(policy instanceof LoadBalancerPolicy);
}
// Exercises the AND policy: without a parameter it selects all configured recipients;
// with a parameter the parameter's hops override the recipient list; replies from all
// selected branches are merged into one.
@Test
public void testAND() {
    PolicyTestFrame frame = new PolicyTestFrame(manager);
    frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                         new DocumentId("id:ns:testdoc::")))));
    // No parameter: both recipients are selected.
    frame.setHop(new HopSpec("test", "[AND]")
                 .addRecipient("foo")
                 .addRecipient("bar"));
    frame.assertSelect(Arrays.asList("foo", "bar"));
    // Parameter present: it overrides the recipient list entirely.
    frame.setHop(new HopSpec("test", "[AND:baz]")
                 .addRecipient("foo")
                 .addRecipient("bar"));
    frame.assertSelect(Arrays.asList("baz"));
    // Merging of one and of two replies.
    frame.setHop(new HopSpec("test", "[AND:foo]"));
    frame.assertMergeOneReply("foo");
    frame.setHop(new HopSpec("test", "[AND:foo bar]"));
    frame.assertMergeTwoReplies("foo", "bar");
    frame.destroy();
}
// An Extern policy parameter must be "<slobrok-spec>;<service-pattern>"; anything else
// (missing, empty, no pattern, or a pattern that is just ";") must yield an ErrorPolicy.
// NOTE(review): "spec + \";bar\"" is also expected to be an error here — presumably
// because "bar" is not a valid service pattern; confirm against ExternPolicy's parsing.
@Test
public void requireThatExternPolicyWithIllegalParamIsAnErrorPolicy() throws ListenFailedException {
    Slobrok slobrok = new Slobrok();
    String spec = "tcp/localhost:" + slobrok.port();
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", null) instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", "") instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec) instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";") instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";bar") instanceof ErrorPolicy);
}

// A pattern that matches no registered service must select no routing nodes.
@Test
public void requireThatExternPolicyWithUnknownPatternSelectsNone() throws Exception {
    PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
    setupExternPolicy(frame, new Slobrok(), "foo/bar");
    frame.assertSelect(null);
}
// Registers 10 services in an external slobrok and verifies that repeated selection
// through the Extern policy eventually reaches every one of them.
@Test
public void requireThatExternPolicySelectsFromExternSlobrok() throws Exception {
    PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
    Slobrok slobrok = new Slobrok();
    List<TestServer> servers = new ArrayList<>();
    for (int i = 0; i < 10; ++i) {
        TestServer server = new TestServer("docproc/cluster.default/" + i, null, slobrok,
                                           new DocumentProtocol(manager));
        server.net.registerSession("chain.default");
        servers.add(server);
    }
    setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 10);
    Set<String> lst = new HashSet<>();
    for (int i = 0; i < 10; ++i) {
        RoutingNode leaf = frame.select(1).get(0);
        String recipient = leaf.getRoute().toString();
        lst.add(recipient);
        leaf.handleReply(new EmptyReply());
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    // All 10 distinct services must have been selected across the 10 rounds.
    assertEquals(10, lst.size());
    for (TestServer server : servers) {
        server.destroy();
    }
    frame.destroy();
}

// With a single matching extern service, the policy must merge the single child reply
// according to the document protocol.
@Test
public void requireThatExternPolicyMergesOneReplyAsProtocol() throws Exception {
    PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
    Slobrok slobrok = new Slobrok();
    TestServer server = new TestServer("docproc/cluster.default/0", null, slobrok,
                                       new DocumentProtocol(manager));
    server.net.registerSession("chain.default");
    setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 1);
    frame.assertMergeOneReply(server.net.getConnectionSpec() + "/chain.default");
    server.destroy();
    frame.destroy();
}
// End-to-end send through an Extern hop: a source in a local slobrok routes via an
// intermediate registered in a different (external) slobrok on to a destination, and
// the reply travels the same chain back. The exact send/forward/acknowledge order below
// is the contract being tested — do not reorder.
@Test
public void testExternSend() throws Exception {
    Slobrok local = new Slobrok();
    TestServer src = new TestServer("src", null, local, new DocumentProtocol(manager));
    SourceSession ss = src.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
    Slobrok slobrok = new Slobrok();
    // Intermediate with a routing table mapping "default" -> hop "dst" -> "dst/session".
    TestServer itr = new TestServer("itr",
                                    new RoutingTableSpec(DocumentProtocol.NAME)
                                            .addRoute(new RouteSpec("default").addHop("dst"))
                                            .addHop(new HopSpec("dst", "dst/session")),
                                    slobrok, new DocumentProtocol(manager));
    IntermediateSession is = itr.mb.createIntermediateSession("session", true, new Receptor(), new Receptor());
    TestServer dst = new TestServer("dst", null, slobrok, new DocumentProtocol(manager));
    DestinationSession ds = dst.mb.createDestinationSession("session", true, new Receptor());
    Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
    msg.getTrace().setLevel(9);
    // Route: resolve "itr/session" through the external slobrok, then continue on "default".
    msg.setRoute(Route.parse("[Extern:tcp/localhost:" + slobrok.port() + ";itr/session] default"));
    assertTrue(ss.send(msg).isAccepted());
    assertNotNull(msg = ((Receptor)is.getMessageHandler()).getMessage(TIMEOUT));
    is.forward(msg);
    assertNotNull(msg = ((Receptor)ds.getMessageHandler()).getMessage(TIMEOUT));
    ds.acknowledge(msg);
    Reply reply = ((Receptor)is.getReplyHandler()).getReply(TIMEOUT);
    assertNotNull(reply);
    is.forward(reply);
    assertNotNull(reply = ((Receptor)ss.getReplyHandler()).getReply(TIMEOUT));
    System.out.println(reply.getTrace().toString());
    src.destroy();
    itr.destroy();
    dst.destroy();
    slobrok.stop();
    local.stop();
}

// Verifies that an Extern parameter may list several slobrok connection specs: after
// the first slobrok is stopped, a spec listing both the dead and a fresh slobrok must
// still resolve the destination via the live one.
@Test
public void testExternMultipleSlobroks() throws ListenFailedException {
    Slobrok local = new Slobrok();
    TestServer srcServer = new TestServer("src", null, local, new DocumentProtocol(manager));
    SourceSession srcSession =
            srcServer.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
    Slobrok extern = new Slobrok();
    String spec = "tcp/localhost:" + extern.port();
    TestServer dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
    Receptor dstHandler = new Receptor();
    DestinationSession dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
    // First round trip against the first external slobrok.
    Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
    msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
    assertTrue(srcSession.send(msg).isAccepted());
    assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
    dstSession.acknowledge(msg);
    Reply reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
    assertNotNull(reply);
    // Tear down the first slobrok/destination, then retry with a spec listing both the
    // dead slobrok and a new live one.
    extern.stop();
    dstSession.destroy();
    dstServer.destroy();
    dstHandler.reset();
    assertNull(dstHandler.getMessage(0));
    extern = new Slobrok();
    spec += ",tcp/localhost:" + extern.port();
    dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
    dstHandler = new Receptor();
    dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
    msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
    msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
    assertTrue(srcSession.send(msg).isAccepted());
    assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
    dstSession.acknowledge(msg);
    reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
    assertNotNull(reply);
    extern.stop();
    dstSession.destroy();
    dstServer.destroy();
    local.stop();
    srcSession.destroy();
    srcServer.destroy();
}
// Exercises the LocalService policy: with 10 registered local sessions, repeated
// selection must spread over all of them; an unknown parameter ("broken") must fall
// back to the wildcard route; and a bare [LocalService] hop must merge as one reply.
@Test
public void testLocalService() {
    PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
    frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                         new DocumentId("id:ns:testdoc::0")))));
    for (int i = 0; i < 10; ++i) {
        frame.getNetwork().registerSession(i + "/chain.default");
    }
    assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
    frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService]/chain.default"));
    Set<String> lst = new HashSet<>();
    for (int i = 0; i < 10; ++i) {
        RoutingNode leaf = frame.select(1).get(0);
        String recipient = leaf.getRoute().toString();
        lst.add(recipient);
        leaf.handleReply(new EmptyReply());
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    // All 10 sessions must have been hit.
    assertEquals(10, lst.size());
    lst.clear();
    // Unknown LocalService parameter: the policy resolves to the wildcard route only.
    frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService:broken]/chain.default"));
    for (int i = 0; i < 10; ++i) {
        RoutingNode leaf = frame.select(1).get(0);
        String recipient = leaf.getRoute().toString();
        assertEquals(recipient, "docproc/cluster.default/*/chain.default");
        lst.add(recipient);
        leaf.handleReply(new EmptyReply());
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    assertEquals(1, lst.size());
    frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                         new DocumentId("id:ns:testdoc::")))));
    frame.setHop(new HopSpec("test", "[LocalService]"));
    frame.assertMergeOneReply("*");
    frame.destroy();
}

// Verifies that the LocalService policy's internal cache keeps two frames with
// different target chains from interfering with each other's resolution.
@Test
public void testLocalServiceCache() {
    PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
    HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[LocalService]/chain.foo");
    fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
    fooFrame.setHop(fooHop);
    PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
    HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[LocalService]/chain.bar");
    barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
    barFrame.setHop(barHop);
    fooFrame.getMessageBus().setupRouting(
            new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                               .addHop(fooHop)
                                               .addHop(barHop)));
    fooFrame.getNetwork().registerSession("0/chain.foo");
    fooFrame.getNetwork().registerSession("0/chain.bar");
    assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
    RoutingNode fooChild = fooFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
    RoutingNode barChild = barFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
    barChild.handleReply(new EmptyReply());
    fooChild.handleReply(new EmptyReply());
    assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
    assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
/**
 * Builds the raw DocumentRouteSelector config used by the merge tests: route "foo"
 * selects document type "testdoc" and route "bar" selects type "other".
 * <p>
 * Fixed: removed a misplaced {@code @Test} annotation. JUnit 4 requires test methods
 * to be public, void and parameterless; annotating this private String-returning
 * helper makes the runner report an initialization error instead of running tests.
 */
private String getDocumentRouteSelectorRawConfig() {
    return "[DocumentRouteSelector:raw:" +
            "route[2]\n" +
            "route[0].name \"foo\"\n" +
            "route[0].selector \"testdoc\"\n" +
            "route[0].feed \"myfeed\"\n" +
            "route[1].name \"bar\"\n" +
            "route[1].selector \"other\"\n" +
            "route[1].feed \"myfeed\"\n]";
}
// Remove messages must be routed to the route whose selector matches the document type
// in the id.
@Test
public void remove_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
    PolicyTestFrame frame = createFrameWithTwoRoutes();
    frame.setMessage(createRemove("id:ns:testdoc::1"));
    frame.assertSelect(Arrays.asList("testdoc-route"));
    frame.setMessage(createRemove("id:ns:other::1"));
    frame.assertSelect(Arrays.asList("other-route"));
}

// Get messages must likewise be routed by the document type in the id.
@Test
public void get_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
    PolicyTestFrame frame = createFrameWithTwoRoutes();
    frame.setMessage(createGet("id:ns:testdoc::1"));
    frame.assertSelect(Arrays.asList("testdoc-route"));
    frame.setMessage(createGet("id:ns:other::1"));
    frame.assertSelect(Arrays.asList("other-route"));
}
// Builds a frame whose hop routes type "testdoc" to "testdoc-route" and type "other"
// to "other-route".
private PolicyTestFrame createFrameWithTwoRoutes() {
    PolicyTestFrame result = new PolicyTestFrame(manager);
    result.setHop(new HopSpec("test", createDocumentRouteSelectorConfigWithTwoRoutes())
                  .addRecipient("testdoc-route").addRecipient("other-route"));
    return result;
}

// Raw DocumentRouteSelector config with one route per document type; the field
// conditions make the selectors type-specific without matching every document.
private String createDocumentRouteSelectorConfigWithTwoRoutes() {
    return "[DocumentRouteSelector:raw:" +
            "route[2]\n" +
            "route[0].name \"testdoc-route\"\n" +
            "route[0].selector \"testdoc and testdoc.stringfield != '0'\"\n" +
            "route[0].feed \"\"\n" +
            "route[1].name \"other-route\"\n" +
            "route[1].selector \"other and other.intfield != '0'\"\n" +
            "route[1].feed \"\"\n]";
}

// Convenience factories for the message types used by the routing tests above.
private RemoveDocumentMessage createRemove(String docId) {
    return new RemoveDocumentMessage(new DocumentId(docId));
}

private GetDocumentMessage createGet(String docId) {
    return new GetDocumentMessage(new DocumentId(docId));
}
// Exercises the SubsetService policy: selection must stay within a subset of the
// registered sessions, consecutive selections must not repeat the same target, and a
// target that disappears must be retried away from.
// Fixed: the message construction wrapped the Document in DocumentPut twice
// (new DocumentPut(new DocumentPut(...))); the outer copy added nothing, so the
// redundant wrapper has been removed.
@Test
public void testSubsetService() {
    PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
    frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                         new DocumentId("id:ns:testdoc::")))));
    frame.setHop(new HopSpec("test", "docproc/cluster.default/[SubsetService:2]/chain.default"));
    // Register sessions one at a time; each selection must resolve to some session.
    Set<String> lst = new HashSet<>();
    for (int i = 1; i <= 10; ++i) {
        frame.getNetwork().registerSession(i + "/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", i));
        RoutingNode leaf = frame.select(1).get(0);
        lst.add(leaf.getRoute().toString());
        leaf.handleReply(new EmptyReply());
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    assertTrue(lst.size() > 1);
    // Consecutive selections must alternate between subset members.
    String prev = null;
    for (int i = 1; i <= 10; ++i) {
        RoutingNode leaf = frame.select(1).get(0);
        String next = leaf.getRoute().toString();
        if (prev == null) {
            assertNotNull(next);
        } else {
            assertNotEquals(prev, next);
        }
        prev = next;
        leaf.handleReply(new EmptyReply());
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    // Unregister each selected target and answer with NO_ADDRESS_FOR_SERVICE; every
    // session must eventually be selected exactly once.
    lst.clear();
    for (int i = 1; i <= 10; ++i) {
        RoutingNode leaf = frame.select(1).get(0);
        String route = leaf.getRoute().toString();
        lst.add(route);
        frame.getNetwork().unregisterSession(route.substring(frame.getIdentity().length() + 1));
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10 - i));
        Reply reply = new EmptyReply();
        reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE, route));
        leaf.handleReply(reply);
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    assertEquals(10, lst.size());
    frame.setHop(new HopSpec("test", "[SubsetService]"));
    frame.assertMergeOneReply("*");
    frame.destroy();
}
// Verifies that the SubsetService policy's internal cache keeps two frames with
// different target chains from interfering with each other's resolution.
@Test
public void testSubsetServiceCache() {
    PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
    HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[SubsetService:2]/chain.foo");
    fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
    fooFrame.setHop(fooHop);
    PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
    HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[SubsetService:2]/chain.bar");
    barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
    barFrame.setHop(barHop);
    fooFrame.getMessageBus().setupRouting(
            new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                               .addHop(fooHop)
                                               .addHop(barHop)));
    fooFrame.getNetwork().registerSession("0/chain.foo");
    fooFrame.getNetwork().registerSession("0/chain.bar");
    assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
    // Each frame must resolve to its own chain despite the shared policy cache.
    RoutingNode fooChild = fooFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
    RoutingNode barChild = barFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
    barChild.handleReply(new EmptyReply());
    fooChild.handleReply(new EmptyReply());
    assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
    assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
// Exercises DocumentRouteSelector policy creation with valid and broken configs
// (both from the policy parameter and from the protocol's own config), then verifies
// that get/put/remove/update messages for type "testdoc" all route to "foo" and that a
// single child reply is merged through.
@Test
public void testDocumentRouteSelector() {
    String okConfig = "raw:route[0]\n";
    // "foo bar" is not a parseable document selector, so this config is broken.
    String errConfig = "raw:" +
                       "route[1]\n" +
                       "route[0].name \"foo\"\n" +
                       "route[0].selector \"foo bar\"\n" +
                       "route[0].feed \"baz\"\n";
    DocumentProtocol protocol = new DocumentProtocol(manager, okConfig);
    // A null/empty parameter falls back to the protocol's config; an explicit broken
    // parameter must produce an ErrorPolicy, and vice versa.
    assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof DocumentRouteSelectorPolicy);
    assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof DocumentRouteSelectorPolicy);
    assertTrue(protocol.createPolicy("DocumentRouteSelector", errConfig) instanceof ErrorPolicy);
    protocol = new DocumentProtocol(manager, errConfig);
    assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof ErrorPolicy);
    assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof ErrorPolicy);
    assertTrue(protocol.createPolicy("DocumentRouteSelector", okConfig) instanceof DocumentRouteSelectorPolicy);
    PolicyTestFrame frame = new PolicyTestFrame(manager);
    frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                     "route[2]\n" +
                                     "route[0].name \"foo\"\n" +
                                     "route[0].selector \"testdoc\"\n" +
                                     "route[0].feed \"myfeed\"\n" +
                                     "route[1].name \"bar\"\n" +
                                     "route[1].selector \"other\"\n" +
                                     "route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
    frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
    frame.assertSelect(Arrays.asList("foo"));
    Message put = new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                     new DocumentId("id:ns:testdoc::"))));
    frame.setMessage(put);
    frame.assertSelect(Arrays.asList("foo"));
    frame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::")));
    frame.assertSelect(Arrays.asList("foo"));
    frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
                                                                  new DocumentId("id:ns:testdoc::"))));
    frame.assertSelect(Arrays.asList("foo"));
    frame.setMessage(put);
    frame.assertMergeOneReply("foo");
    frame.destroy();
}
// Routes with selectors that inspect a field value: a put whose "intfield" satisfies
// the "foo" route's condition must route to "foo" only. NOTE(review): the initial Get
// also selects "foo" — presumably because a Get cannot evaluate field conditions and
// falls back to the type match; confirm against DocumentRouteSelectorPolicy.
@Test
public void testDocumentSelectorDualCluster() {
    PolicyTestFrame frame = new PolicyTestFrame(manager);
    frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                     "route[2]\n" +
                                     "route[0].name \"foo\"\n" +
                                     "route[0].selector \"(testdoc AND (testdoc.intfield / 1000 > 0))\"\n" +
                                     "route[0].feed \"myfeed\"\n" +
                                     "route[1].name \"bar\"\n" +
                                     "route[1].selector \"(other AND (other.intfield / 1000 > 0))\"\n" +
                                     "route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
    frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
    frame.assertSelect(Arrays.asList("foo"));
    // intfield = 3000 satisfies "intfield / 1000 > 0" on the testdoc route.
    Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId("id:ns:testdoc::"));
    doc.setFieldValue("intfield", 3000);
    Message put = new PutDocumentMessage(new DocumentPut(doc));
    frame.setMessage(put);
    frame.assertSelect(Arrays.asList("foo"));
    frame.setMessage(put);
    frame.assertMergeOneReply("foo");
    frame.destroy();
}

// A put whose document does not satisfy the route's selector must be ignored with a
// DOCUMENTIGNORED reply and no errors; an update of the matching type must still route.
@Test
public void testDocumentRouteSelectorIgnore() {
    PolicyTestFrame frame = new PolicyTestFrame(manager);
    frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                     "route[1]\n" +
                                     "route[0].name \"docproc/cluster.foo\"\n" +
                                     "route[0].selector \"testdoc and testdoc.stringfield == 'foo'\"\n" +
                                     "route[0].feed \"myfeed\"\n]").addRecipient("docproc/cluster.foo"));
    frame.setMessage(new PutDocumentMessage(
            new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                         new DocumentId("id:yarn:testdoc:n=1234:fluff")))));
    // No route matches, so no node is selected and an ignore reply is generated.
    frame.select(0);
    Reply reply = frame.getReceptor().getReply(TIMEOUT);
    assertNotNull(reply);
    assertEquals(DocumentProtocol.REPLY_DOCUMENTIGNORED, reply.getType());
    assertEquals(0, reply.getNumErrors());
    frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
                                                                  new DocumentId("id:ns:testdoc::"))));
    frame.assertSelect(Arrays.asList("docproc/cluster.foo"));
    frame.destroy();
}
// LoadBalancer policy: with a single registered session, selection must resolve to
// that session's connection spec.
@Test
public void testLoadBalancer() {
    PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
    frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                         new DocumentId("id:ns:testdoc::")))));
    frame.getNetwork().registerSession("0/chain.default");
    assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 1));
    frame.setHop(new HopSpec("test", "[LoadBalancer:cluster=docproc/cluster.default;session=chain.default]"));
    assertSelect(frame, 1, Arrays.asList(frame.getNetwork().getConnectionSpec() + "/chain.default"));
}

// RoundRobin policy: selection must rotate over the live recipients, and recipients
// whose sessions are unregistered must drop out of the rotation.
@Test
public void testRoundRobin() {
    PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
    frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                         new DocumentId("id:ns:testdoc::")))));
    for (int i = 0; i < 10; ++i) {
        frame.getNetwork().registerSession(i + "/chain.default");
    }
    assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
    frame.setHop(new HopSpec("test", "[RoundRobin]")
                 .addRecipient("docproc/cluster.default/3/chain.default")
                 .addRecipient("docproc/cluster.default/6/chain.default")
                 .addRecipient("docproc/cluster.default/9/chain.default"));
    assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
                                          "docproc/cluster.default/6/chain.default",
                                          "docproc/cluster.default/9/chain.default"));
    // Unregister sessions one by one; the rotation must shrink accordingly.
    frame.getNetwork().unregisterSession("6/chain.default");
    assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 9));
    assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
                                          "docproc/cluster.default/9/chain.default"));
    frame.getNetwork().unregisterSession("3/chain.default");
    assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 8));
    assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/9/chain.default"));
    frame.getNetwork().unregisterSession("9/chain.default");
    assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 7));
    assertSelect(frame, 32, new ArrayList<>());
    frame.setHop(new HopSpec("test", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.default"));
    frame.assertMergeOneReply("docproc/cluster.default/0/chain.default");
    frame.destroy();
}
// Verifies that the RoundRobin policy's internal cache keeps two frames with different
// recipient chains from interfering with each other's resolution.
@Test
public void testRoundRobinCache() {
    PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
    HopSpec fooHop = new HopSpec("foo", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.foo");
    fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
    fooFrame.setHop(fooHop);
    PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
    HopSpec barHop = new HopSpec("bar", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.bar");
    barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
    barFrame.setHop(barHop);
    fooFrame.getMessageBus().setupRouting(
            new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                               .addHop(fooHop)
                                               .addHop(barHop)));
    fooFrame.getNetwork().registerSession("0/chain.foo");
    fooFrame.getNetwork().registerSession("0/chain.bar");
    assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
    RoutingNode fooChild = fooFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().toString());
    RoutingNode barChild = barFrame.select(1).get(0);
    assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().toString());
    barChild.handleReply(new EmptyReply());
    fooChild.handleReply(new EmptyReply());
    assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
    assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
/**
 * Ensures that the given number of select passes on the given frame produces an expected list of recipients.
 *
 * @param frame The frame to select on.
 * @param numSelects The number of selects to perform.
 * @param expected The list of expected recipients.
 */
private static void assertSelect(PolicyTestFrame frame, int numSelects, List<String> expected) {
    // TreeSet: the recipients seen are compared against `expected` in sorted order below,
    // so `expected` is assumed to be sorted as well.
    Set<String> lst = new TreeSet<>();
    for (int i = 0; i < numSelects; ++i) {
        if (!expected.isEmpty()) {
            RoutingNode leaf = frame.select(1).get(0);
            String recipient = leaf.getRoute().toString();
            lst.add(recipient);
            leaf.handleReply(new EmptyReply());
        } else {
            // Nothing selectable: the frame must still produce a (merged) reply.
            frame.select(0);
        }
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    assertEquals(expected.size(), lst.size());
    Iterator<String> it = lst.iterator();
    for (String recipient : expected) {
        assertEquals(recipient, it.next());
    }
}
/** Polls in 10 ms steps until the mirror reports ready, failing after TIMEOUT_MILLIS. */
private static void assertMirrorReady(Mirror slobrok)
        throws InterruptedException, TimeoutException
{
    long attemptsLeft = TIMEOUT_MILLIS / 10;
    while (attemptsLeft-- > 0) {
        if (slobrok.ready()) {
            return;
        }
        Thread.sleep(10);
    }
    throw new TimeoutException();
}

/** Polls in 10 ms steps until the pattern resolves to exactly numEntries services. */
private static void assertMirrorContains(IMirror slobrok, String pattern, int numEntries)
        throws InterruptedException, TimeoutException
{
    long attemptsLeft = TIMEOUT_MILLIS / 10;
    while (attemptsLeft-- > 0) {
        if (slobrok.lookup(pattern).size() == numEntries) {
            return;
        }
        Thread.sleep(10);
    }
    throw new TimeoutException();
}
// Installs an Extern hop on the frame without waiting for a specific number of matches.
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern)
        throws InterruptedException, TimeoutException
{
    setupExternPolicy(frame, slobrok, pattern, -1);
}

// Installs an Extern hop on the frame, then waits until the policy's mirror is ready
// and — when numEntries >= 0 — until the pattern resolves to exactly that many
// services, so subsequent selects are deterministic.
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern, int numEntries)
        throws InterruptedException, TimeoutException
{
    String param = "tcp/localhost:" + slobrok.port() + ";" + pattern;
    frame.setHop(new HopSpec("test", "[Extern:" + param + "]"));
    // Dig the actual ExternPolicy instance out of the routing table to reach its mirror.
    MessageBus mbus = frame.getMessageBus();
    HopBlueprint hop = mbus.getRoutingTable(DocumentProtocol.NAME).getHop("test");
    PolicyDirective dir = (PolicyDirective)hop.getDirective(0);
    ExternPolicy policy = (ExternPolicy)mbus.getRoutingPolicy(DocumentProtocol.NAME, dir.getName(), dir.getParam());
    assertMirrorReady(policy.getMirror());
    if (numEntries >= 0) {
        assertMirrorContains(policy.getMirror(), pattern, numEntries);
    }
}
// Convenience factories for test frames and put messages over the "testdoc" type.
private PolicyTestFrame newFrame() {
    return new PolicyTestFrame(manager);
}

private PolicyTestFrame newFrame(Message msg) {
    PolicyTestFrame frame = newFrame();
    frame.setMessage(msg);
    return frame;
}

private PutDocumentMessage newPutDocument(String documentId) {
    return new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                               new DocumentId(documentId))));
}

private PolicyTestFrame newPutDocumentFrame(String documentId) {
    return newFrame(newPutDocument(documentId));
}
} | class PolicyTestCase {
private static final int TIMEOUT = 300;
private static final TimeUnit TIMEOUT_UNIT = TimeUnit.SECONDS;
private static final long TIMEOUT_MILLIS = TIMEOUT_UNIT.toMillis(TIMEOUT);
private final DocumentTypeManager manager = new DocumentTypeManager();
@Before
public void setUp() {
DocumentTypeManagerConfigurer.configure(manager, "file:./test/cfg/testdoc.cfg");
}
@Test
public void testProtocol() {
DocumentProtocol protocol = new DocumentProtocol(manager);
RoutingPolicy policy = protocol.createPolicy("AND", null);
assertTrue(policy instanceof ANDPolicy);
policy = new DocumentProtocol(manager).createPolicy("DocumentRouteSelector", "raw:route[0]\n");
assertTrue(policy instanceof DocumentRouteSelectorPolicy);
policy = new DocumentProtocol(manager).createPolicy("Extern", "foo;bar/baz");
assertTrue(policy instanceof ExternPolicy);
policy = new DocumentProtocol(manager).createPolicy("LocalService", null);
assertTrue(policy instanceof LocalServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("RoundRobin", null);
assertTrue(policy instanceof RoundRobinPolicy);
policy = new DocumentProtocol(manager).createPolicy("SubsetService", null);
assertTrue(policy instanceof SubsetServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("LoadBalancer", "cluster=docproc/cluster.default;session=chain.default");
assertTrue(policy instanceof LoadBalancerPolicy);
}
@Test
public void testAND() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[AND]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("foo", "bar"));
frame.setHop(new HopSpec("test", "[AND:baz]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("baz"));
frame.setHop(new HopSpec("test", "[AND:foo]"));
frame.assertMergeOneReply("foo");
frame.setHop(new HopSpec("test", "[AND:foo bar]"));
frame.assertMergeTwoReplies("foo", "bar");
frame.destroy();
}
@Test
public void requireThatExternPolicyWithIllegalParamIsAnErrorPolicy() throws ListenFailedException {
Slobrok slobrok = new Slobrok();
String spec = "tcp/localhost:" + slobrok.port();
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", null) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", "") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";bar") instanceof ErrorPolicy);
}
    @Test
    public void requireThatExternPolicyWithUnknownPatternSelectsNone() throws Exception {
        // A pattern that matches nothing in the extern slobrok must select no recipients.
        // NOTE(review): the Slobrok and the frame are never stopped/destroyed here,
        // unlike sibling tests — confirm whether cleanup should be added.
        PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
        setupExternPolicy(frame, new Slobrok(), "foo/bar");
        frame.assertSelect(null);
    }
    @Test
    public void requireThatExternPolicySelectsFromExternSlobrok() throws Exception {
        // Registers ten services in an external slobrok and verifies that repeated
        // selections eventually hit all ten distinct recipients.
        PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
        Slobrok slobrok = new Slobrok();
        List<TestServer> servers = new ArrayList<>();
        for (int i = 0; i < 10; ++i) {
            TestServer server = new TestServer("docproc/cluster.default/" + i, null, slobrok,
                                               new DocumentProtocol(manager));
            server.net.registerSession("chain.default");
            servers.add(server);
        }
        setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 10);
        Set<String> lst = new HashSet<>();
        for (int i = 0; i < 10; ++i) {
            RoutingNode leaf = frame.select(1).get(0);
            String recipient = leaf.getRoute().toString();
            lst.add(recipient);
            // Complete the send so the next select can pick a new recipient.
            leaf.handleReply(new EmptyReply());
            assertNotNull(frame.getReceptor().getReply(TIMEOUT));
        }
        assertEquals(10, lst.size());
        for (TestServer server : servers) {
            server.destroy();
        }
        // NOTE(review): the extern Slobrok itself is not stopped — confirm intent.
        frame.destroy();
    }
    @Test
    public void requireThatExternPolicyMergesOneReplyAsProtocol() throws Exception {
        // With a single matching extern service, the policy must merge that single
        // reply according to protocol semantics (connection spec + session name).
        PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
        Slobrok slobrok = new Slobrok();
        TestServer server = new TestServer("docproc/cluster.default/0", null, slobrok,
                                           new DocumentProtocol(manager));
        server.net.registerSession("chain.default");
        setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 1);
        frame.assertMergeOneReply(server.net.getConnectionSpec() + "/chain.default");
        server.destroy();
        // NOTE(review): slobrok is not stopped here — confirm intent.
        frame.destroy();
    }
    @Test
    public void testExternSend() throws Exception {
        // End-to-end send through an [Extern] hop: a source on a local slobrok routes
        // via an intermediate and destination registered in a *different* slobrok.
        Slobrok local = new Slobrok();
        TestServer src = new TestServer("src", null, local, new DocumentProtocol(manager));
        SourceSession ss = src.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
        Slobrok slobrok = new Slobrok();
        TestServer itr = new TestServer("itr",
                                        new RoutingTableSpec(DocumentProtocol.NAME)
                                                .addRoute(new RouteSpec("default").addHop("dst"))
                                                .addHop(new HopSpec("dst", "dst/session")),
                                        slobrok, new DocumentProtocol(manager));
        IntermediateSession is = itr.mb.createIntermediateSession("session", true, new Receptor(), new Receptor());
        TestServer dst = new TestServer("dst", null, slobrok, new DocumentProtocol(manager));
        DestinationSession ds = dst.mb.createDestinationSession("session", true, new Receptor());
        Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
        msg.getTrace().setLevel(9);
        // The Extern directive points at the remote slobrok; "default" resolves there.
        msg.setRoute(Route.parse("[Extern:tcp/localhost:" + slobrok.port() + ";itr/session] default"));
        assertTrue(ss.send(msg).isAccepted());
        // Message flows src -> itr -> dst; reply flows back dst -> itr -> src.
        assertNotNull(msg = ((Receptor)is.getMessageHandler()).getMessage(TIMEOUT));
        is.forward(msg);
        assertNotNull(msg = ((Receptor)ds.getMessageHandler()).getMessage(TIMEOUT));
        ds.acknowledge(msg);
        Reply reply = ((Receptor)is.getReplyHandler()).getReply(TIMEOUT);
        assertNotNull(reply);
        is.forward(reply);
        assertNotNull(reply = ((Receptor)ss.getReplyHandler()).getReply(TIMEOUT));
        System.out.println(reply.getTrace().toString());
        src.destroy();
        itr.destroy();
        dst.destroy();
        slobrok.stop();
        local.stop();
    }
    @Test
    public void testExternMultipleSlobroks() throws ListenFailedException {
        // Verifies that an [Extern] parameter may list several slobrok connection specs,
        // and that routing still works when only the *second* spec is alive.
        Slobrok local = new Slobrok();
        TestServer srcServer = new TestServer("src", null, local, new DocumentProtocol(manager));
        SourceSession srcSession =
                srcServer.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
        Slobrok extern = new Slobrok();
        String spec = "tcp/localhost:" + extern.port();
        TestServer dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
        Receptor dstHandler = new Receptor();
        DestinationSession dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
        // First round-trip against the first extern slobrok.
        Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
        msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
        assertTrue(srcSession.send(msg).isAccepted());
        assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
        dstSession.acknowledge(msg);
        Reply reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
        assertNotNull(reply);
        // Kill the first slobrok and its destination; bring up a second one.
        extern.stop();
        dstSession.destroy();
        dstServer.destroy();
        dstHandler.reset();
        assertNull(dstHandler.getMessage(0));
        extern = new Slobrok();
        // The dead spec stays first in the list; the live one is appended after it.
        spec += ",tcp/localhost:" + extern.port();
        dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
        dstHandler = new Receptor();
        dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
        msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
        msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
        assertTrue(srcSession.send(msg).isAccepted());
        assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
        dstSession.acknowledge(msg);
        reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
        assertNotNull(reply);
        extern.stop();
        dstSession.destroy();
        dstServer.destroy();
        local.stop();
        srcSession.destroy();
        srcServer.destroy();
    }
    @Test
    public void testLocalService() {
        // [LocalService] should distribute across all sessions registered under the
        // frame's own identity, and fall back to the wildcard when the service name
        // does not match anything ("broken").
        PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
        frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                             new DocumentId("id:ns:testdoc::0")))));
        for (int i = 0; i < 10; ++i) {
            frame.getNetwork().registerSession(i + "/chain.default");
        }
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
        frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService]/chain.default"));
        Set<String> lst = new HashSet<>();
        for (int i = 0; i < 10; ++i) {
            RoutingNode leaf = frame.select(1).get(0);
            String recipient = leaf.getRoute().toString();
            lst.add(recipient);
            leaf.handleReply(new EmptyReply());
            assertNotNull(frame.getReceptor().getReply(TIMEOUT));
        }
        // All ten local sessions should have been selected at least once.
        assertEquals(10, lst.size());
        lst.clear();
        frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService:broken]/chain.default"));
        for (int i = 0; i < 10; ++i) {
            RoutingNode leaf = frame.select(1).get(0);
            String recipient = leaf.getRoute().toString();
            // Unresolvable service: the directive collapses to the wildcard route.
            assertEquals(recipient, "docproc/cluster.default/*/chain.default");
            lst.add(recipient);
            leaf.handleReply(new EmptyReply());
            assertNotNull(frame.getReceptor().getReply(TIMEOUT));
        }
        assertEquals(1, lst.size());
        frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                             new DocumentId("id:ns:testdoc::")))));
        frame.setHop(new HopSpec("test", "[LocalService]"));
        frame.assertMergeOneReply("*");
        frame.destroy();
    }
    @Test
    public void testLocalServiceCache() {
        // Two hops sharing the same policy name but different parameters must not share
        // cached policy state: each hop resolves to its own chain.
        PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
        HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[LocalService]/chain.foo");
        fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
        fooFrame.setHop(fooHop);
        PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
        HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[LocalService]/chain.bar");
        barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
        barFrame.setHop(barHop);
        fooFrame.getMessageBus().setupRouting(
                new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                                   .addHop(fooHop)
                                                   .addHop(barHop)));
        fooFrame.getNetwork().registerSession("0/chain.foo");
        fooFrame.getNetwork().registerSession("0/chain.bar");
        assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
        RoutingNode fooChild = fooFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
        RoutingNode barChild = barFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
        barChild.handleReply(new EmptyReply());
        fooChild.handleReply(new EmptyReply());
        assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
        assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
    }
@Test
private String getDocumentRouteSelectorRawConfig() {
return "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"testdoc\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"testdoc\"\n" +
"route[1].feed \"myfeed\"\n]";
}
    @Test
    public void remove_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
        // Removes are routed by the document type in the document id, not by content.
        PolicyTestFrame frame = createFrameWithTwoRoutes();
        frame.setMessage(createRemove("id:ns:testdoc::1"));
        frame.assertSelect(Arrays.asList("testdoc-route"));
        frame.setMessage(createRemove("id:ns:other::1"));
        frame.assertSelect(Arrays.asList("other-route"));
    }
    @Test
    public void get_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
        // Gets are routed by the document type in the document id, mirroring removes.
        PolicyTestFrame frame = createFrameWithTwoRoutes();
        frame.setMessage(createGet("id:ns:testdoc::1"));
        frame.assertSelect(Arrays.asList("testdoc-route"));
        frame.setMessage(createGet("id:ns:other::1"));
        frame.assertSelect(Arrays.asList("other-route"));
    }
private PolicyTestFrame createFrameWithTwoRoutes() {
PolicyTestFrame result = new PolicyTestFrame(manager);
result.setHop(new HopSpec("test", createDocumentRouteSelectorConfigWithTwoRoutes())
.addRecipient("testdoc-route").addRecipient("other-route"));
return result;
}
    /**
     * Returns a raw DocumentRouteSelector directive with one route per document type
     * ("testdoc" and "other"), each guarded by a field condition and with no feed name.
     */
    private String createDocumentRouteSelectorConfigWithTwoRoutes() {
        return "[DocumentRouteSelector:raw:" +
               "route[2]\n" +
               "route[0].name \"testdoc-route\"\n" +
               "route[0].selector \"testdoc and testdoc.stringfield != '0'\"\n" +
               "route[0].feed \"\"\n" +
               "route[1].name \"other-route\"\n" +
               "route[1].selector \"other and other.intfield != '0'\"\n" +
               "route[1].feed \"\"\n]";
    }
private RemoveDocumentMessage createRemove(String docId) {
return new RemoveDocumentMessage(new DocumentId(docId));
}
private GetDocumentMessage createGet(String docId) {
return new GetDocumentMessage(new DocumentId(docId));
}
@Test
public void testSubsetService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))))));
frame.setHop(new HopSpec("test", "docproc/cluster.default/[SubsetService:2]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 1; i <= 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", i));
RoutingNode leaf = frame.select(1).get(0);
lst.add(leaf.getRoute().toString());
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertTrue(lst.size() > 1);
String prev = null;
for (int i = 1; i <= 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String next = leaf.getRoute().toString();
if (prev == null) {
assertNotNull(next);
} else {
assertNotEquals(prev, next);
}
prev = next;
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
lst.clear();
for (int i = 1; i <= 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String route = leaf.getRoute().toString();
lst.add(route);
frame.getNetwork().unregisterSession(route.substring(frame.getIdentity().length() + 1));
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10 - i));
Reply reply = new EmptyReply();
reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE, route));
leaf.handleReply(reply);
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(10, lst.size());
frame.setHop(new HopSpec("test", "[SubsetService]"));
frame.assertMergeOneReply("*");
frame.destroy();
}
    @Test
    public void testSubsetServiceCache() {
        // Two [SubsetService:2] hops with different chain parameters must not share
        // cached policy state: each hop resolves to its own chain.
        PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
        HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[SubsetService:2]/chain.foo");
        fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
        fooFrame.setHop(fooHop);
        PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
        HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[SubsetService:2]/chain.bar");
        barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
        barFrame.setHop(barHop);
        fooFrame.getMessageBus().setupRouting(
                new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                                   .addHop(fooHop)
                                                   .addHop(barHop)));
        fooFrame.getNetwork().registerSession("0/chain.foo");
        fooFrame.getNetwork().registerSession("0/chain.bar");
        assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
        RoutingNode fooChild = fooFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
        RoutingNode barChild = barFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
        barChild.handleReply(new EmptyReply());
        fooChild.handleReply(new EmptyReply());
        assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
        assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
    }
    @Test
    public void testDocumentRouteSelector() {
        // Policy creation: a broken protocol config poisons the default, but an
        // explicit (valid) parameter always wins over the protocol-level config.
        String okConfig = "raw:route[0]\n";
        String errConfig = "raw:" +
                           "route[1]\n" +
                           "route[0].name \"foo\"\n" +
                           "route[0].selector \"foo bar\"\n" +
                           "route[0].feed \"baz\"\n";
        DocumentProtocol protocol = new DocumentProtocol(manager, okConfig);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof DocumentRouteSelectorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof DocumentRouteSelectorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", errConfig) instanceof ErrorPolicy);
        protocol = new DocumentProtocol(manager, errConfig);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof ErrorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof ErrorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", okConfig) instanceof DocumentRouteSelectorPolicy);
        // Routing: all operation types on "testdoc" must select the "foo" route.
        PolicyTestFrame frame = new PolicyTestFrame(manager);
        frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                         "route[2]\n" +
                                         "route[0].name \"foo\"\n" +
                                         "route[0].selector \"testdoc\"\n" +
                                         "route[0].feed \"myfeed\"\n" +
                                         "route[1].name \"bar\"\n" +
                                         "route[1].selector \"other\"\n" +
                                         "route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
        frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
        frame.assertSelect(Arrays.asList("foo"));
        Message put = new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                          new DocumentId("id:ns:testdoc::"))));
        frame.setMessage(put);
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::")));
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
                                                                      new DocumentId("id:ns:testdoc::"))));
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(put);
        frame.assertMergeOneReply("foo");
        frame.destroy();
    }
    @Test
    public void testDocumentSelectorDualCluster() {
        // Selectors with field arithmetic: a put whose intfield satisfies the "foo"
        // route's condition must be routed (and merged) to "foo" only.
        PolicyTestFrame frame = new PolicyTestFrame(manager);
        frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                         "route[2]\n" +
                                         "route[0].name \"foo\"\n" +
                                         "route[0].selector \"(testdoc AND (testdoc.intfield / 1000 > 0))\"\n" +
                                         "route[0].feed \"myfeed\"\n" +
                                         "route[1].name \"bar\"\n" +
                                         "route[1].selector \"(other AND (other.intfield / 1000 > 0))\"\n" +
                                         "route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
        frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
        frame.assertSelect(Arrays.asList("foo"));
        Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId("id:ns:testdoc::"));
        // 3000 / 1000 > 0 makes the "foo" selector match on document content.
        doc.setFieldValue("intfield", 3000);
        Message put = new PutDocumentMessage(new DocumentPut(doc));
        frame.setMessage(put);
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(put);
        frame.assertMergeOneReply("foo");
        frame.destroy();
    }
    @Test
    public void testDocumentRouteSelectorIgnore() {
        // A put that matches no route must be acknowledged with DOCUMENTIGNORED
        // (no errors) instead of being sent; an update of the same type is still routed.
        PolicyTestFrame frame = new PolicyTestFrame(manager);
        frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                         "route[1]\n" +
                                         "route[0].name \"docproc/cluster.foo\"\n" +
                                         "route[0].selector \"testdoc and testdoc.stringfield == 'foo'\"\n" +
                                         "route[0].feed \"myfeed\"\n]").addRecipient("docproc/cluster.foo"));
        frame.setMessage(new PutDocumentMessage(
                new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                             new DocumentId("id:yarn:testdoc:n=1234:fluff")))));
        // The document has no stringfield value, so the selector does not match.
        frame.select(0);
        Reply reply = frame.getReceptor().getReply(TIMEOUT);
        assertNotNull(reply);
        assertEquals(DocumentProtocol.REPLY_DOCUMENTIGNORED, reply.getType());
        assertEquals(0, reply.getNumErrors());
        frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
                                                                      new DocumentId("id:ns:testdoc::"))));
        frame.assertSelect(Arrays.asList("docproc/cluster.foo"));
        frame.destroy();
    }
    @Test
    public void testLoadBalancer() {
        // [LoadBalancer] must resolve its cluster/session parameters to the single
        // registered service's connection spec.
        // NOTE(review): the frame is not destroyed here, unlike sibling tests — confirm intent.
        PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
        frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                             new DocumentId("id:ns:testdoc::")))));
        frame.getNetwork().registerSession("0/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 1));
        frame.setHop(new HopSpec("test", "[LoadBalancer:cluster=docproc/cluster.default;session=chain.default]"));
        assertSelect(frame, 1, Arrays.asList(frame.getNetwork().getConnectionSpec() + "/chain.default"));
    }
    @Test
    public void testRoundRobin() {
        // [RoundRobin] cycles through its *available* recipients only, shrinking the
        // rotation as services unregister, and selects nothing when none remain.
        PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
        frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                             new DocumentId("id:ns:testdoc::")))));
        for (int i = 0; i < 10; ++i) {
            frame.getNetwork().registerSession(i + "/chain.default");
        }
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
        frame.setHop(new HopSpec("test", "[RoundRobin]")
                             .addRecipient("docproc/cluster.default/3/chain.default")
                             .addRecipient("docproc/cluster.default/6/chain.default")
                             .addRecipient("docproc/cluster.default/9/chain.default"));
        assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
                                              "docproc/cluster.default/6/chain.default",
                                              "docproc/cluster.default/9/chain.default"));
        frame.getNetwork().unregisterSession("6/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 9));
        assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
                                              "docproc/cluster.default/9/chain.default"));
        frame.getNetwork().unregisterSession("3/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 8));
        assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/9/chain.default"));
        frame.getNetwork().unregisterSession("9/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 7));
        assertSelect(frame, 32, new ArrayList<>());
        frame.setHop(new HopSpec("test", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.default"));
        frame.assertMergeOneReply("docproc/cluster.default/0/chain.default");
        frame.destroy();
    }
    @Test
    public void testRoundRobinCache() {
        // Two [RoundRobin] hops with different recipient lists must not share cached
        // policy state: each hop resolves to its own chain.
        PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
        HopSpec fooHop = new HopSpec("foo", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.foo");
        fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
        fooFrame.setHop(fooHop);
        PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
        HopSpec barHop = new HopSpec("bar", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.bar");
        barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
        barFrame.setHop(barHop);
        fooFrame.getMessageBus().setupRouting(
                new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                                   .addHop(fooHop)
                                                   .addHop(barHop)));
        fooFrame.getNetwork().registerSession("0/chain.foo");
        fooFrame.getNetwork().registerSession("0/chain.bar");
        assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
        RoutingNode fooChild = fooFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().toString());
        RoutingNode barChild = barFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().toString());
        barChild.handleReply(new EmptyReply());
        fooChild.handleReply(new EmptyReply());
        assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
        assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
    }
    /**
     * Ensures that the given number of select passes on the given frame produces an expected list of recipients.
     *
     * Recipients seen across all passes are collected into a sorted set and compared
     * element-by-element against {@code expected}, so {@code expected} must be given in
     * natural (sorted) string order and must contain no duplicates.
     *
     * @param frame      The frame to select on.
     * @param numSelects The number of selects to perform.
     * @param expected   The list of expected recipients, in sorted order.
     */
    private static void assertSelect(PolicyTestFrame frame, int numSelects, List<String> expected) {
        Set<String> lst = new TreeSet<>();
        for (int i = 0; i < numSelects; ++i) {
            if (!expected.isEmpty()) {
                RoutingNode leaf = frame.select(1).get(0);
                String recipient = leaf.getRoute().toString();
                lst.add(recipient);
                // Complete this pass so the frame can produce a reply and select again.
                leaf.handleReply(new EmptyReply());
            } else {
                // No recipients expected: selecting zero still yields an upstream reply.
                frame.select(0);
            }
            assertNotNull(frame.getReceptor().getReply(TIMEOUT));
        }
        assertEquals(expected.size(), lst.size());
        Iterator<String> it = lst.iterator();
        for (String recipient : expected) {
            assertEquals(recipient, it.next());
        }
    }
private static void assertMirrorReady(Mirror slobrok)
throws InterruptedException, TimeoutException
{
for (int i = 0; i < TIMEOUT_MILLIS / 10; ++i) {
if (slobrok.ready()) {
return;
}
Thread.sleep(10);
}
throw new TimeoutException();
}
private static void assertMirrorContains(IMirror slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
for (int i = 0; i < TIMEOUT_MILLIS / 10; ++i) {
if (slobrok.lookup(pattern).size() == numEntries) {
return;
}
Thread.sleep(10);
}
throw new TimeoutException();
}
    /** Convenience overload of the four-argument variant that skips the entry-count check. */
    private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern)
            throws InterruptedException, TimeoutException
    {
        setupExternPolicy(frame, slobrok, pattern, -1);
    }
    /**
     * Installs an [Extern:&lt;slobrok&gt;;&lt;pattern&gt;] hop on the frame and waits until the
     * policy's mirror of the given slobrok is ready (and, when numEntries &gt;= 0, until
     * it sees exactly that many services matching the pattern).
     */
    private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern, int numEntries)
            throws InterruptedException, TimeoutException
    {
        String param = "tcp/localhost:" + slobrok.port() + ";" + pattern;
        frame.setHop(new HopSpec("test", "[Extern:" + param + "]"));
        // Dig the actual ExternPolicy instance out of the routing table so we can
        // wait on its slobrok mirror before the test starts selecting.
        MessageBus mbus = frame.getMessageBus();
        HopBlueprint hop = mbus.getRoutingTable(DocumentProtocol.NAME).getHop("test");
        PolicyDirective dir = (PolicyDirective)hop.getDirective(0);
        ExternPolicy policy = (ExternPolicy)mbus.getRoutingPolicy(DocumentProtocol.NAME, dir.getName(), dir.getParam());
        assertMirrorReady(policy.getMirror());
        if (numEntries >= 0) {
            assertMirrorContains(policy.getMirror(), pattern, numEntries);
        }
    }
    /** Builds a fresh test frame bound to this class's document type manager. */
    private PolicyTestFrame newFrame() {
        return new PolicyTestFrame(manager);
    }
private PolicyTestFrame newFrame(Message msg) {
PolicyTestFrame frame = newFrame();
frame.setMessage(msg);
return frame;
}
private PutDocumentMessage newPutDocument(String documentId) {
return new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId(documentId))));
}
private PolicyTestFrame newPutDocumentFrame(String documentId) {
return newFrame(newPutDocument(documentId));
}
} |
Fixed | public void multipleGetRepliesAreMergedToFoundDocument() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", getDocumentRouteSelectorRawConfig())
.addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::yarn"), "[all]"));
List<RoutingNode> selected = frame.select(1);
for (int i = 0, len = selected.size(); i < len; ++i) {
Document doc = null;
if (i == 0) {
doc = new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::yarn"));
doc.setLastModified(123456L);
}
GetDocumentReply reply = new GetDocumentReply(null);
reply.setDocument(doc);
selected.get(i).handleReply(reply);
}
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_GETDOCUMENT, reply.getType());
assertEquals(123456L, ((GetDocumentReply)reply).getLastModified());
} | List<RoutingNode> selected = frame.select(1); | public void multipleGetRepliesAreMergedToFoundDocument() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", getDocumentRouteSelectorRawConfig())
.addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::yarn"), "[all]"));
List<RoutingNode> selected = frame.select(2);
for (int i = 0, len = selected.size(); i < len; ++i) {
Document doc = null;
if (i == 0) {
doc = new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::yarn"));
doc.setLastModified(123456L);
}
GetDocumentReply reply = new GetDocumentReply(null);
reply.setDocument(doc);
selected.get(i).handleReply(reply);
}
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_GETDOCUMENT, reply.getType());
assertEquals(123456L, ((GetDocumentReply)reply).getLastModified());
} | class PolicyTestCase {
private static final int TIMEOUT = 300;
private static final TimeUnit TIMEOUT_UNIT = TimeUnit.SECONDS;
private static final long TIMEOUT_MILLIS = TIMEOUT_UNIT.toMillis(TIMEOUT);
private final DocumentTypeManager manager = new DocumentTypeManager();
@Before
public void setUp() {
DocumentTypeManagerConfigurer.configure(manager, "file:./test/cfg/testdoc.cfg");
}
@Test
public void testProtocol() {
DocumentProtocol protocol = new DocumentProtocol(manager);
RoutingPolicy policy = protocol.createPolicy("AND", null);
assertTrue(policy instanceof ANDPolicy);
policy = new DocumentProtocol(manager).createPolicy("DocumentRouteSelector", "raw:route[0]\n");
assertTrue(policy instanceof DocumentRouteSelectorPolicy);
policy = new DocumentProtocol(manager).createPolicy("Extern", "foo;bar/baz");
assertTrue(policy instanceof ExternPolicy);
policy = new DocumentProtocol(manager).createPolicy("LocalService", null);
assertTrue(policy instanceof LocalServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("RoundRobin", null);
assertTrue(policy instanceof RoundRobinPolicy);
policy = new DocumentProtocol(manager).createPolicy("SubsetService", null);
assertTrue(policy instanceof SubsetServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("LoadBalancer", "cluster=docproc/cluster.default;session=chain.default");
assertTrue(policy instanceof LoadBalancerPolicy);
}
@Test
public void testAND() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[AND]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("foo", "bar"));
frame.setHop(new HopSpec("test", "[AND:baz]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("baz"));
frame.setHop(new HopSpec("test", "[AND:foo]"));
frame.assertMergeOneReply("foo");
frame.setHop(new HopSpec("test", "[AND:foo bar]"));
frame.assertMergeTwoReplies("foo", "bar");
frame.destroy();
}
@Test
public void requireThatExternPolicyWithIllegalParamIsAnErrorPolicy() throws ListenFailedException {
Slobrok slobrok = new Slobrok();
String spec = "tcp/localhost:" + slobrok.port();
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", null) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", "") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";bar") instanceof ErrorPolicy);
}
@Test
public void requireThatExternPolicyWithUnknownPatternSelectsNone() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
setupExternPolicy(frame, new Slobrok(), "foo/bar");
frame.assertSelect(null);
}
@Test
public void requireThatExternPolicySelectsFromExternSlobrok() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
List<TestServer> servers = new ArrayList<>();
for (int i = 0; i < 10; ++i) {
TestServer server = new TestServer("docproc/cluster.default/" + i, null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
servers.add(server);
}
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 10);
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(10, lst.size());
for (TestServer server : servers) {
server.destroy();
}
frame.destroy();
}
@Test
public void requireThatExternPolicyMergesOneReplyAsProtocol() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
TestServer server = new TestServer("docproc/cluster.default/0", null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 1);
frame.assertMergeOneReply(server.net.getConnectionSpec() + "/chain.default");
server.destroy();
frame.destroy();
}
@Test
public void testExternSend() throws Exception {
Slobrok local = new Slobrok();
TestServer src = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession ss = src.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok slobrok = new Slobrok();
TestServer itr = new TestServer("itr",
new RoutingTableSpec(DocumentProtocol.NAME)
.addRoute(new RouteSpec("default").addHop("dst"))
.addHop(new HopSpec("dst", "dst/session")),
slobrok, new DocumentProtocol(manager));
IntermediateSession is = itr.mb.createIntermediateSession("session", true, new Receptor(), new Receptor());
TestServer dst = new TestServer("dst", null, slobrok, new DocumentProtocol(manager));
DestinationSession ds = dst.mb.createDestinationSession("session", true, new Receptor());
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.getTrace().setLevel(9);
msg.setRoute(Route.parse("[Extern:tcp/localhost:" + slobrok.port() + ";itr/session] default"));
assertTrue(ss.send(msg).isAccepted());
assertNotNull(msg = ((Receptor)is.getMessageHandler()).getMessage(TIMEOUT));
is.forward(msg);
assertNotNull(msg = ((Receptor)ds.getMessageHandler()).getMessage(TIMEOUT));
ds.acknowledge(msg);
Reply reply = ((Receptor)is.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
is.forward(reply);
assertNotNull(reply = ((Receptor)ss.getReplyHandler()).getReply(TIMEOUT));
System.out.println(reply.getTrace().toString());
src.destroy();
itr.destroy();
dst.destroy();
slobrok.stop();
local.stop();
}
// Verifies that an Extern directive accepts a comma-separated list of
// connection specs: after the first extern slobrok is stopped, a second one
// is appended to the spec and the message must still reach the destination.
@Test
public void testExternMultipleSlobroks() throws ListenFailedException {
Slobrok local = new Slobrok();
TestServer srcServer = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession srcSession =
srcServer.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok extern = new Slobrok();
String spec = "tcp/localhost:" + extern.port();
TestServer dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
Receptor dstHandler = new Receptor();
DestinationSession dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
Reply reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
// Tear down the first extern slobrok and destination entirely.
extern.stop();
dstSession.destroy();
dstServer.destroy();
dstHandler.reset();
assertNull(dstHandler.getMessage(0));
// Bring up a second extern slobrok and extend the spec list with it.
extern = new Slobrok();
spec += ",tcp/localhost:" + extern.port();
dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
dstHandler = new Receptor();
dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
extern.stop();
dstSession.destroy();
dstServer.destroy();
local.stop();
srcSession.destroy();
srcServer.destroy();
}
// Tests the LocalService policy: with 10 registered sessions every select
// should pick a distinct service; with an unknown parameter ("broken") the
// policy must fall back to the wildcard recipient; without a prefix the hop
// resolves to "*" and a single reply is merged through.
@Test
public void testLocalService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::0")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
// All 10 services should have been selected exactly once each.
assertEquals(10, lst.size());
lst.clear();
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService:broken]/chain.default"));
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
assertEquals(recipient, "docproc/cluster.default/*/chain.default");
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
// The "broken" parameter always resolves to the same wildcard recipient.
assertEquals(1, lst.size());
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[LocalService]"));
frame.assertMergeOneReply("*");
frame.destroy();
}
// Two frames sharing one message bus use two LocalService hops ("chain.foo"
// and "chain.bar"); each select must resolve to its own chain, i.e. cached
// policy state must not bleed between the two hops.
// NOTE(review): the frames are not destroyed at the end here, unlike most
// sibling tests — confirm whether cleanup is intentionally omitted.
@Test
public void testLocalServiceCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[LocalService]/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[LocalService]/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
// Builds a raw DocumentRouteSelector policy spec with two routes ("foo" for
// testdoc, "bar" for other documents), both feeding "myfeed".
// FIX: removed the stray @Test annotation. JUnit 4 requires @Test methods to
// be public, void and parameterless; annotating this private String-returning
// helper makes the runner reject the whole class with an initialization error.
private String getDocumentRouteSelectorRawConfig() {
    return "[DocumentRouteSelector:raw:" +
           "route[2]\n" +
           "route[0].name \"foo\"\n" +
           "route[0].selector \"testdoc\"\n" +
           "route[0].feed \"myfeed\"\n" +
           "route[1].name \"bar\"\n" +
           "route[1].selector \"other\"\n" +
           "route[1].feed \"myfeed\"\n]";
}
// Each remove must be selected onto the route whose document selector
// matches the type embedded in the document id.
@Test
public void remove_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
    PolicyTestFrame frame = createFrameWithTwoRoutes();
    String[][] cases = { { "id:ns:testdoc::1", "testdoc-route" },
                         { "id:ns:other::1", "other-route" } };
    for (String[] testCase : cases) {
        frame.setMessage(createRemove(testCase[0]));
        frame.assertSelect(Arrays.asList(testCase[1]));
    }
}
// Each get must be selected onto the route whose document selector
// matches the type embedded in the document id.
@Test
public void get_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
    PolicyTestFrame frame = createFrameWithTwoRoutes();
    String[][] cases = { { "id:ns:testdoc::1", "testdoc-route" },
                         { "id:ns:other::1", "other-route" } };
    for (String[] testCase : cases) {
        frame.setMessage(createGet(testCase[0]));
        frame.assertSelect(Arrays.asList(testCase[1]));
    }
}
// Builds a frame whose hop runs the two-route DocumentRouteSelector config
// and lists both routes as recipients.
private PolicyTestFrame createFrameWithTwoRoutes() {
    HopSpec hop = new HopSpec("test", createDocumentRouteSelectorConfigWithTwoRoutes());
    hop.addRecipient("testdoc-route");
    hop.addRecipient("other-route");
    PolicyTestFrame frame = new PolicyTestFrame(manager);
    frame.setHop(hop);
    return frame;
}
// Raw DocumentRouteSelector config with one route per document type:
// "testdoc-route" for testdoc documents and "other-route" for other
// documents, both with an empty feed.
private String createDocumentRouteSelectorConfigWithTwoRoutes() {
    StringBuilder config = new StringBuilder("[DocumentRouteSelector:raw:");
    config.append("route[2]\n");
    config.append("route[0].name \"testdoc-route\"\n");
    config.append("route[0].selector \"testdoc and testdoc.stringfield != '0'\"\n");
    config.append("route[0].feed \"\"\n");
    config.append("route[1].name \"other-route\"\n");
    config.append("route[1].selector \"other and other.intfield != '0'\"\n");
    config.append("route[1].feed \"\"\n]");
    return config.toString();
}
// Builds a remove message addressed to the given document id.
private RemoveDocumentMessage createRemove(String docId) {
    DocumentId id = new DocumentId(docId);
    return new RemoveDocumentMessage(id);
}
// Builds a get message addressed to the given document id.
private GetDocumentMessage createGet(String docId) {
    DocumentId id = new DocumentId(docId);
    return new GetDocumentMessage(id);
}
// Tests the SubsetService policy: with [SubsetService:2] the sender pins a
// subset of the matching services, consecutive selections alternate within
// the subset, and services answered with NO_ADDRESS_FOR_SERVICE are replaced
// until all 10 have been tried. A parameter-less [SubsetService] hop must
// merge a single reply as-is.
@Test
public void testSubsetService() {
    PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
    // FIX: the put was wrapped in a redundant nested DocumentPut
    // (new DocumentPut(new DocumentPut(...))); one wrapper suffices.
    frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                         new DocumentId("id:ns:testdoc::")))));
    frame.setHop(new HopSpec("test", "docproc/cluster.default/[SubsetService:2]/chain.default"));
    // Register 10 services one at a time; across the passes more than one
    // distinct service should be selected.
    Set<String> lst = new HashSet<>();
    for (int i = 1; i <= 10; ++i) {
        frame.getNetwork().registerSession(i + "/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", i));
        RoutingNode leaf = frame.select(1).get(0);
        lst.add(leaf.getRoute().toString());
        leaf.handleReply(new EmptyReply());
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    assertTrue(lst.size() > 1);
    // Consecutive selections must never pick the same service twice in a row.
    String prev = null;
    for (int i = 1; i <= 10; ++i) {
        RoutingNode leaf = frame.select(1).get(0);
        String next = leaf.getRoute().toString();
        if (prev == null) {
            assertNotNull(next);
        } else {
            assertNotEquals(prev, next);
        }
        prev = next;
        leaf.handleReply(new EmptyReply());
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    // Unregister each selected service and fail its reply; all 10 services
    // must eventually be hit as the subset is refilled.
    lst.clear();
    for (int i = 1; i <= 10; ++i) {
        RoutingNode leaf = frame.select(1).get(0);
        String route = leaf.getRoute().toString();
        lst.add(route);
        frame.getNetwork().unregisterSession(route.substring(frame.getIdentity().length() + 1));
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10 - i));
        Reply reply = new EmptyReply();
        reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE, route));
        leaf.handleReply(reply);
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    assertEquals(10, lst.size());
    frame.setHop(new HopSpec("test", "[SubsetService]"));
    frame.assertMergeOneReply("*");
    frame.destroy();
}
// Two frames sharing one message bus use two SubsetService hops ("chain.foo"
// and "chain.bar"); each select must resolve to its own chain, i.e. cached
// policy state must not bleed between the two hops.
@Test
public void testSubsetServiceCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[SubsetService:2]/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[SubsetService:2]/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
// Verifies DocumentRouteSelector policy creation (valid config yields the
// policy, invalid config yields ErrorPolicy, regardless of whether the bad
// config comes from the protocol or the parameter) and that get/put/remove/
// update messages for "testdoc" all select the "foo" route.
@Test
public void testDocumentRouteSelector() {
String okConfig = "raw:route[0]\n";
String errConfig = "raw:" +
"route[1]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"foo bar\"\n" +
"route[0].feed \"baz\"\n";
DocumentProtocol protocol = new DocumentProtocol(manager, okConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", errConfig) instanceof ErrorPolicy);
protocol = new DocumentProtocol(manager, errConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", okConfig) instanceof DocumentRouteSelectorPolicy);
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"testdoc\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"other\"\n" +
"route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
// All four message kinds for "testdoc" must select the "foo" route.
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
frame.assertSelect(Arrays.asList("foo"));
Message put = new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.setMessage(put);
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::")));
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(put);
frame.assertMergeOneReply("foo");
frame.destroy();
}
// Route selectors that reference field values (intfield / 1000 > 0) must
// match a put whose document carries intfield=3000, and a field-less get for
// the same type must still select the "foo" route.
@Test
public void testDocumentSelectorDualCluster() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"(testdoc AND (testdoc.intfield / 1000 > 0))\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"(other AND (other.intfield / 1000 > 0))\"\n" +
"route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
frame.assertSelect(Arrays.asList("foo"));
Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId("id:ns:testdoc::"));
doc.setFieldValue("intfield", 3000);
Message put = new PutDocumentMessage(new DocumentPut(doc));
frame.setMessage(put);
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(put);
frame.assertMergeOneReply("foo");
frame.destroy();
}
// A put whose document does not match the route's selector must be answered
// with an error-free DOCUMENTIGNORED reply and select no recipients, while an
// update for the matching type is still routed.
@Test
public void testDocumentRouteSelectorIgnore() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[1]\n" +
"route[0].name \"docproc/cluster.foo\"\n" +
"route[0].selector \"testdoc and testdoc.stringfield == 'foo'\"\n" +
"route[0].feed \"myfeed\"\n]").addRecipient("docproc/cluster.foo"));
frame.setMessage(new PutDocumentMessage(
new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:yarn:testdoc:n=1234:fluff")))));
frame.select(0);
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_DOCUMENTIGNORED, reply.getType());
assertEquals(0, reply.getNumErrors());
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("docproc/cluster.foo"));
frame.destroy();
}
// Verifies that the LoadBalancer policy resolves the configured
// cluster/session pair to the single registered service.
@Test
public void testLoadBalancer() {
    PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
    frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                         new DocumentId("id:ns:testdoc::")))));
    frame.getNetwork().registerSession("0/chain.default");
    assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 1));
    frame.setHop(new HopSpec("test", "[LoadBalancer:cluster=docproc/cluster.default;session=chain.default]"));
    assertSelect(frame, 1, Arrays.asList(frame.getNetwork().getConnectionSpec() + "/chain.default"));
    // FIX: the frame was never destroyed, unlike the other tests in this
    // class; release its network/slobrok resources.
    frame.destroy();
}
// Tests the RoundRobin policy: selection cycles over the recipients that are
// currently registered, shrinking as sessions are unregistered, selecting
// nothing once every recipient is gone, and merging a single reply as-is.
@Test
public void testRoundRobin() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "[RoundRobin]")
.addRecipient("docproc/cluster.default/3/chain.default")
.addRecipient("docproc/cluster.default/6/chain.default")
.addRecipient("docproc/cluster.default/9/chain.default"));
// 32 selects must only ever hit the three listed recipients.
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/6/chain.default",
"docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("6/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 9));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("3/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 8));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("9/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 7));
assertSelect(frame, 32, new ArrayList<>());
frame.setHop(new HopSpec("test", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.default"));
frame.assertMergeOneReply("docproc/cluster.default/0/chain.default");
frame.destroy();
}
// Two frames sharing one message bus use two RoundRobin hops with different
// recipients; each select must resolve to its own recipient, i.e. cached
// policy state must not bleed between the two hops.
@Test
public void testRoundRobinCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
/**
 * Ensures that the given number of select passes on the given frame produces an expected list of recipients.
 *
 * @param frame      the frame to select on
 * @param numSelects the number of selects to perform
 * @param expected   the expected recipients; compared element-wise against the sorted set of observed recipients,
 *                   so callers are expected to pass it in sorted order
 */
private static void assertSelect(PolicyTestFrame frame, int numSelects, List<String> expected) {
    Set<String> seen = new TreeSet<>();
    for (int pass = 0; pass < numSelects; ++pass) {
        if (expected.isEmpty()) {
            frame.select(0);
        } else {
            RoutingNode child = frame.select(1).get(0);
            seen.add(child.getRoute().toString());
            child.handleReply(new EmptyReply());
        }
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    assertEquals(expected.size(), seen.size());
    Iterator<String> actual = seen.iterator();
    for (String recipient : expected) {
        assertEquals(recipient, actual.next());
    }
}
// Polls the mirror every 10 ms until it reports ready, giving up with a
// TimeoutException after TIMEOUT_MILLIS.
private static void assertMirrorReady(Mirror slobrok)
        throws InterruptedException, TimeoutException {
    long attempts = TIMEOUT_MILLIS / 10;
    while (attempts-- > 0) {
        if (slobrok.ready()) {
            return;
        }
        Thread.sleep(10);
    }
    throw new TimeoutException();
}
// Polls the mirror every 10 ms until the pattern resolves to exactly the
// requested number of entries, giving up with a TimeoutException after
// TIMEOUT_MILLIS.
private static void assertMirrorContains(IMirror slobrok, String pattern, int numEntries)
        throws InterruptedException, TimeoutException {
    long attempts = TIMEOUT_MILLIS / 10;
    while (attempts-- > 0) {
        if (slobrok.lookup(pattern).size() == numEntries) {
            return;
        }
        Thread.sleep(10);
    }
    throw new TimeoutException();
}
// Convenience overload that does not verify the number of mirror entries.
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern)
throws InterruptedException, TimeoutException
{
setupExternPolicy(frame, slobrok, pattern, -1);
}
// Points the frame's hop at an [Extern:...] policy for the given slobrok and
// session pattern, then waits for the policy's mirror to become ready and,
// when numEntries >= 0, to contain exactly that many matching entries.
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern, int numEntries)
        throws InterruptedException, TimeoutException {
    String externSpec = "tcp/localhost:" + slobrok.port() + ";" + pattern;
    frame.setHop(new HopSpec("test", "[Extern:" + externSpec + "]"));
    MessageBus mbus = frame.getMessageBus();
    HopBlueprint blueprint = mbus.getRoutingTable(DocumentProtocol.NAME).getHop("test");
    PolicyDirective directive = (PolicyDirective)blueprint.getDirective(0);
    ExternPolicy policy = (ExternPolicy)mbus.getRoutingPolicy(DocumentProtocol.NAME,
                                                             directive.getName(), directive.getParam());
    assertMirrorReady(policy.getMirror());
    if (numEntries < 0) {
        return;
    }
    assertMirrorContains(policy.getMirror(), pattern, numEntries);
}
// Creates a fresh test frame bound to the shared document type manager.
private PolicyTestFrame newFrame() {
return new PolicyTestFrame(manager);
}
// Creates a fresh test frame pre-loaded with the given message.
private PolicyTestFrame newFrame(Message msg) {
    PolicyTestFrame frame = new PolicyTestFrame(manager);
    frame.setMessage(msg);
    return frame;
}
// Wraps a fresh "testdoc" document with the given id in a put message.
private PutDocumentMessage newPutDocument(String documentId) {
    Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId(documentId));
    return new PutDocumentMessage(new DocumentPut(doc));
}
// Creates a frame pre-loaded with a put of the given document id.
private PolicyTestFrame newPutDocumentFrame(String documentId) {
    PutDocumentMessage put = newPutDocument(documentId);
    return newFrame(put);
}
} | class PolicyTestCase {
// Timeout (in seconds) used for all receptor waits and source sessions.
private static final int TIMEOUT = 300;
private static final TimeUnit TIMEOUT_UNIT = TimeUnit.SECONDS;
// Same timeout in milliseconds, used by the polling helpers.
private static final long TIMEOUT_MILLIS = TIMEOUT_UNIT.toMillis(TIMEOUT);
// Shared type manager; populated from the test config in setUp().
private final DocumentTypeManager manager = new DocumentTypeManager();
// Loads the document types used by the tests (e.g. "testdoc") from disk.
@Before
public void setUp() {
DocumentTypeManagerConfigurer.configure(manager, "file:./test/cfg/testdoc.cfg");
}
// Verifies that DocumentProtocol.createPolicy() returns the expected policy
// implementation for each known policy name.
@Test
public void testProtocol() {
DocumentProtocol protocol = new DocumentProtocol(manager);
RoutingPolicy policy = protocol.createPolicy("AND", null);
assertTrue(policy instanceof ANDPolicy);
policy = new DocumentProtocol(manager).createPolicy("DocumentRouteSelector", "raw:route[0]\n");
assertTrue(policy instanceof DocumentRouteSelectorPolicy);
policy = new DocumentProtocol(manager).createPolicy("Extern", "foo;bar/baz");
assertTrue(policy instanceof ExternPolicy);
policy = new DocumentProtocol(manager).createPolicy("LocalService", null);
assertTrue(policy instanceof LocalServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("RoundRobin", null);
assertTrue(policy instanceof RoundRobinPolicy);
policy = new DocumentProtocol(manager).createPolicy("SubsetService", null);
assertTrue(policy instanceof SubsetServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("LoadBalancer", "cluster=docproc/cluster.default;session=chain.default");
assertTrue(policy instanceof LoadBalancerPolicy);
}
// Tests the AND policy: without a parameter it selects all recipients of the
// hop; with a parameter it selects the parameter instead; replies from one or
// two children are merged back into a single reply.
@Test
public void testAND() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[AND]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("foo", "bar"));
frame.setHop(new HopSpec("test", "[AND:baz]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("baz"));
frame.setHop(new HopSpec("test", "[AND:foo]"));
frame.assertMergeOneReply("foo");
frame.setHop(new HopSpec("test", "[AND:foo bar]"));
frame.assertMergeTwoReplies("foo", "bar");
frame.destroy();
}
// An Extern policy parameter must name both a connection spec and a session
// pattern ("spec;pattern"); null, empty, missing or malformed patterns must
// all yield an ErrorPolicy instead of an ExternPolicy.
@Test
public void requireThatExternPolicyWithIllegalParamIsAnErrorPolicy() throws ListenFailedException {
    Slobrok slobrok = new Slobrok();
    String spec = "tcp/localhost:" + slobrok.port();
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", null) instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", "") instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec) instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";") instanceof ErrorPolicy);
    assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";bar") instanceof ErrorPolicy);
    // FIX: the slobrok server was never stopped (leaked its listen port);
    // stop it like the other tests in this class do.
    slobrok.stop();
}
// An Extern policy whose session pattern matches nothing in the external
// slobrok must select no recipients at all.
@Test
public void requireThatExternPolicyWithUnknownPatternSelectsNone() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
setupExternPolicy(frame, new Slobrok(), "foo/bar");
frame.assertSelect(null);
}
// With 10 servers registered in an external slobrok, 10 select passes through
// the Extern policy must hit 10 distinct services.
@Test
public void requireThatExternPolicySelectsFromExternSlobrok() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
List<TestServer> servers = new ArrayList<>();
for (int i = 0; i < 10; ++i) {
TestServer server = new TestServer("docproc/cluster.default/" + i, null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
servers.add(server);
}
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 10);
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(10, lst.size());
for (TestServer server : servers) {
server.destroy();
}
frame.destroy();
}
// With a single matching service in the external slobrok, the Extern policy
// must merge the single child reply using the protocol's merge logic.
@Test
public void requireThatExternPolicyMergesOneReplyAsProtocol() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
TestServer server = new TestServer("docproc/cluster.default/0", null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 1);
frame.assertMergeOneReply(server.net.getConnectionSpec() + "/chain.default");
server.destroy();
frame.destroy();
}
// End-to-end send test: a source server on its own local slobrok routes a
// message through an [Extern:...] hop into a second slobrok namespace, where
// an intermediate session forwards it to a destination session; the reply is
// acknowledged and forwarded back along the same path.
@Test
public void testExternSend() throws Exception {
// Source runs against its own slobrok; itr/dst share a separate one.
Slobrok local = new Slobrok();
TestServer src = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession ss = src.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok slobrok = new Slobrok();
TestServer itr = new TestServer("itr",
new RoutingTableSpec(DocumentProtocol.NAME)
.addRoute(new RouteSpec("default").addHop("dst"))
.addHop(new HopSpec("dst", "dst/session")),
slobrok, new DocumentProtocol(manager));
IntermediateSession is = itr.mb.createIntermediateSession("session", true, new Receptor(), new Receptor());
TestServer dst = new TestServer("dst", null, slobrok, new DocumentProtocol(manager));
DestinationSession ds = dst.mb.createDestinationSession("session", true, new Receptor());
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.getTrace().setLevel(9);
// The Extern directive resolves "itr/session" via the second slobrok.
msg.setRoute(Route.parse("[Extern:tcp/localhost:" + slobrok.port() + ";itr/session] default"));
assertTrue(ss.send(msg).isAccepted());
assertNotNull(msg = ((Receptor)is.getMessageHandler()).getMessage(TIMEOUT));
is.forward(msg);
assertNotNull(msg = ((Receptor)ds.getMessageHandler()).getMessage(TIMEOUT));
ds.acknowledge(msg);
Reply reply = ((Receptor)is.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
is.forward(reply);
assertNotNull(reply = ((Receptor)ss.getReplyHandler()).getReply(TIMEOUT));
System.out.println(reply.getTrace().toString());
src.destroy();
itr.destroy();
dst.destroy();
slobrok.stop();
local.stop();
}
// Verifies that an Extern directive accepts a comma-separated list of
// connection specs: after the first extern slobrok is stopped, a second one
// is appended to the spec and the message must still reach the destination.
@Test
public void testExternMultipleSlobroks() throws ListenFailedException {
Slobrok local = new Slobrok();
TestServer srcServer = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession srcSession =
srcServer.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok extern = new Slobrok();
String spec = "tcp/localhost:" + extern.port();
TestServer dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
Receptor dstHandler = new Receptor();
DestinationSession dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
Reply reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
// Tear down the first extern slobrok and destination entirely.
extern.stop();
dstSession.destroy();
dstServer.destroy();
dstHandler.reset();
assertNull(dstHandler.getMessage(0));
// Bring up a second extern slobrok and extend the spec list with it.
extern = new Slobrok();
spec += ",tcp/localhost:" + extern.port();
dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
dstHandler = new Receptor();
dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
extern.stop();
dstSession.destroy();
dstServer.destroy();
local.stop();
srcSession.destroy();
srcServer.destroy();
}
// Tests the LocalService policy: with 10 registered sessions every select
// should pick a distinct service; with an unknown parameter ("broken") the
// policy must fall back to the wildcard recipient; without a prefix the hop
// resolves to "*" and a single reply is merged through.
@Test
public void testLocalService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::0")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
// All 10 services should have been selected exactly once each.
assertEquals(10, lst.size());
lst.clear();
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService:broken]/chain.default"));
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
assertEquals(recipient, "docproc/cluster.default/*/chain.default");
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
// The "broken" parameter always resolves to the same wildcard recipient.
assertEquals(1, lst.size());
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[LocalService]"));
frame.assertMergeOneReply("*");
frame.destroy();
}
// Two frames sharing one message bus use two LocalService hops ("chain.foo"
// and "chain.bar"); each select must resolve to its own chain, i.e. cached
// policy state must not bleed between the two hops.
// NOTE(review): the frames are not destroyed at the end here, unlike most
// sibling tests — confirm whether cleanup is intentionally omitted.
@Test
public void testLocalServiceCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[LocalService]/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[LocalService]/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
// Builds a raw DocumentRouteSelector policy spec with two routes, both
// feeding "myfeed".
// FIX: removed the stray @Test annotation. JUnit 4 requires @Test methods to
// be public, void and parameterless; annotating this private String-returning
// helper makes the runner reject the whole class with an initialization error.
private String getDocumentRouteSelectorRawConfig() {
    return "[DocumentRouteSelector:raw:" +
           "route[2]\n" +
           "route[0].name \"foo\"\n" +
           "route[0].selector \"testdoc\"\n" +
           "route[0].feed \"myfeed\"\n" +
           "route[1].name \"bar\"\n" +
           "route[1].selector \"testdoc\"\n" +
           "route[1].feed \"myfeed\"\n]";
}
// Each remove must be selected onto the route whose document selector
// matches the type embedded in the document id.
@Test
public void remove_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
    PolicyTestFrame frame = createFrameWithTwoRoutes();
    String[][] cases = { { "id:ns:testdoc::1", "testdoc-route" },
                         { "id:ns:other::1", "other-route" } };
    for (String[] testCase : cases) {
        frame.setMessage(createRemove(testCase[0]));
        frame.assertSelect(Arrays.asList(testCase[1]));
    }
}
// Each get must be selected onto the route whose document selector
// matches the type embedded in the document id.
@Test
public void get_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
    PolicyTestFrame frame = createFrameWithTwoRoutes();
    String[][] cases = { { "id:ns:testdoc::1", "testdoc-route" },
                         { "id:ns:other::1", "other-route" } };
    for (String[] testCase : cases) {
        frame.setMessage(createGet(testCase[0]));
        frame.assertSelect(Arrays.asList(testCase[1]));
    }
}
// Builds a frame whose hop is a two-route DocumentRouteSelector; the recipient
// names mirror the route names so assertSelect can check routing by name.
private PolicyTestFrame createFrameWithTwoRoutes() {
    HopSpec hop = new HopSpec("test", createDocumentRouteSelectorConfigWithTwoRoutes());
    hop.addRecipient("testdoc-route");
    hop.addRecipient("other-route");
    PolicyTestFrame frame = new PolicyTestFrame(manager);
    frame.setHop(hop);
    return frame;
}
// Raw policy config with one route per document type: "testdoc-route" handles
// testdoc documents, "other-route" handles "other" documents.
private String createDocumentRouteSelectorConfigWithTwoRoutes() {
    StringBuilder config = new StringBuilder("[DocumentRouteSelector:raw:");
    config.append("route[2]\n");
    config.append("route[0].name \"testdoc-route\"\n");
    config.append("route[0].selector \"testdoc and testdoc.stringfield != '0'\"\n");
    config.append("route[0].feed \"\"\n");
    config.append("route[1].name \"other-route\"\n");
    config.append("route[1].selector \"other and other.intfield != '0'\"\n");
    config.append("route[1].feed \"\"\n]");
    return config.toString();
}
// Wraps the given document id string in a remove message.
private RemoveDocumentMessage createRemove(String docId) {
    DocumentId id = new DocumentId(docId);
    return new RemoveDocumentMessage(id);
}
// Wraps the given document id string in a get message.
private GetDocumentMessage createGet(String docId) {
    DocumentId id = new DocumentId(docId);
    return new GetDocumentMessage(id);
}
/**
 * Exercises the SubsetService policy: selections spread over more than one service,
 * consecutive selections rotate within the subset, and the policy keeps finding new
 * services as the previously selected ones are unregistered.
 *
 * Fix: the put message was wrapped in a redundant nested DocumentPut copy
 * ({@code new DocumentPut(new DocumentPut(...))}); a single DocumentPut suffices.
 */
@Test
public void testSubsetService() {
    PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
    frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                         new DocumentId("id:ns:testdoc::")))));
    frame.setHop(new HopSpec("test", "docproc/cluster.default/[SubsetService:2]/chain.default"));
    // Register ten sessions one at a time; the selections must hit more than one of them.
    Set<String> lst = new HashSet<>();
    for (int i = 1; i <= 10; ++i) {
        frame.getNetwork().registerSession(i + "/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", i));
        RoutingNode leaf = frame.select(1).get(0);
        lst.add(leaf.getRoute().toString());
        leaf.handleReply(new EmptyReply());
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    assertTrue(lst.size() > 1);
    // With a subset size of 2 consecutive selections must not repeat the same service.
    String prev = null;
    for (int i = 1; i <= 10; ++i) {
        RoutingNode leaf = frame.select(1).get(0);
        String next = leaf.getRoute().toString();
        if (prev == null) {
            assertNotNull(next);
        } else {
            assertNotEquals(prev, next);
        }
        prev = next;
        leaf.handleReply(new EmptyReply());
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    lst.clear();
    // Unregister whichever service gets selected; each pass must pick a different one
    // until all ten have been seen.
    for (int i = 1; i <= 10; ++i) {
        RoutingNode leaf = frame.select(1).get(0);
        String route = leaf.getRoute().toString();
        lst.add(route);
        frame.getNetwork().unregisterSession(route.substring(frame.getIdentity().length() + 1));
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10 - i));
        Reply reply = new EmptyReply();
        reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE, route));
        leaf.handleReply(reply);
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    assertEquals(10, lst.size());
    // A bare [SubsetService] hop must still merge a single reply.
    frame.setHop(new HopSpec("test", "[SubsetService]"));
    frame.assertMergeOneReply("*");
    frame.destroy();
}
@Test
public void testSubsetServiceCache() {
// Two frames share one message bus; each routes through its own SubsetService hop.
// Verifies cached policy instances are keyed per hop and do not cross-route messages.
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[SubsetService:2]/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
// The second frame is constructed from the first, so it reuses the same bus/network.
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[SubsetService:2]/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
// One session per chain; wait until both are visible in slobrok before selecting.
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
// Each frame must resolve to its own chain, proving the cached policies don't collide.
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
@Test
public void testDocumentRouteSelector() {
// Policy creation: a valid config yields a DocumentRouteSelectorPolicy, an invalid
// selector ("foo bar") yields an ErrorPolicy; an explicit param overrides the
// protocol's own config either way.
String okConfig = "raw:route[0]\n";
String errConfig = "raw:" +
"route[1]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"foo bar\"\n" +
"route[0].feed \"baz\"\n";
DocumentProtocol protocol = new DocumentProtocol(manager, okConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof DocumentRouteSelectorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", errConfig) instanceof ErrorPolicy);
protocol = new DocumentProtocol(manager, errConfig);
assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof ErrorPolicy);
assertTrue(protocol.createPolicy("DocumentRouteSelector", okConfig) instanceof DocumentRouteSelectorPolicy);
// Routing: route "foo" matches testdoc, route "bar" matches "other"; every message
// kind for a testdoc document must select only "foo".
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"testdoc\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"other\"\n" +
"route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
frame.assertSelect(Arrays.asList("foo"));
Message put = new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.setMessage(put);
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::")));
frame.assertSelect(Arrays.asList("foo"));
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("foo"));
// A single-recipient selection must merge to a single reply.
frame.setMessage(put);
frame.assertMergeOneReply("foo");
frame.destroy();
}
/**
 * Verifies that selectors containing arithmetic ("intfield / 1000 > 0") still route
 * correctly: a get selects by type alone, while a put is evaluated against the
 * document's actual field values.
 */
@Test
public void testDocumentSelectorDualCluster() {
    String config = "[DocumentRouteSelector:raw:" +
                    "route[2]\n" +
                    "route[0].name \"foo\"\n" +
                    "route[0].selector \"(testdoc AND (testdoc.intfield / 1000 > 0))\"\n" +
                    "route[0].feed \"myfeed\"\n" +
                    "route[1].name \"bar\"\n" +
                    "route[1].selector \"(other AND (other.intfield / 1000 > 0))\"\n" +
                    "route[1].feed \"myfeed\"\n]";
    PolicyTestFrame testFrame = new PolicyTestFrame(manager);
    testFrame.setHop(new HopSpec("test", config).addRecipient("foo").addRecipient("bar"));
    // A get only knows the document type, so it must go to the testdoc route.
    testFrame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
    testFrame.assertSelect(Arrays.asList("foo"));
    // A put whose intfield satisfies the selector (3000 / 1000 > 0) also selects "foo".
    Document document = new Document(manager.getDocumentType("testdoc"), new DocumentId("id:ns:testdoc::"));
    document.setFieldValue("intfield", 3000);
    Message putMessage = new PutDocumentMessage(new DocumentPut(document));
    testFrame.setMessage(putMessage);
    testFrame.assertSelect(Arrays.asList("foo"));
    testFrame.setMessage(putMessage);
    testFrame.assertMergeOneReply("foo");
    testFrame.destroy();
}
@Test
public void testDocumentRouteSelectorIgnore() {
// A put that matches no route must be answered with REPLY_DOCUMENTIGNORED (no error),
// while an update for the same type is still forwarded to the cluster.
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
"route[1]\n" +
"route[0].name \"docproc/cluster.foo\"\n" +
"route[0].selector \"testdoc and testdoc.stringfield == 'foo'\"\n" +
"route[0].feed \"myfeed\"\n]").addRecipient("docproc/cluster.foo"));
// stringfield is unset, so the selector does not match and no recipient is chosen.
frame.setMessage(new PutDocumentMessage(
new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:yarn:testdoc:n=1234:fluff")))));
frame.select(0);
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_DOCUMENTIGNORED, reply.getType());
assertEquals(0, reply.getNumErrors());
// Updates cannot be evaluated against field values up front, so they are routed anyway.
frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))));
frame.assertSelect(Arrays.asList("docproc/cluster.foo"));
frame.destroy();
}
/**
 * Verifies that a LoadBalancer policy over a single registered session resolves to
 * that session's connection spec.
 *
 * Fix: added the missing {@code frame.destroy()} so the frame's network/slobrok
 * resources are released, consistent with the other tests in this class.
 */
@Test
public void testLoadBalancer() {
    PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
    frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                         new DocumentId("id:ns:testdoc::")))));
    frame.getNetwork().registerSession("0/chain.default");
    assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 1));
    frame.setHop(new HopSpec("test", "[LoadBalancer:cluster=docproc/cluster.default;session=chain.default]"));
    assertSelect(frame, 1, Arrays.asList(frame.getNetwork().getConnectionSpec() + "/chain.default"));
    frame.destroy();
}
@Test
public void testRoundRobin() {
// RoundRobin over three explicit recipients must cycle through exactly the ones
// that are currently registered, shrinking as sessions are unregistered.
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "[RoundRobin]")
.addRecipient("docproc/cluster.default/3/chain.default")
.addRecipient("docproc/cluster.default/6/chain.default")
.addRecipient("docproc/cluster.default/9/chain.default"));
// 32 selects over 3 live recipients must observe all three.
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/6/chain.default",
"docproc/cluster.default/9/chain.default"));
// Unregister recipients one by one; the rotation must drop each in turn.
frame.getNetwork().unregisterSession("6/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 9));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
"docproc/cluster.default/9/chain.default"))
;
frame.getNetwork().unregisterSession("3/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 8));
assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/9/chain.default"));
frame.getNetwork().unregisterSession("9/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 7));
// No live recipients left: selection must choose nothing.
assertSelect(frame, 32, new ArrayList<>());
frame.setHop(new HopSpec("test", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.default"));
frame.assertMergeOneReply("docproc/cluster.default/0/chain.default");
frame.destroy();
}
@Test
public void testRoundRobinCache() {
// Two frames on the same message bus use distinct RoundRobin hops; verifies the
// cached policy instances are keyed per hop and route each message to its own chain.
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
// The second frame is constructed from the first, so it reuses the same bus/network.
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
// One session per chain; wait until both are visible in slobrok before selecting.
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
// Each frame must resolve to its own chain, proving the cached policies don't collide.
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
/**
 * Performs a number of select passes on the given frame and verifies that the set
 * of observed recipients matches the expected list.
 *
 * @param frame      the frame to select on
 * @param numSelects the number of selects to perform
 * @param expected   the recipients that must be observed, in sorted order
 */
private static void assertSelect(PolicyTestFrame frame, int numSelects, List<String> expected) {
    Set<String> seen = new TreeSet<>();
    for (int pass = 0; pass < numSelects; ++pass) {
        if (expected.isEmpty()) {
            // Nothing routable; the select itself must produce the reply.
            frame.select(0);
        } else {
            RoutingNode leaf = frame.select(1).get(0);
            seen.add(leaf.getRoute().toString());
            leaf.handleReply(new EmptyReply());
        }
        assertNotNull(frame.getReceptor().getReply(TIMEOUT));
    }
    // The sorted set of observed recipients must equal the expected list element by element.
    assertEquals(expected.size(), seen.size());
    int idx = 0;
    for (String recipient : seen) {
        assertEquals(expected.get(idx++), recipient);
    }
}
// Polls every 10 ms until the mirror reports ready, or fails after TIMEOUT_MILLIS.
private static void assertMirrorReady(Mirror slobrok)
throws InterruptedException, TimeoutException
{
    int attempts = (int) (TIMEOUT_MILLIS / 10);
    while (attempts-- > 0) {
        if (slobrok.ready()) {
            return;
        }
        Thread.sleep(10);
    }
    throw new TimeoutException();
}
// Polls every 10 ms until the pattern resolves to exactly the wanted number of
// entries, or fails after TIMEOUT_MILLIS.
private static void assertMirrorContains(IMirror slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
    int attempts = (int) (TIMEOUT_MILLIS / 10);
    while (attempts-- > 0) {
        if (slobrok.lookup(pattern).size() == numEntries) {
            return;
        }
        Thread.sleep(10);
    }
    throw new TimeoutException();
}
// Convenience overload that does not wait for a specific number of slobrok entries.
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern)
throws InterruptedException, TimeoutException
{
    final int anyNumberOfEntries = -1; // negative count disables the entry-count wait
    setupExternPolicy(frame, slobrok, pattern, anyNumberOfEntries);
}
// Points an Extern hop on the frame at the given slobrok and blocks until the
// policy's mirror is ready (and, if numEntries >= 0, until the pattern resolves
// to that many entries).
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
    String externSpec = "tcp/localhost:" + slobrok.port() + ";" + pattern;
    frame.setHop(new HopSpec("test", "[Extern:" + externSpec + "]"));
    // Dig the policy instance out of the routing table to get at its mirror.
    MessageBus bus = frame.getMessageBus();
    HopBlueprint blueprint = bus.getRoutingTable(DocumentProtocol.NAME).getHop("test");
    PolicyDirective directive = (PolicyDirective) blueprint.getDirective(0);
    ExternPolicy externPolicy = (ExternPolicy) bus.getRoutingPolicy(DocumentProtocol.NAME, directive.getName(), directive.getParam());
    assertMirrorReady(externPolicy.getMirror());
    if (numEntries >= 0) {
        assertMirrorContains(externPolicy.getMirror(), pattern, numEntries);
    }
}
// Creates a fresh test frame backed by this test's document type manager.
private PolicyTestFrame newFrame() {
return new PolicyTestFrame(manager);
}
// Creates a fresh test frame preloaded with the given message.
private PolicyTestFrame newFrame(Message msg) {
    PolicyTestFrame result = newFrame();
    result.setMessage(msg);
    return result;
}
// Builds a put message for an empty "testdoc" document with the given id.
private PutDocumentMessage newPutDocument(String documentId) {
    Document document = new Document(manager.getDocumentType("testdoc"), new DocumentId(documentId));
    return new PutDocumentMessage(new DocumentPut(document));
}
// Creates a test frame preloaded with a put for the given document id.
private PolicyTestFrame newPutDocumentFrame(String documentId) {
    PutDocumentMessage put = newPutDocument(documentId);
    return newFrame(put);
}
} |
Fixed | public void multipleGetRepliesAreMergedToFoundDocument() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", getDocumentRouteSelectorRawConfig())
.addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::yarn"), "[all]"));
List<RoutingNode> selected = frame.select(1);
for (int i = 0, len = selected.size(); i < len; ++i) {
Document doc = null;
if (i == 0) {
doc = new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::yarn"));
doc.setLastModified(123456L);
}
GetDocumentReply reply = new GetDocumentReply(null);
reply.setDocument(doc);
selected.get(i).handleReply(reply);
}
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_GETDOCUMENT, reply.getType());
assertEquals(123456L, ((GetDocumentReply)reply).getLastModified());
} | List<RoutingNode> selected = frame.select(1); | public void multipleGetRepliesAreMergedToFoundDocument() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setHop(new HopSpec("test", getDocumentRouteSelectorRawConfig())
.addRecipient("foo").addRecipient("bar"));
frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::yarn"), "[all]"));
List<RoutingNode> selected = frame.select(2);
for (int i = 0, len = selected.size(); i < len; ++i) {
Document doc = null;
if (i == 0) {
doc = new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::yarn"));
doc.setLastModified(123456L);
}
GetDocumentReply reply = new GetDocumentReply(null);
reply.setDocument(doc);
selected.get(i).handleReply(reply);
}
Reply reply = frame.getReceptor().getReply(TIMEOUT);
assertNotNull(reply);
assertEquals(DocumentProtocol.REPLY_GETDOCUMENT, reply.getType());
assertEquals(123456L, ((GetDocumentReply)reply).getLastModified());
} | class PolicyTestCase {
private static final int TIMEOUT = 300;
private static final TimeUnit TIMEOUT_UNIT = TimeUnit.SECONDS;
private static final long TIMEOUT_MILLIS = TIMEOUT_UNIT.toMillis(TIMEOUT);
private final DocumentTypeManager manager = new DocumentTypeManager();
@Before
public void setUp() {
DocumentTypeManagerConfigurer.configure(manager, "file:./test/cfg/testdoc.cfg");
}
@Test
public void testProtocol() {
DocumentProtocol protocol = new DocumentProtocol(manager);
RoutingPolicy policy = protocol.createPolicy("AND", null);
assertTrue(policy instanceof ANDPolicy);
policy = new DocumentProtocol(manager).createPolicy("DocumentRouteSelector", "raw:route[0]\n");
assertTrue(policy instanceof DocumentRouteSelectorPolicy);
policy = new DocumentProtocol(manager).createPolicy("Extern", "foo;bar/baz");
assertTrue(policy instanceof ExternPolicy);
policy = new DocumentProtocol(manager).createPolicy("LocalService", null);
assertTrue(policy instanceof LocalServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("RoundRobin", null);
assertTrue(policy instanceof RoundRobinPolicy);
policy = new DocumentProtocol(manager).createPolicy("SubsetService", null);
assertTrue(policy instanceof SubsetServicePolicy);
policy = new DocumentProtocol(manager).createPolicy("LoadBalancer", "cluster=docproc/cluster.default;session=chain.default");
assertTrue(policy instanceof LoadBalancerPolicy);
}
@Test
public void testAND() {
PolicyTestFrame frame = new PolicyTestFrame(manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[AND]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("foo", "bar"));
frame.setHop(new HopSpec("test", "[AND:baz]")
.addRecipient("foo")
.addRecipient("bar"));
frame.assertSelect(Arrays.asList("baz"));
frame.setHop(new HopSpec("test", "[AND:foo]"));
frame.assertMergeOneReply("foo");
frame.setHop(new HopSpec("test", "[AND:foo bar]"));
frame.assertMergeTwoReplies("foo", "bar");
frame.destroy();
}
@Test
public void requireThatExternPolicyWithIllegalParamIsAnErrorPolicy() throws ListenFailedException {
Slobrok slobrok = new Slobrok();
String spec = "tcp/localhost:" + slobrok.port();
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", null) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", "") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";bar") instanceof ErrorPolicy);
}
@Test
public void requireThatExternPolicyWithUnknownPatternSelectsNone() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
setupExternPolicy(frame, new Slobrok(), "foo/bar");
frame.assertSelect(null);
}
@Test
public void requireThatExternPolicySelectsFromExternSlobrok() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
List<TestServer> servers = new ArrayList<>();
for (int i = 0; i < 10; ++i) {
TestServer server = new TestServer("docproc/cluster.default/" + i, null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
servers.add(server);
}
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 10);
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(10, lst.size());
for (TestServer server : servers) {
server.destroy();
}
frame.destroy();
}
@Test
public void requireThatExternPolicyMergesOneReplyAsProtocol() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
Slobrok slobrok = new Slobrok();
TestServer server = new TestServer("docproc/cluster.default/0", null, slobrok,
new DocumentProtocol(manager));
server.net.registerSession("chain.default");
setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 1);
frame.assertMergeOneReply(server.net.getConnectionSpec() + "/chain.default");
server.destroy();
frame.destroy();
}
@Test
public void testExternSend() throws Exception {
Slobrok local = new Slobrok();
TestServer src = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession ss = src.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok slobrok = new Slobrok();
TestServer itr = new TestServer("itr",
new RoutingTableSpec(DocumentProtocol.NAME)
.addRoute(new RouteSpec("default").addHop("dst"))
.addHop(new HopSpec("dst", "dst/session")),
slobrok, new DocumentProtocol(manager));
IntermediateSession is = itr.mb.createIntermediateSession("session", true, new Receptor(), new Receptor());
TestServer dst = new TestServer("dst", null, slobrok, new DocumentProtocol(manager));
DestinationSession ds = dst.mb.createDestinationSession("session", true, new Receptor());
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.getTrace().setLevel(9);
msg.setRoute(Route.parse("[Extern:tcp/localhost:" + slobrok.port() + ";itr/session] default"));
assertTrue(ss.send(msg).isAccepted());
assertNotNull(msg = ((Receptor)is.getMessageHandler()).getMessage(TIMEOUT));
is.forward(msg);
assertNotNull(msg = ((Receptor)ds.getMessageHandler()).getMessage(TIMEOUT));
ds.acknowledge(msg);
Reply reply = ((Receptor)is.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
is.forward(reply);
assertNotNull(reply = ((Receptor)ss.getReplyHandler()).getReply(TIMEOUT));
System.out.println(reply.getTrace().toString());
src.destroy();
itr.destroy();
dst.destroy();
slobrok.stop();
local.stop();
}
@Test
public void testExternMultipleSlobroks() throws ListenFailedException {
Slobrok local = new Slobrok();
TestServer srcServer = new TestServer("src", null, local, new DocumentProtocol(manager));
SourceSession srcSession =
srcServer.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
Slobrok extern = new Slobrok();
String spec = "tcp/localhost:" + extern.port();
TestServer dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
Receptor dstHandler = new Receptor();
DestinationSession dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
Reply reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
extern.stop();
dstSession.destroy();
dstServer.destroy();
dstHandler.reset();
assertNull(dstHandler.getMessage(0));
extern = new Slobrok();
spec += ",tcp/localhost:" + extern.port();
dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
dstHandler = new Receptor();
dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
assertTrue(srcSession.send(msg).isAccepted());
assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
dstSession.acknowledge(msg);
reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
assertNotNull(reply);
extern.stop();
dstSession.destroy();
dstServer.destroy();
local.stop();
srcSession.destroy();
srcServer.destroy();
}
@Test
public void testLocalService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::0")))));
for (int i = 0; i < 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
}
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(10, lst.size());
lst.clear();
frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService:broken]/chain.default"));
for (int i = 0; i < 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
assertEquals(recipient, "docproc/cluster.default/*/chain.default");
lst.add(recipient);
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(1, lst.size());
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.setHop(new HopSpec("test", "[LocalService]"));
frame.assertMergeOneReply("*");
frame.destroy();
}
@Test
public void testLocalServiceCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[LocalService]/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[LocalService]/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
@Test
private String getDocumentRouteSelectorRawConfig() {
return "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"testdoc\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"other\"\n" +
"route[1].feed \"myfeed\"\n]";
}
@Test
public void remove_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
PolicyTestFrame frame = createFrameWithTwoRoutes();
frame.setMessage(createRemove("id:ns:testdoc::1"));
frame.assertSelect(Arrays.asList("testdoc-route"));
frame.setMessage(createRemove("id:ns:other::1"));
frame.assertSelect(Arrays.asList("other-route"));
}
@Test
public void get_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
PolicyTestFrame frame = createFrameWithTwoRoutes();
frame.setMessage(createGet("id:ns:testdoc::1"));
frame.assertSelect(Arrays.asList("testdoc-route"));
frame.setMessage(createGet("id:ns:other::1"));
frame.assertSelect(Arrays.asList("other-route"));
}
private PolicyTestFrame createFrameWithTwoRoutes() {
PolicyTestFrame result = new PolicyTestFrame(manager);
result.setHop(new HopSpec("test", createDocumentRouteSelectorConfigWithTwoRoutes())
.addRecipient("testdoc-route").addRecipient("other-route"));
return result;
}
private String createDocumentRouteSelectorConfigWithTwoRoutes() {
return "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"testdoc-route\"\n" +
"route[0].selector \"testdoc and testdoc.stringfield != '0'\"\n" +
"route[0].feed \"\"\n" +
"route[1].name \"other-route\"\n" +
"route[1].selector \"other and other.intfield != '0'\"\n" +
"route[1].feed \"\"\n]";
}
private RemoveDocumentMessage createRemove(String docId) {
return new RemoveDocumentMessage(new DocumentId(docId));
}
private GetDocumentMessage createGet(String docId) {
return new GetDocumentMessage(new DocumentId(docId));
}
@Test
public void testSubsetService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))))));
frame.setHop(new HopSpec("test", "docproc/cluster.default/[SubsetService:2]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 1; i <= 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", i));
RoutingNode leaf = frame.select(1).get(0);
lst.add(leaf.getRoute().toString());
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertTrue(lst.size() > 1);
String prev = null;
for (int i = 1; i <= 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String next = leaf.getRoute().toString();
if (prev == null) {
assertNotNull(next);
} else {
assertNotEquals(prev, next);
}
prev = next;
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
lst.clear();
for (int i = 1; i <= 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String route = leaf.getRoute().toString();
lst.add(route);
frame.getNetwork().unregisterSession(route.substring(frame.getIdentity().length() + 1));
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10 - i));
Reply reply = new EmptyReply();
reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE, route));
leaf.handleReply(reply);
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(10, lst.size());
frame.setHop(new HopSpec("test", "[SubsetService]"));
frame.assertMergeOneReply("*");
frame.destroy();
}
@Test
public void testSubsetServiceCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[SubsetService:2]/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[SubsetService:2]/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
    @Test
    public void testDocumentRouteSelector() {
        // Verifies config validation (a bad selector yields an ErrorPolicy) and that
        // messages are routed only to the route whose selector matches their document type.
        String okConfig = "raw:route[0]\n";
        String errConfig = "raw:" +
                           "route[1]\n" +
                           "route[0].name \"foo\"\n" +
                           "route[0].selector \"foo bar\"\n" +
                           "route[0].feed \"baz\"\n";
        DocumentProtocol protocol = new DocumentProtocol(manager, okConfig);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof DocumentRouteSelectorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof DocumentRouteSelectorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", errConfig) instanceof ErrorPolicy);
        protocol = new DocumentProtocol(manager, errConfig);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof ErrorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof ErrorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", okConfig) instanceof DocumentRouteSelectorPolicy);
        // Route "foo" selects type testdoc, "bar" selects type other; all testdoc messages
        // below must therefore resolve to "foo" only.
        PolicyTestFrame frame = new PolicyTestFrame(manager);
        frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                         "route[2]\n" +
                                         "route[0].name \"foo\"\n" +
                                         "route[0].selector \"testdoc\"\n" +
                                         "route[0].feed \"myfeed\"\n" +
                                         "route[1].name \"bar\"\n" +
                                         "route[1].selector \"other\"\n" +
                                         "route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
        frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
        frame.assertSelect(Arrays.asList("foo"));
        Message put = new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                          new DocumentId("id:ns:testdoc::"))));
        frame.setMessage(put);
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::")));
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
                                                                      new DocumentId("id:ns:testdoc::"))));
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(put);
        frame.assertMergeOneReply("foo");
        frame.destroy();
    }
    @Test
    public void testDocumentSelectorDualCluster() {
        // Uses field-dependent selectors: a testdoc with intfield/1000 > 0 should match
        // route "foo" only; route "bar" targets the unrelated "other" type.
        PolicyTestFrame frame = new PolicyTestFrame(manager);
        frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                         "route[2]\n" +
                                         "route[0].name \"foo\"\n" +
                                         "route[0].selector \"(testdoc AND (testdoc.intfield / 1000 > 0))\"\n" +
                                         "route[0].feed \"myfeed\"\n" +
                                         "route[1].name \"bar\"\n" +
                                         "route[1].selector \"(other AND (other.intfield / 1000 > 0))\"\n" +
                                         "route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
        frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
        frame.assertSelect(Arrays.asList("foo"));
        Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId("id:ns:testdoc::"));
        doc.setFieldValue("intfield", 3000);  // 3000 / 1000 > 0, so the selector matches
        Message put = new PutDocumentMessage(new DocumentPut(doc));
        frame.setMessage(put);
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(put);
        frame.assertMergeOneReply("foo");
        frame.destroy();
    }
    @Test
    public void testDocumentRouteSelectorIgnore() {
        // A put whose document does not match any route selector must be answered with a
        // DOCUMENTIGNORED reply (no error) rather than being sent anywhere.
        PolicyTestFrame frame = new PolicyTestFrame(manager);
        frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                         "route[1]\n" +
                                         "route[0].name \"docproc/cluster.foo\"\n" +
                                         "route[0].selector \"testdoc and testdoc.stringfield == 'foo'\"\n" +
                                         "route[0].feed \"myfeed\"\n]").addRecipient("docproc/cluster.foo"));
        frame.setMessage(new PutDocumentMessage(
                new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                             new DocumentId("id:yarn:testdoc:n=1234:fluff")))));
        frame.select(0);
        Reply reply = frame.getReceptor().getReply(TIMEOUT);
        assertNotNull(reply);
        assertEquals(DocumentProtocol.REPLY_DOCUMENTIGNORED, reply.getType());
        assertEquals(0, reply.getNumErrors());
        // Updates cannot be evaluated against the field condition up front, so they are
        // still forwarded to the route.
        frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
                                                                      new DocumentId("id:ns:testdoc::"))));
        frame.assertSelect(Arrays.asList("docproc/cluster.foo"));
        frame.destroy();
    }
@Test
public void testLoadBalancer() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.getNetwork().registerSession("0/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 1));
frame.setHop(new HopSpec("test", "[LoadBalancer:cluster=docproc/cluster.default;session=chain.default]"));
assertSelect(frame, 1, Arrays.asList(frame.getNetwork().getConnectionSpec() + "/chain.default"));
}
    @Test
    public void testRoundRobin() {
        // RoundRobin must cycle over the configured recipients that are currently up, and
        // shrink the rotation as recipients are unregistered.
        PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
        frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                             new DocumentId("id:ns:testdoc::")))));
        for (int i = 0; i < 10; ++i) {
            frame.getNetwork().registerSession(i + "/chain.default");
        }
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
        frame.setHop(new HopSpec("test", "[RoundRobin]")
                .addRecipient("docproc/cluster.default/3/chain.default")
                .addRecipient("docproc/cluster.default/6/chain.default")
                .addRecipient("docproc/cluster.default/9/chain.default"));
        assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
                                              "docproc/cluster.default/6/chain.default",
                                              "docproc/cluster.default/9/chain.default"));
        // Dropping a session removes it from the rotation without disturbing the rest.
        frame.getNetwork().unregisterSession("6/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 9));
        assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
                                              "docproc/cluster.default/9/chain.default"));
        frame.getNetwork().unregisterSession("3/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 8));
        assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/9/chain.default"));
        frame.getNetwork().unregisterSession("9/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 7));
        assertSelect(frame, 32, new ArrayList<>());  // no recipients left => nothing selected
        frame.setHop(new HopSpec("test", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.default"));
        frame.assertMergeOneReply("docproc/cluster.default/0/chain.default");
        frame.destroy();
    }
    @Test
    public void testRoundRobinCache() {
        // Two frames with distinct [RoundRobin] hops must not share cached recipient state.
        PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
        HopSpec fooHop = new HopSpec("foo", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.foo");
        fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
        fooFrame.setHop(fooHop);
        PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
        HopSpec barHop = new HopSpec("bar", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.bar");
        barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
        barFrame.setHop(barHop);
        fooFrame.getMessageBus().setupRouting(
                new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                                   .addHop(fooHop)
                                                   .addHop(barHop)));
        fooFrame.getNetwork().registerSession("0/chain.foo");
        fooFrame.getNetwork().registerSession("0/chain.bar");
        assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
        RoutingNode fooChild = fooFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().toString());
        RoutingNode barChild = barFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().toString());
        barChild.handleReply(new EmptyReply());
        fooChild.handleReply(new EmptyReply());
        assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
        assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
    }
/**
* Ensures that the given number of select passes on the given frame produces an expected list of recipients.
*
* @param frame The frame to select on.
* @param numSelects The number of selects to perform.
* @param expected The list of expected recipients.
*/
private static void assertSelect(PolicyTestFrame frame, int numSelects, List<String> expected) {
Set<String> lst = new TreeSet<>();
for (int i = 0; i < numSelects; ++i) {
if (!expected.isEmpty()) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
} else {
frame.select(0);
}
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(expected.size(), lst.size());
Iterator<String> it = lst.iterator();
for (String recipient : expected) {
assertEquals(recipient, it.next());
}
}
private static void assertMirrorReady(Mirror slobrok)
throws InterruptedException, TimeoutException
{
for (int i = 0; i < TIMEOUT_MILLIS / 10; ++i) {
if (slobrok.ready()) {
return;
}
Thread.sleep(10);
}
throw new TimeoutException();
}
private static void assertMirrorContains(IMirror slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
for (int i = 0; i < TIMEOUT_MILLIS / 10; ++i) {
if (slobrok.lookup(pattern).size() == numEntries) {
return;
}
Thread.sleep(10);
}
throw new TimeoutException();
}
    /** Convenience overload that skips the entry-count check (passes -1). */
    private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern)
            throws InterruptedException, TimeoutException
    {
        setupExternPolicy(frame, slobrok, pattern, -1);
    }
    /**
     * Installs an [Extern] hop pointing at the given slobrok/pattern on the frame, then
     * waits until the policy's mirror is ready (and, when numEntries >= 0, until the
     * pattern resolves to that many entries).
     */
    private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern, int numEntries)
            throws InterruptedException, TimeoutException
    {
        String param = "tcp/localhost:" + slobrok.port() + ";" + pattern;
        frame.setHop(new HopSpec("test", "[Extern:" + param + "]"));
        // Dig the actual ExternPolicy instance out of the routing table so we can wait on
        // its private mirror instead of sleeping blindly.
        MessageBus mbus = frame.getMessageBus();
        HopBlueprint hop = mbus.getRoutingTable(DocumentProtocol.NAME).getHop("test");
        PolicyDirective dir = (PolicyDirective)hop.getDirective(0);
        ExternPolicy policy = (ExternPolicy)mbus.getRoutingPolicy(DocumentProtocol.NAME, dir.getName(), dir.getParam());
        assertMirrorReady(policy.getMirror());
        if (numEntries >= 0) {
            assertMirrorContains(policy.getMirror(), pattern, numEntries);
        }
    }
    /** Creates a fresh test frame bound to the shared document type manager. */
    private PolicyTestFrame newFrame() {
        return new PolicyTestFrame(manager);
    }
    /** Creates a fresh test frame preloaded with the given message. */
    private PolicyTestFrame newFrame(Message msg) {
        PolicyTestFrame frame = newFrame();
        frame.setMessage(msg);
        return frame;
    }
    /** Builds a put message for an empty testdoc document with the given id. */
    private PutDocumentMessage newPutDocument(String documentId) {
        return new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                   new DocumentId(documentId))));
    }
    /** Creates a frame preloaded with a put message for the given document id. */
    private PolicyTestFrame newPutDocumentFrame(String documentId) {
        return newFrame(newPutDocument(documentId));
    }
} | class PolicyTestCase {
    // Timeout for receiving replies in tests, interpreted in TIMEOUT_UNIT.
    private static final int TIMEOUT = 300;
    private static final TimeUnit TIMEOUT_UNIT = TimeUnit.SECONDS;
    // The same timeout budget expressed in milliseconds, used by the polling helpers.
    private static final long TIMEOUT_MILLIS = TIMEOUT_UNIT.toMillis(TIMEOUT);
    // Shared document type manager, configured from testdoc.cfg in setUp().
    private final DocumentTypeManager manager = new DocumentTypeManager();
    /** Loads the testdoc document type definitions into the shared manager before each test. */
    @Before
    public void setUp() {
        DocumentTypeManagerConfigurer.configure(manager, "file:./test/cfg/testdoc.cfg");
    }
    @Test
    public void testProtocol() {
        // Each known policy name must be resolved to the corresponding policy class by
        // DocumentProtocol.createPolicy().
        DocumentProtocol protocol = new DocumentProtocol(manager);
        RoutingPolicy policy = protocol.createPolicy("AND", null);
        assertTrue(policy instanceof ANDPolicy);
        policy = new DocumentProtocol(manager).createPolicy("DocumentRouteSelector", "raw:route[0]\n");
        assertTrue(policy instanceof DocumentRouteSelectorPolicy);
        policy = new DocumentProtocol(manager).createPolicy("Extern", "foo;bar/baz");
        assertTrue(policy instanceof ExternPolicy);
        policy = new DocumentProtocol(manager).createPolicy("LocalService", null);
        assertTrue(policy instanceof LocalServicePolicy);
        policy = new DocumentProtocol(manager).createPolicy("RoundRobin", null);
        assertTrue(policy instanceof RoundRobinPolicy);
        policy = new DocumentProtocol(manager).createPolicy("SubsetService", null);
        assertTrue(policy instanceof SubsetServicePolicy);
        policy = new DocumentProtocol(manager).createPolicy("LoadBalancer", "cluster=docproc/cluster.default;session=chain.default");
        assertTrue(policy instanceof LoadBalancerPolicy);
    }
    @Test
    public void testAND() {
        // [AND] without a parameter fans out to all configured recipients; with a parameter
        // it targets that single hop instead. Replies from all branches are merged.
        PolicyTestFrame frame = new PolicyTestFrame(manager);
        frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                             new DocumentId("id:ns:testdoc::")))));
        frame.setHop(new HopSpec("test", "[AND]")
                .addRecipient("foo")
                .addRecipient("bar"));
        frame.assertSelect(Arrays.asList("foo", "bar"));
        frame.setHop(new HopSpec("test", "[AND:baz]")
                .addRecipient("foo")
                .addRecipient("bar"));
        frame.assertSelect(Arrays.asList("baz"));  // the parameter overrides the recipients
        frame.setHop(new HopSpec("test", "[AND:foo]"));
        frame.assertMergeOneReply("foo");
        frame.setHop(new HopSpec("test", "[AND:foo bar]"));
        frame.assertMergeTwoReplies("foo", "bar");
        frame.destroy();
    }
@Test
public void requireThatExternPolicyWithIllegalParamIsAnErrorPolicy() throws ListenFailedException {
Slobrok slobrok = new Slobrok();
String spec = "tcp/localhost:" + slobrok.port();
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", null) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", "") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec) instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";") instanceof ErrorPolicy);
assertTrue(new DocumentProtocol(manager).createPolicy("Extern", spec + ";bar") instanceof ErrorPolicy);
}
@Test
public void requireThatExternPolicyWithUnknownPatternSelectsNone() throws Exception {
PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
setupExternPolicy(frame, new Slobrok(), "foo/bar");
frame.assertSelect(null);
}
    @Test
    public void requireThatExternPolicySelectsFromExternSlobrok() throws Exception {
        // Ten services register in an external slobrok; repeated selection must eventually
        // reach every one of them.
        PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
        Slobrok slobrok = new Slobrok();
        List<TestServer> servers = new ArrayList<>();
        for (int i = 0; i < 10; ++i) {
            TestServer server = new TestServer("docproc/cluster.default/" + i, null, slobrok,
                                               new DocumentProtocol(manager));
            server.net.registerSession("chain.default");
            servers.add(server);
        }
        setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 10);
        Set<String> lst = new HashSet<>();
        for (int i = 0; i < 10; ++i) {
            RoutingNode leaf = frame.select(1).get(0);
            String recipient = leaf.getRoute().toString();
            lst.add(recipient);
            leaf.handleReply(new EmptyReply());
            assertNotNull(frame.getReceptor().getReply(TIMEOUT));
        }
        assertEquals(10, lst.size());  // all ten distinct services were selected
        for (TestServer server : servers) {
            server.destroy();
        }
        frame.destroy();
    }
    @Test
    public void requireThatExternPolicyMergesOneReplyAsProtocol() throws Exception {
        // With a single matching extern service, the policy must merge its one reply using
        // the protocol's merge logic.
        PolicyTestFrame frame = newPutDocumentFrame("id:ns:testdoc::");
        Slobrok slobrok = new Slobrok();
        TestServer server = new TestServer("docproc/cluster.default/0", null, slobrok,
                                           new DocumentProtocol(manager));
        server.net.registerSession("chain.default");
        setupExternPolicy(frame, slobrok, "docproc/cluster.default/*/chain.default", 1);
        frame.assertMergeOneReply(server.net.getConnectionSpec() + "/chain.default");
        server.destroy();
        frame.destroy();
    }
    @Test
    public void testExternSend() throws Exception {
        // End-to-end: a source in one slobrok domain sends via [Extern:...] to an
        // intermediate registered in a different slobrok, which forwards to a destination.
        Slobrok local = new Slobrok();
        TestServer src = new TestServer("src", null, local, new DocumentProtocol(manager));
        SourceSession ss = src.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
        Slobrok slobrok = new Slobrok();
        TestServer itr = new TestServer("itr",
                                        new RoutingTableSpec(DocumentProtocol.NAME)
                                                .addRoute(new RouteSpec("default").addHop("dst"))
                                                .addHop(new HopSpec("dst", "dst/session")),
                                        slobrok, new DocumentProtocol(manager));
        IntermediateSession is = itr.mb.createIntermediateSession("session", true, new Receptor(), new Receptor());
        TestServer dst = new TestServer("dst", null, slobrok, new DocumentProtocol(manager));
        DestinationSession ds = dst.mb.createDestinationSession("session", true, new Receptor());
        Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
        msg.getTrace().setLevel(9);  // full trace, dumped at the end for debugging
        msg.setRoute(Route.parse("[Extern:tcp/localhost:" + slobrok.port() + ";itr/session] default"));
        assertTrue(ss.send(msg).isAccepted());
        // Walk the message through intermediate and destination, then the reply back.
        assertNotNull(msg = ((Receptor)is.getMessageHandler()).getMessage(TIMEOUT));
        is.forward(msg);
        assertNotNull(msg = ((Receptor)ds.getMessageHandler()).getMessage(TIMEOUT));
        ds.acknowledge(msg);
        Reply reply = ((Receptor)is.getReplyHandler()).getReply(TIMEOUT);
        assertNotNull(reply);
        is.forward(reply);
        assertNotNull(reply = ((Receptor)ss.getReplyHandler()).getReply(TIMEOUT));
        System.out.println(reply.getTrace().toString());
        src.destroy();
        itr.destroy();
        dst.destroy();
        slobrok.stop();
        local.stop();
    }
    @Test
    public void testExternMultipleSlobroks() throws ListenFailedException {
        // The [Extern] spec may list several slobrok connection specs; when the first one
        // goes down, the policy must fall through to the next.
        Slobrok local = new Slobrok();
        TestServer srcServer = new TestServer("src", null, local, new DocumentProtocol(manager));
        SourceSession srcSession =
                srcServer.mb.createSourceSession(new Receptor(), new SourceSessionParams().setTimeout(TIMEOUT));
        Slobrok extern = new Slobrok();
        String spec = "tcp/localhost:" + extern.port();
        TestServer dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
        Receptor dstHandler = new Receptor();
        DestinationSession dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
        Message msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
        msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
        assertTrue(srcSession.send(msg).isAccepted());
        assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
        dstSession.acknowledge(msg);
        Reply reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
        assertNotNull(reply);
        // Tear down the first extern slobrok and its destination entirely.
        extern.stop();
        dstSession.destroy();
        dstServer.destroy();
        dstHandler.reset();
        assertNull(dstHandler.getMessage(0));
        // Bring up a second extern slobrok; the spec now lists the dead one first.
        extern = new Slobrok();
        spec += ",tcp/localhost:" + extern.port();
        dstServer = new TestServer("dst", null, extern, new DocumentProtocol(manager));
        dstHandler = new Receptor();
        dstSession = dstServer.mb.createDestinationSession("session", true, dstHandler);
        msg = new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::"));
        msg.setRoute(Route.parse("[Extern:" + spec + ";dst/session]"));
        assertTrue(srcSession.send(msg).isAccepted());
        assertNotNull(msg = dstHandler.getMessage(TIMEOUT));
        dstSession.acknowledge(msg);
        reply = ((Receptor)srcSession.getReplyHandler()).getReply(TIMEOUT);
        assertNotNull(reply);
        extern.stop();
        dstSession.destroy();
        dstServer.destroy();
        local.stop();
        srcSession.destroy();
        srcServer.destroy();
    }
    @Test
    public void testLocalService() {
        // [LocalService] must spread selection over all matching local sessions, and fall
        // back to the wildcard route when the parameter does not match anything.
        PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
        frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                             new DocumentId("id:ns:testdoc::0")))));
        for (int i = 0; i < 10; ++i) {
            frame.getNetwork().registerSession(i + "/chain.default");
        }
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
        frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService]/chain.default"));
        Set<String> lst = new HashSet<>();
        for (int i = 0; i < 10; ++i) {
            RoutingNode leaf = frame.select(1).get(0);
            String recipient = leaf.getRoute().toString();
            lst.add(recipient);
            leaf.handleReply(new EmptyReply());
            assertNotNull(frame.getReceptor().getReply(TIMEOUT));
        }
        assertEquals(10, lst.size());  // every registered session was selected once
        lst.clear();
        // An unknown parameter makes the policy resolve to the wildcard route every time.
        frame.setHop(new HopSpec("test", "docproc/cluster.default/[LocalService:broken]/chain.default"));
        for (int i = 0; i < 10; ++i) {
            RoutingNode leaf = frame.select(1).get(0);
            String recipient = leaf.getRoute().toString();
            assertEquals(recipient, "docproc/cluster.default/*/chain.default");
            lst.add(recipient);
            leaf.handleReply(new EmptyReply());
            assertNotNull(frame.getReceptor().getReply(TIMEOUT));
        }
        assertEquals(1, lst.size());
        frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                             new DocumentId("id:ns:testdoc::")))));
        frame.setHop(new HopSpec("test", "[LocalService]"));
        frame.assertMergeOneReply("*");
        frame.destroy();
    }
    @Test
    public void testLocalServiceCache() {
        // Two frames sharing a network must each resolve their own [LocalService] hop; the
        // policy cache must not leak one frame's resolution into the other.
        PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
        HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[LocalService]/chain.foo");
        fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
        fooFrame.setHop(fooHop);
        PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
        HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[LocalService]/chain.bar");
        barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
        barFrame.setHop(barHop);
        fooFrame.getMessageBus().setupRouting(
                new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                                   .addHop(fooHop)
                                                   .addHop(barHop)));
        fooFrame.getNetwork().registerSession("0/chain.foo");
        fooFrame.getNetwork().registerSession("0/chain.bar");
        assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
        RoutingNode fooChild = fooFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
        RoutingNode barChild = barFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
        barChild.handleReply(new EmptyReply());
        fooChild.handleReply(new EmptyReply());
        assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
        assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
    }
@Test
private String getDocumentRouteSelectorRawConfig() {
return "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"foo\"\n" +
"route[0].selector \"testdoc\"\n" +
"route[0].feed \"myfeed\"\n" +
"route[1].name \"bar\"\n" +
"route[1].selector \"testdoc\"\n" +
"route[1].feed \"myfeed\"\n]";
}
    @Test
    public void remove_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
        // A remove must route by the document type encoded in its document id.
        PolicyTestFrame frame = createFrameWithTwoRoutes();
        frame.setMessage(createRemove("id:ns:testdoc::1"));
        frame.assertSelect(Arrays.asList("testdoc-route"));
        frame.setMessage(createRemove("id:ns:other::1"));
        frame.assertSelect(Arrays.asList("other-route"));
    }
    @Test
    public void get_document_messages_are_sent_to_the_route_handling_the_given_document_type() {
        // A get must route by the document type encoded in its document id.
        PolicyTestFrame frame = createFrameWithTwoRoutes();
        frame.setMessage(createGet("id:ns:testdoc::1"));
        frame.assertSelect(Arrays.asList("testdoc-route"));
        frame.setMessage(createGet("id:ns:other::1"));
        frame.assertSelect(Arrays.asList("other-route"));
    }
    /** Creates a frame whose hop routes testdoc documents and "other" documents separately. */
    private PolicyTestFrame createFrameWithTwoRoutes() {
        PolicyTestFrame result = new PolicyTestFrame(manager);
        result.setHop(new HopSpec("test", createDocumentRouteSelectorConfigWithTwoRoutes())
                .addRecipient("testdoc-route").addRecipient("other-route"));
        return result;
    }
private String createDocumentRouteSelectorConfigWithTwoRoutes() {
return "[DocumentRouteSelector:raw:" +
"route[2]\n" +
"route[0].name \"testdoc-route\"\n" +
"route[0].selector \"testdoc and testdoc.stringfield != '0'\"\n" +
"route[0].feed \"\"\n" +
"route[1].name \"other-route\"\n" +
"route[1].selector \"other and other.intfield != '0'\"\n" +
"route[1].feed \"\"\n]";
}
private RemoveDocumentMessage createRemove(String docId) {
return new RemoveDocumentMessage(new DocumentId(docId));
}
private GetDocumentMessage createGet(String docId) {
return new GetDocumentMessage(new DocumentId(docId));
}
@Test
public void testSubsetService() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::"))))));
frame.setHop(new HopSpec("test", "docproc/cluster.default/[SubsetService:2]/chain.default"));
Set<String> lst = new HashSet<>();
for (int i = 1; i <= 10; ++i) {
frame.getNetwork().registerSession(i + "/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", i));
RoutingNode leaf = frame.select(1).get(0);
lst.add(leaf.getRoute().toString());
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertTrue(lst.size() > 1);
String prev = null;
for (int i = 1; i <= 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String next = leaf.getRoute().toString();
if (prev == null) {
assertNotNull(next);
} else {
assertNotEquals(prev, next);
}
prev = next;
leaf.handleReply(new EmptyReply());
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
lst.clear();
for (int i = 1; i <= 10; ++i) {
RoutingNode leaf = frame.select(1).get(0);
String route = leaf.getRoute().toString();
lst.add(route);
frame.getNetwork().unregisterSession(route.substring(frame.getIdentity().length() + 1));
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10 - i));
Reply reply = new EmptyReply();
reply.addError(new Error(ErrorCode.NO_ADDRESS_FOR_SERVICE, route));
leaf.handleReply(reply);
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(10, lst.size());
frame.setHop(new HopSpec("test", "[SubsetService]"));
frame.assertMergeOneReply("*");
frame.destroy();
}
    @Test
    public void testSubsetServiceCache() {
        // Two frames sharing one network must each resolve their own [SubsetService:2] hop;
        // the policy cache must keep per-hop state separate.
        PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
        HopSpec fooHop = new HopSpec("foo", "docproc/cluster.default/[SubsetService:2]/chain.foo");
        fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
        fooFrame.setHop(fooHop);
        PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
        HopSpec barHop = new HopSpec("bar", "docproc/cluster.default/[SubsetService:2]/chain.bar");
        barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
        barFrame.setHop(barHop);
        fooFrame.getMessageBus().setupRouting(
                new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
                                                   .addHop(fooHop)
                                                   .addHop(barHop)));
        fooFrame.getNetwork().registerSession("0/chain.foo");
        fooFrame.getNetwork().registerSession("0/chain.bar");
        assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
        RoutingNode fooChild = fooFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().getHop(0).toString());
        RoutingNode barChild = barFrame.select(1).get(0);
        assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().getHop(0).toString());
        barChild.handleReply(new EmptyReply());
        fooChild.handleReply(new EmptyReply());
        assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
        assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
    }
    @Test
    public void testDocumentRouteSelector() {
        // Verifies config validation (a bad selector yields an ErrorPolicy) and that
        // messages are routed only to the route whose selector matches their document type.
        String okConfig = "raw:route[0]\n";
        String errConfig = "raw:" +
                           "route[1]\n" +
                           "route[0].name \"foo\"\n" +
                           "route[0].selector \"foo bar\"\n" +
                           "route[0].feed \"baz\"\n";
        DocumentProtocol protocol = new DocumentProtocol(manager, okConfig);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof DocumentRouteSelectorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof DocumentRouteSelectorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", errConfig) instanceof ErrorPolicy);
        protocol = new DocumentProtocol(manager, errConfig);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", null) instanceof ErrorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", "") instanceof ErrorPolicy);
        assertTrue(protocol.createPolicy("DocumentRouteSelector", okConfig) instanceof DocumentRouteSelectorPolicy);
        // Route "foo" selects testdoc, "bar" selects "other"; all testdoc messages below
        // must therefore resolve to "foo" only.
        PolicyTestFrame frame = new PolicyTestFrame(manager);
        frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                         "route[2]\n" +
                                         "route[0].name \"foo\"\n" +
                                         "route[0].selector \"testdoc\"\n" +
                                         "route[0].feed \"myfeed\"\n" +
                                         "route[1].name \"bar\"\n" +
                                         "route[1].selector \"other\"\n" +
                                         "route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
        frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
        frame.assertSelect(Arrays.asList("foo"));
        Message put = new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                          new DocumentId("id:ns:testdoc::"))));
        frame.setMessage(put);
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::")));
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
                                                                      new DocumentId("id:ns:testdoc::"))));
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(put);
        frame.assertMergeOneReply("foo");
        frame.destroy();
    }
    @Test
    public void testDocumentSelectorDualCluster() {
        // Field-dependent selectors: a testdoc with intfield/1000 > 0 must match route
        // "foo" only; route "bar" targets the unrelated "other" type.
        PolicyTestFrame frame = new PolicyTestFrame(manager);
        frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                         "route[2]\n" +
                                         "route[0].name \"foo\"\n" +
                                         "route[0].selector \"(testdoc AND (testdoc.intfield / 1000 > 0))\"\n" +
                                         "route[0].feed \"myfeed\"\n" +
                                         "route[1].name \"bar\"\n" +
                                         "route[1].selector \"(other AND (other.intfield / 1000 > 0))\"\n" +
                                         "route[1].feed \"myfeed\"\n]").addRecipient("foo").addRecipient("bar"));
        frame.setMessage(new GetDocumentMessage(new DocumentId("id:ns:testdoc::"), "fieldSet"));
        frame.assertSelect(Arrays.asList("foo"));
        Document doc = new Document(manager.getDocumentType("testdoc"), new DocumentId("id:ns:testdoc::"));
        doc.setFieldValue("intfield", 3000);  // 3000 / 1000 > 0, so the selector matches
        Message put = new PutDocumentMessage(new DocumentPut(doc));
        frame.setMessage(put);
        frame.assertSelect(Arrays.asList("foo"));
        frame.setMessage(put);
        frame.assertMergeOneReply("foo");
        frame.destroy();
    }
    @Test
    public void testDocumentRouteSelectorIgnore() {
        // A put whose document matches no route selector must get a DOCUMENTIGNORED reply
        // with no errors instead of being routed.
        PolicyTestFrame frame = new PolicyTestFrame(manager);
        frame.setHop(new HopSpec("test", "[DocumentRouteSelector:raw:" +
                                         "route[1]\n" +
                                         "route[0].name \"docproc/cluster.foo\"\n" +
                                         "route[0].selector \"testdoc and testdoc.stringfield == 'foo'\"\n" +
                                         "route[0].feed \"myfeed\"\n]").addRecipient("docproc/cluster.foo"));
        frame.setMessage(new PutDocumentMessage(
                new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                             new DocumentId("id:yarn:testdoc:n=1234:fluff")))));
        frame.select(0);
        Reply reply = frame.getReceptor().getReply(TIMEOUT);
        assertNotNull(reply);
        assertEquals(DocumentProtocol.REPLY_DOCUMENTIGNORED, reply.getType());
        assertEquals(0, reply.getNumErrors());
        // Updates cannot be checked against the field condition up front, so they are
        // still forwarded to the route.
        frame.setMessage(new UpdateDocumentMessage(new DocumentUpdate(manager.getDocumentType("testdoc"),
                                                                      new DocumentId("id:ns:testdoc::"))));
        frame.assertSelect(Arrays.asList("docproc/cluster.foo"));
        frame.destroy();
    }
@Test
public void testLoadBalancer() {
PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId("id:ns:testdoc::")))));
frame.getNetwork().registerSession("0/chain.default");
assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 1));
frame.setHop(new HopSpec("test", "[LoadBalancer:cluster=docproc/cluster.default;session=chain.default]"));
assertSelect(frame, 1, Arrays.asList(frame.getNetwork().getConnectionSpec() + "/chain.default"));
}
    @Test
    public void testRoundRobin() {
        // RoundRobin must cycle over the configured recipients that are currently up, and
        // shrink the rotation as recipients are unregistered.
        PolicyTestFrame frame = new PolicyTestFrame("docproc/cluster.default", manager);
        frame.setMessage(new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
                                                                             new DocumentId("id:ns:testdoc::")))));
        for (int i = 0; i < 10; ++i) {
            frame.getNetwork().registerSession(i + "/chain.default");
        }
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 10));
        frame.setHop(new HopSpec("test", "[RoundRobin]")
                .addRecipient("docproc/cluster.default/3/chain.default")
                .addRecipient("docproc/cluster.default/6/chain.default")
                .addRecipient("docproc/cluster.default/9/chain.default"));
        assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
                                              "docproc/cluster.default/6/chain.default",
                                              "docproc/cluster.default/9/chain.default"));
        // Dropping a session removes it from the rotation without disturbing the rest.
        frame.getNetwork().unregisterSession("6/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 9));
        assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/3/chain.default",
                                              "docproc/cluster.default/9/chain.default"));
        frame.getNetwork().unregisterSession("3/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 8));
        assertSelect(frame, 32, Arrays.asList("docproc/cluster.default/9/chain.default"));
        frame.getNetwork().unregisterSession("9/chain.default");
        assertTrue(frame.waitSlobrok("docproc/cluster.default/*/chain.default", 7));
        assertSelect(frame, 32, new ArrayList<>());  // no recipients left => nothing selected
        frame.setHop(new HopSpec("test", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.default"));
        frame.assertMergeOneReply("docproc/cluster.default/0/chain.default");
        frame.destroy();
    }
@Test
public void testRoundRobinCache() {
PolicyTestFrame fooFrame = new PolicyTestFrame("docproc/cluster.default", manager);
HopSpec fooHop = new HopSpec("foo", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.foo");
fooFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::foo")));
fooFrame.setHop(fooHop);
PolicyTestFrame barFrame = new PolicyTestFrame(fooFrame);
HopSpec barHop = new HopSpec("bar", "[RoundRobin]").addRecipient("docproc/cluster.default/0/chain.bar");
barFrame.setMessage(new RemoveDocumentMessage(new DocumentId("id:ns:testdoc::bar")));
barFrame.setHop(barHop);
fooFrame.getMessageBus().setupRouting(
new RoutingSpec().addTable(new RoutingTableSpec(DocumentProtocol.NAME)
.addHop(fooHop)
.addHop(barHop)));
fooFrame.getNetwork().registerSession("0/chain.foo");
fooFrame.getNetwork().registerSession("0/chain.bar");
assertTrue(fooFrame.waitSlobrok("docproc/cluster.default/0/*", 2));
RoutingNode fooChild = fooFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.foo", fooChild.getRoute().toString());
RoutingNode barChild = barFrame.select(1).get(0);
assertEquals("docproc/cluster.default/0/chain.bar", barChild.getRoute().toString());
barChild.handleReply(new EmptyReply());
fooChild.handleReply(new EmptyReply());
assertNotNull(barFrame.getReceptor().getReply(TIMEOUT));
assertNotNull(fooFrame.getReceptor().getReply(TIMEOUT));
}
/**
* Ensures that the given number of select passes on the given frame produces an expected list of recipients.
*
* @param frame The frame to select on.
* @param numSelects The number of selects to perform.
* @param expected The list of expected recipients.
*/
private static void assertSelect(PolicyTestFrame frame, int numSelects, List<String> expected) {
Set<String> lst = new TreeSet<>();
for (int i = 0; i < numSelects; ++i) {
if (!expected.isEmpty()) {
RoutingNode leaf = frame.select(1).get(0);
String recipient = leaf.getRoute().toString();
lst.add(recipient);
leaf.handleReply(new EmptyReply());
} else {
frame.select(0);
}
assertNotNull(frame.getReceptor().getReply(TIMEOUT));
}
assertEquals(expected.size(), lst.size());
Iterator<String> it = lst.iterator();
for (String recipient : expected) {
assertEquals(recipient, it.next());
}
}
private static void assertMirrorReady(Mirror slobrok)
throws InterruptedException, TimeoutException
{
for (int i = 0; i < TIMEOUT_MILLIS / 10; ++i) {
if (slobrok.ready()) {
return;
}
Thread.sleep(10);
}
throw new TimeoutException();
}
private static void assertMirrorContains(IMirror slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
for (int i = 0; i < TIMEOUT_MILLIS / 10; ++i) {
if (slobrok.lookup(pattern).size() == numEntries) {
return;
}
Thread.sleep(10);
}
throw new TimeoutException();
}
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern)
throws InterruptedException, TimeoutException
{
setupExternPolicy(frame, slobrok, pattern, -1);
}
private void setupExternPolicy(PolicyTestFrame frame, Slobrok slobrok, String pattern, int numEntries)
throws InterruptedException, TimeoutException
{
String param = "tcp/localhost:" + slobrok.port() + ";" + pattern;
frame.setHop(new HopSpec("test", "[Extern:" + param + "]"));
MessageBus mbus = frame.getMessageBus();
HopBlueprint hop = mbus.getRoutingTable(DocumentProtocol.NAME).getHop("test");
PolicyDirective dir = (PolicyDirective)hop.getDirective(0);
ExternPolicy policy = (ExternPolicy)mbus.getRoutingPolicy(DocumentProtocol.NAME, dir.getName(), dir.getParam());
assertMirrorReady(policy.getMirror());
if (numEntries >= 0) {
assertMirrorContains(policy.getMirror(), pattern, numEntries);
}
}
private PolicyTestFrame newFrame() {
return new PolicyTestFrame(manager);
}
private PolicyTestFrame newFrame(Message msg) {
PolicyTestFrame frame = newFrame();
frame.setMessage(msg);
return frame;
}
private PutDocumentMessage newPutDocument(String documentId) {
return new PutDocumentMessage(new DocumentPut(new Document(manager.getDocumentType("testdoc"),
new DocumentId(documentId))));
}
private PolicyTestFrame newPutDocumentFrame(String documentId) {
return newFrame(newPutDocument(documentId));
}
} |
Nit: Add `this` for consistency | public ResourceSnapshot(ApplicationId applicationId, double cpuCores, double memoryGb, double diskGb, Instant timestamp) {
this.applicationId = applicationId;
resourceAllocation = new ResourceAllocation(cpuCores, memoryGb, diskGb);
this.timestamp = timestamp;
} | resourceAllocation = new ResourceAllocation(cpuCores, memoryGb, diskGb); | public ResourceSnapshot(ApplicationId applicationId, double cpuCores, double memoryGb, double diskGb, Instant timestamp) {
this.applicationId = applicationId;
this.resourceAllocation = new ResourceAllocation(cpuCores, memoryGb, diskGb);
this.timestamp = timestamp;
} | class ResourceSnapshot {
private final ApplicationId applicationId;
private final ResourceAllocation resourceAllocation;
private final Instant timestamp;
public static ResourceSnapshot from(List<NodeRepositoryNode> nodes, Instant timestamp) {
Set<ApplicationId> applicationIds = nodes.stream()
.map(n -> ApplicationId.from(n.getOwner().tenant, n.getOwner().application, n.getOwner().instance))
.collect(Collectors.toSet());
if (applicationIds.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one application");
return new ResourceSnapshot(
applicationIds.iterator().next(),
nodes.stream().mapToDouble(NodeRepositoryNode::getMinCpuCores).sum(),
nodes.stream().mapToDouble(NodeRepositoryNode::getMinMainMemoryAvailableGb).sum(),
nodes.stream().mapToDouble(NodeRepositoryNode::getMinDiskAvailableGb).sum(),
timestamp
);
}
public ApplicationId getApplicationId() {
return applicationId;
}
public double getCpuCores() {
return resourceAllocation.getCpuCores();
}
public double getMemoryGb() {
return resourceAllocation.getMemoryGb();
}
public double getDiskGb() {
return resourceAllocation.getDiskGb();
}
public Instant getTimestamp() {
return timestamp;
}
} | class ResourceSnapshot {
private final ApplicationId applicationId;
private final ResourceAllocation resourceAllocation;
private final Instant timestamp;
public static ResourceSnapshot from(List<NodeRepositoryNode> nodes, Instant timestamp) {
Set<ApplicationId> applicationIds = nodes.stream()
.map(n -> ApplicationId.from(n.getOwner().tenant, n.getOwner().application, n.getOwner().instance))
.collect(Collectors.toSet());
if (applicationIds.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one application");
return new ResourceSnapshot(
applicationIds.iterator().next(),
nodes.stream().mapToDouble(NodeRepositoryNode::getMinCpuCores).sum(),
nodes.stream().mapToDouble(NodeRepositoryNode::getMinMainMemoryAvailableGb).sum(),
nodes.stream().mapToDouble(NodeRepositoryNode::getMinDiskAvailableGb).sum(),
timestamp
);
}
public ApplicationId getApplicationId() {
return applicationId;
}
public double getCpuCores() {
return resourceAllocation.getCpuCores();
}
public double getMemoryGb() {
return resourceAllocation.getMemoryGb();
}
public double getDiskGb() {
return resourceAllocation.getDiskGb();
}
public Instant getTimestamp() {
return timestamp;
}
} |
Can return `null` if `consume()` was never called. Is this intentional? | public List<ResourceSnapshot> consumedResources() {
return this.resources;
} | return this.resources; | public List<ResourceSnapshot> consumedResources() {
return this.resources;
} | class MockMeteringClient implements MeteringClient {
private List<ResourceSnapshot> resources;
@Override
public void consume(List<ResourceSnapshot> resources){
this.resources = resources;
}
@Override
public MeteringInfo getResourceSnapshots(String tenantName, String applicationName) {
ResourceAllocation emptyAllocation = new ResourceAllocation(0, 0, 0);
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, new ArrayList<>());
return new MeteringInfo(emptyAllocation, emptyAllocation, emptyAllocation, snapshotHistory);
}
} | class MockMeteringClient implements MeteringClient {
private List<ResourceSnapshot> resources = new ArrayList<>();
@Override
public void consume(List<ResourceSnapshot> resources){
this.resources = resources;
}
@Override
public MeteringInfo getResourceSnapshots(String tenantName, String applicationName) {
ResourceAllocation emptyAllocation = new ResourceAllocation(0, 0, 0);
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, new ArrayList<>());
return new MeteringInfo(emptyAllocation, emptyAllocation, emptyAllocation, snapshotHistory);
}
} |
See previous comment on `TestProperties`. | public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels, AttributeFields attributeFields) {
this.name = rankProfile.getName();
compressedProperties = compress(new Deriver(rankProfile, queryProfiles, importedModels, attributeFields, new TestProperties()).derive());
} | compressedProperties = compress(new Deriver(rankProfile, queryProfiles, importedModels, attributeFields, new TestProperties()).derive()); | public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels, AttributeFields attributeFields) {
this(rankProfile, queryProfiles, importedModels, attributeFields, new TestProperties());
} | class RawRankProfile implements RankProfilesConfig.Producer {
/** A reusable compressor with default settings */
private static final Compressor compressor = new Compressor();
private final String keyEndMarker = "\r=";
private final String valueEndMarker = "\r\n";
public final static String summaryFeatureFefPropertyPrefix = "vespa.summary.feature";
public final static String rankFeatureFefPropertyPrefix = "vespa.dump.feature";
private final String name;
private final Compressor.Compression compressedProperties;
/**
* Creates a raw rank profile from the given rank profile
*/
public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels, AttributeFields attributeFields, ModelContext.Properties deployProperties) {
this.name = rankProfile.getName();
compressedProperties = compress(new Deriver(rankProfile, queryProfiles, importedModels, attributeFields, deployProperties).derive());
}
/**
* Only for testing
*/
private Compressor.Compression compress(List<Pair<String, String>> properties) {
StringBuilder b = new StringBuilder();
for (Pair<String, String> property : properties)
b.append(property.getFirst()).append(keyEndMarker).append(property.getSecond()).append(valueEndMarker);
return compressor.compress(b.toString().getBytes(StandardCharsets.UTF_8));
}
private List<Pair<String, String>> decompress(Compressor.Compression compression) {
String propertiesString = new String(compressor.decompress(compression), StandardCharsets.UTF_8);
if (propertiesString.isEmpty()) return ImmutableList.of();
ImmutableList.Builder<Pair<String, String>> properties = new ImmutableList.Builder<>();
for (String propertyString : propertiesString.split(valueEndMarker)) {
String[] property = propertyString.split(keyEndMarker);
properties.add(new Pair<>(property[0], property[1]));
}
return properties.build();
}
public String getName() { return name; }
@Override
public String toString() {
return " rank profile " + name;
}
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
RankProfilesConfig.Rankprofile.Builder b = new RankProfilesConfig.Rankprofile.Builder().name(getName());
getRankProperties(b);
builder.rankprofile(b);
}
private void getRankProperties(RankProfilesConfig.Rankprofile.Builder b) {
RankProfilesConfig.Rankprofile.Fef.Builder fefB = new RankProfilesConfig.Rankprofile.Fef.Builder();
for (Pair<String, String> p : decompress(compressedProperties))
fefB.property(new RankProfilesConfig.Rankprofile.Fef.Property.Builder().name(p.getFirst()).value(p.getSecond()));
b.fef(fefB);
}
/**
* Returns the properties of this as an unmodifiable list.
* Note: This method is expensive.
*/
public List<Pair<String, String>> configProperties() { return decompress(compressedProperties); }
private static class Deriver {
/**
* The field rank settings of this profile
*/
private Map<String, FieldRankSettings> fieldRankSettings = new java.util.LinkedHashMap<>();
private RankingExpression firstPhaseRanking = null;
private RankingExpression secondPhaseRanking = null;
private Set<ReferenceNode> summaryFeatures = new LinkedHashSet<>();
private Set<ReferenceNode> rankFeatures = new LinkedHashSet<>();
private List<RankProfile.RankProperty> rankProperties = new ArrayList<>();
/**
* Rank properties for weight settings to make these available to feature executors
*/
private List<RankProfile.RankProperty> boostAndWeightRankProperties = new ArrayList<>();
private boolean ignoreDefaultRankFeatures = false;
private RankProfile.MatchPhaseSettings matchPhaseSettings = null;
private int rerankCount = -1;
private int keepRankCount = -1;
private int numThreadsPerSearch = -1;
private int minHitsPerThread = -1;
private int numSearchPartitions = -1;
private double termwiseLimit = 1.0;
private double rankScoreDropLimit = -Double.MAX_VALUE;
/**
* The rank type definitions used to derive settings for the native rank features
*/
private final NativeRankTypeDefinitionSet nativeRankTypeDefinitions = new NativeRankTypeDefinitionSet("default");
private final Map<String, String> attributeTypes;
private final Map<String, String> queryFeatureTypes;
private Set<String> filterFields = new java.util.LinkedHashSet<>();
/**
* Creates a raw rank profile from the given rank profile
*/
Deriver(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels,
AttributeFields attributeFields, ModelContext.Properties deployProperties)
{
RankProfile compiled = rankProfile.compile(queryProfiles, importedModels);
attributeTypes = compiled.getAttributeTypes();
queryFeatureTypes = compiled.getQueryFeatureTypes();
deriveRankingFeatures(compiled, deployProperties);
deriveRankTypeSetting(compiled, attributeFields);
deriveFilterFields(compiled);
deriveWeightProperties(compiled);
}
private void deriveFilterFields(RankProfile rp) {
filterFields.addAll(rp.allFilterFields());
}
private void deriveRankingFeatures(RankProfile rankProfile, ModelContext.Properties deployProperties) {
firstPhaseRanking = rankProfile.getFirstPhaseRanking();
secondPhaseRanking = rankProfile.getSecondPhaseRanking();
summaryFeatures = new LinkedHashSet<>(rankProfile.getSummaryFeatures());
rankFeatures = rankProfile.getRankFeatures();
rerankCount = rankProfile.getRerankCount();
matchPhaseSettings = rankProfile.getMatchPhaseSettings();
numThreadsPerSearch = rankProfile.getNumThreadsPerSearch();
minHitsPerThread = rankProfile.getMinHitsPerThread();
numSearchPartitions = rankProfile.getNumSearchPartitions();
termwiseLimit = rankProfile.getTermwiseLimit().orElse(deployProperties.defaultTermwiseLimit());
keepRankCount = rankProfile.getKeepRankCount();
rankScoreDropLimit = rankProfile.getRankScoreDropLimit();
ignoreDefaultRankFeatures = rankProfile.getIgnoreDefaultRankFeatures();
rankProperties = new ArrayList<>(rankProfile.getRankProperties());
derivePropertiesAndSummaryFeaturesFromFunctions(rankProfile.getFunctions());
}
private void derivePropertiesAndSummaryFeaturesFromFunctions(Map<String, RankProfile.RankingExpressionFunction> functions) {
if (functions.isEmpty()) return;
List<ExpressionFunction> functionExpressions = functions.values().stream().map(f -> f.function()).collect(Collectors.toList());
Map<String, String> functionProperties = new LinkedHashMap<>();
functionProperties.putAll(deriveFunctionProperties(functions, functionExpressions));
if (firstPhaseRanking != null) {
functionProperties.putAll(firstPhaseRanking.getRankProperties(functionExpressions));
}
if (secondPhaseRanking != null) {
functionProperties.putAll(secondPhaseRanking.getRankProperties(functionExpressions));
}
for (Map.Entry<String, String> e : functionProperties.entrySet()) {
rankProperties.add(new RankProfile.RankProperty(e.getKey(), e.getValue()));
}
SerializationContext context = new SerializationContext(functionExpressions, null, functionProperties);
replaceFunctionSummaryFeatures(context);
}
private Map<String, String> deriveFunctionProperties(Map<String, RankProfile.RankingExpressionFunction> functions,
List<ExpressionFunction> functionExpressions) {
SerializationContext context = new SerializationContext(functionExpressions);
for (Map.Entry<String, RankProfile.RankingExpressionFunction> e : functions.entrySet()) {
String expressionString = e.getValue().function().getBody().getRoot().toString(new StringBuilder(), context, null, null).toString();
context.addFunctionSerialization(RankingExpression.propertyName(e.getKey()), expressionString);
for (Map.Entry<String, TensorType> argumentType : e.getValue().function().argumentTypes().entrySet())
context.addArgumentTypeSerialization(e.getKey(), argumentType.getKey(), argumentType.getValue());
if (e.getValue().function().returnType().isPresent())
context.addFunctionTypeSerialization(e.getKey(), e.getValue().function().returnType().get());
}
return context.serializedFunctions();
}
private void replaceFunctionSummaryFeatures(SerializationContext context) {
if (summaryFeatures == null) return;
Map<String, ReferenceNode> functionSummaryFeatures = new LinkedHashMap<>();
for (Iterator<ReferenceNode> i = summaryFeatures.iterator(); i.hasNext(); ) {
ReferenceNode referenceNode = i.next();
if (context.getFunction(referenceNode.getName()) != null) {
context.addFunctionSerialization(RankingExpression.propertyName(referenceNode.getName()),
referenceNode.toString(new StringBuilder(), context, null, null).toString());
ReferenceNode newReferenceNode = new ReferenceNode("rankingExpression(" + referenceNode.getName() + ")", referenceNode.getArguments().expressions(), referenceNode.getOutput());
functionSummaryFeatures.put(referenceNode.getName(), newReferenceNode);
i.remove();
}
}
for (Map.Entry<String, ReferenceNode> e : functionSummaryFeatures.entrySet()) {
summaryFeatures.add(e.getValue());
}
}
private void deriveWeightProperties(RankProfile rankProfile) {
for (RankProfile.RankSetting setting : rankProfile.rankSettings()) {
if (!setting.getType().equals(RankProfile.RankSetting.Type.WEIGHT)) {
continue;
}
boostAndWeightRankProperties.add(new RankProfile.RankProperty("vespa.fieldweight." + setting.getFieldName(),
String.valueOf(setting.getIntValue())));
}
}
/**
* Adds the type boosts from a rank profile
*/
private void deriveRankTypeSetting(RankProfile rankProfile, AttributeFields attributeFields) {
for (Iterator<RankProfile.RankSetting> i = rankProfile.rankSettingIterator(); i.hasNext(); ) {
RankProfile.RankSetting setting = i.next();
if (!setting.getType().equals(RankProfile.RankSetting.Type.RANKTYPE)) continue;
deriveNativeRankTypeSetting(setting.getFieldName(), (RankType) setting.getValue(), attributeFields,
hasDefaultRankTypeSetting(rankProfile, setting.getFieldName()));
}
}
private void deriveNativeRankTypeSetting(String fieldName, RankType rankType, AttributeFields attributeFields, boolean isDefaultSetting) {
if (isDefaultSetting) return;
NativeRankTypeDefinition definition = nativeRankTypeDefinitions.getRankTypeDefinition(rankType);
if (definition == null) throw new IllegalArgumentException("In field '" + fieldName + "': " +
rankType + " is known but has no implementation. " +
"Supported rank types: " +
nativeRankTypeDefinitions.types().keySet());
FieldRankSettings settings = deriveFieldRankSettings(fieldName);
for (Iterator<NativeTable> i = definition.rankSettingIterator(); i.hasNext(); ) {
NativeTable table = i.next();
if ((FieldRankSettings.isIndexFieldTable(table) && attributeFields.getAttribute(fieldName) == null) ||
(FieldRankSettings.isAttributeFieldTable(table) && attributeFields.getAttribute(fieldName) != null)) {
settings.addTable(table);
}
}
}
private boolean hasDefaultRankTypeSetting(RankProfile rankProfile, String fieldName) {
RankProfile.RankSetting setting =
rankProfile.getRankSetting(fieldName, RankProfile.RankSetting.Type.RANKTYPE);
return setting != null && setting.getValue().equals(RankType.DEFAULT);
}
private FieldRankSettings deriveFieldRankSettings(String fieldName) {
FieldRankSettings settings = fieldRankSettings.get(fieldName);
if (settings == null) {
settings = new FieldRankSettings(fieldName);
fieldRankSettings.put(fieldName, settings);
}
return settings;
}
/** Derives the properties this produces */
public List<Pair<String, String>> derive() {
List<Pair<String, String>> properties = new ArrayList<>();
for (RankProfile.RankProperty property : rankProperties) {
if ("rankingExpression(firstphase).rankingScript".equals(property.getName())) {
try {
firstPhaseRanking = new RankingExpression(property.getValue());
} catch (ParseException e) {
throw new IllegalArgumentException("Could not parse second phase expression", e);
}
}
else if ("rankingExpression(secondphase).rankingScript".equals(property.getName())) {
try {
secondPhaseRanking = new RankingExpression(property.getValue());
} catch (ParseException e) {
throw new IllegalArgumentException("Could not parse second phase expression", e);
}
}
else {
properties.add(new Pair<>(property.getName(), property.getValue()));
}
}
properties.addAll(deriveRankingPhaseRankProperties(firstPhaseRanking, "firstphase"));
properties.addAll(deriveRankingPhaseRankProperties(secondPhaseRanking, "secondphase"));
for (FieldRankSettings settings : fieldRankSettings.values()) {
properties.addAll(settings.deriveRankProperties());
}
for (RankProfile.RankProperty property : boostAndWeightRankProperties) {
properties.add(new Pair<>(property.getName(), property.getValue()));
}
for (ReferenceNode feature : summaryFeatures) {
properties.add(new Pair<>(summaryFeatureFefPropertyPrefix, feature.toString()));
}
for (ReferenceNode feature : rankFeatures) {
properties.add(new Pair<>(rankFeatureFefPropertyPrefix, feature.toString()));
}
if (numThreadsPerSearch > 0) {
properties.add(new Pair<>("vespa.matching.numthreadspersearch", numThreadsPerSearch + ""));
}
if (minHitsPerThread > 0) {
properties.add(new Pair<>("vespa.matching.minhitsperthread", minHitsPerThread + ""));
}
if (numSearchPartitions >= 0) {
properties.add(new Pair<>("vespa.matching.numsearchpartitions", numSearchPartitions + ""));
}
if (termwiseLimit < 1.0) {
properties.add(new Pair<>("vespa.matching.termwise_limit", termwiseLimit + ""));
}
if (matchPhaseSettings != null) {
properties.add(new Pair<>("vespa.matchphase.degradation.attribute", matchPhaseSettings.getAttribute()));
properties.add(new Pair<>("vespa.matchphase.degradation.ascendingorder", matchPhaseSettings.getAscending() + ""));
properties.add(new Pair<>("vespa.matchphase.degradation.maxhits", matchPhaseSettings.getMaxHits() + ""));
properties.add(new Pair<>("vespa.matchphase.degradation.maxfiltercoverage", matchPhaseSettings.getMaxFilterCoverage() + ""));
properties.add(new Pair<>("vespa.matchphase.degradation.samplepercentage", matchPhaseSettings.getEvaluationPoint() + ""));
properties.add(new Pair<>("vespa.matchphase.degradation.postfiltermultiplier", matchPhaseSettings.getPrePostFilterTippingPoint() + ""));
RankProfile.DiversitySettings diversitySettings = matchPhaseSettings.getDiversity();
if (diversitySettings != null) {
properties.add(new Pair<>("vespa.matchphase.diversity.attribute", diversitySettings.getAttribute()));
properties.add(new Pair<>("vespa.matchphase.diversity.mingroups", String.valueOf(diversitySettings.getMinGroups())));
properties.add(new Pair<>("vespa.matchphase.diversity.cutoff.factor", String.valueOf(diversitySettings.getCutoffFactor())));
properties.add(new Pair<>("vespa.matchphase.diversity.cutoff.strategy", String.valueOf(diversitySettings.getCutoffStrategy())));
}
}
if (rerankCount > -1) {
properties.add(new Pair<>("vespa.hitcollector.heapsize", rerankCount + ""));
}
if (keepRankCount > -1) {
properties.add(new Pair<>("vespa.hitcollector.arraysize", keepRankCount + ""));
}
if (rankScoreDropLimit > -Double.MAX_VALUE) {
properties.add(new Pair<>("vespa.hitcollector.rankscoredroplimit", rankScoreDropLimit + ""));
}
if (ignoreDefaultRankFeatures) {
properties.add(new Pair<>("vespa.dump.ignoredefaultfeatures", String.valueOf(true)));
}
Iterator filterFieldsIterator = filterFields.iterator();
while (filterFieldsIterator.hasNext()) {
String fieldName = (String) filterFieldsIterator.next();
properties.add(new Pair<>("vespa.isfilterfield." + fieldName, String.valueOf(true)));
}
for (Map.Entry<String, String> attributeType : attributeTypes.entrySet()) {
properties.add(new Pair<>("vespa.type.attribute." + attributeType.getKey(), attributeType.getValue()));
}
for (Map.Entry<String, String> queryFeatureType : queryFeatureTypes.entrySet()) {
properties.add(new Pair<>("vespa.type.query." + queryFeatureType.getKey(), queryFeatureType.getValue()));
}
if (properties.size() >= 1000000) throw new RuntimeException("Too many rank properties");
return properties;
}
private List<Pair<String, String>> deriveRankingPhaseRankProperties(RankingExpression expression, String phase) {
List<Pair<String, String>> properties = new ArrayList<>();
if (expression == null) return properties;
String name = expression.getName();
if ("".equals(name))
name = phase;
if (expression.getRoot() instanceof ReferenceNode) {
properties.add(new Pair<>("vespa.rank." + phase, expression.getRoot().toString()));
} else {
properties.add(new Pair<>("vespa.rank." + phase, "rankingExpression(" + name + ")"));
properties.add(new Pair<>("rankingExpression(" + name + ").rankingScript", expression.getRoot().toString()));
}
return properties;
}
}
} | class RawRankProfile implements RankProfilesConfig.Producer {
/** A reusable compressor with default settings */
private static final Compressor compressor = new Compressor();
private final String keyEndMarker = "\r=";
private final String valueEndMarker = "\r\n";
public final static String summaryFeatureFefPropertyPrefix = "vespa.summary.feature";
public final static String rankFeatureFefPropertyPrefix = "vespa.dump.feature";
private final String name;
private final Compressor.Compression compressedProperties;
/**
* Creates a raw rank profile from the given rank profile
*/
public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels, AttributeFields attributeFields, ModelContext.Properties deployProperties) {
this.name = rankProfile.getName();
compressedProperties = compress(new Deriver(rankProfile, queryProfiles, importedModels, attributeFields, deployProperties).derive());
}
/**
* Only for testing
*/
private Compressor.Compression compress(List<Pair<String, String>> properties) {
StringBuilder b = new StringBuilder();
for (Pair<String, String> property : properties)
b.append(property.getFirst()).append(keyEndMarker).append(property.getSecond()).append(valueEndMarker);
return compressor.compress(b.toString().getBytes(StandardCharsets.UTF_8));
}
private List<Pair<String, String>> decompress(Compressor.Compression compression) {
String propertiesString = new String(compressor.decompress(compression), StandardCharsets.UTF_8);
if (propertiesString.isEmpty()) return ImmutableList.of();
ImmutableList.Builder<Pair<String, String>> properties = new ImmutableList.Builder<>();
for (String propertyString : propertiesString.split(valueEndMarker)) {
String[] property = propertyString.split(keyEndMarker);
properties.add(new Pair<>(property[0], property[1]));
}
return properties.build();
}
public String getName() { return name; }
@Override
public String toString() {
return " rank profile " + name;
}
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
RankProfilesConfig.Rankprofile.Builder b = new RankProfilesConfig.Rankprofile.Builder().name(getName());
getRankProperties(b);
builder.rankprofile(b);
}
private void getRankProperties(RankProfilesConfig.Rankprofile.Builder b) {
RankProfilesConfig.Rankprofile.Fef.Builder fefB = new RankProfilesConfig.Rankprofile.Fef.Builder();
for (Pair<String, String> p : decompress(compressedProperties))
fefB.property(new RankProfilesConfig.Rankprofile.Fef.Property.Builder().name(p.getFirst()).value(p.getSecond()));
b.fef(fefB);
}
/**
* Returns the properties of this as an unmodifiable list.
* Note: This method is expensive.
*/
public List<Pair<String, String>> configProperties() { return decompress(compressedProperties); }
private static class Deriver {
/**
* The field rank settings of this profile
*/
private Map<String, FieldRankSettings> fieldRankSettings = new java.util.LinkedHashMap<>();
private RankingExpression firstPhaseRanking = null;
private RankingExpression secondPhaseRanking = null;
private Set<ReferenceNode> summaryFeatures = new LinkedHashSet<>();
private Set<ReferenceNode> rankFeatures = new LinkedHashSet<>();
private List<RankProfile.RankProperty> rankProperties = new ArrayList<>();
/**
* Rank properties for weight settings to make these available to feature executors
*/
private List<RankProfile.RankProperty> boostAndWeightRankProperties = new ArrayList<>();
private boolean ignoreDefaultRankFeatures = false;
private RankProfile.MatchPhaseSettings matchPhaseSettings = null;
private int rerankCount = -1;
private int keepRankCount = -1;
private int numThreadsPerSearch = -1;
private int minHitsPerThread = -1;
private int numSearchPartitions = -1;
private double termwiseLimit = 1.0;
private double rankScoreDropLimit = -Double.MAX_VALUE;
/**
* The rank type definitions used to derive settings for the native rank features
*/
private final NativeRankTypeDefinitionSet nativeRankTypeDefinitions = new NativeRankTypeDefinitionSet("default");
private final Map<String, String> attributeTypes;
private final Map<String, String> queryFeatureTypes;
private Set<String> filterFields = new java.util.LinkedHashSet<>();
/**
* Creates a raw rank profile from the given rank profile
*/
Deriver(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels,
AttributeFields attributeFields, ModelContext.Properties deployProperties)
{
RankProfile compiled = rankProfile.compile(queryProfiles, importedModels);
attributeTypes = compiled.getAttributeTypes();
queryFeatureTypes = compiled.getQueryFeatureTypes();
deriveRankingFeatures(compiled, deployProperties);
deriveRankTypeSetting(compiled, attributeFields);
deriveFilterFields(compiled);
deriveWeightProperties(compiled);
}
private void deriveFilterFields(RankProfile rp) {
filterFields.addAll(rp.allFilterFields());
}
private void deriveRankingFeatures(RankProfile rankProfile, ModelContext.Properties deployProperties) {
firstPhaseRanking = rankProfile.getFirstPhaseRanking();
secondPhaseRanking = rankProfile.getSecondPhaseRanking();
summaryFeatures = new LinkedHashSet<>(rankProfile.getSummaryFeatures());
rankFeatures = rankProfile.getRankFeatures();
rerankCount = rankProfile.getRerankCount();
matchPhaseSettings = rankProfile.getMatchPhaseSettings();
numThreadsPerSearch = rankProfile.getNumThreadsPerSearch();
minHitsPerThread = rankProfile.getMinHitsPerThread();
numSearchPartitions = rankProfile.getNumSearchPartitions();
termwiseLimit = rankProfile.getTermwiseLimit().orElse(deployProperties.defaultTermwiseLimit());
keepRankCount = rankProfile.getKeepRankCount();
rankScoreDropLimit = rankProfile.getRankScoreDropLimit();
ignoreDefaultRankFeatures = rankProfile.getIgnoreDefaultRankFeatures();
rankProperties = new ArrayList<>(rankProfile.getRankProperties());
derivePropertiesAndSummaryFeaturesFromFunctions(rankProfile.getFunctions());
}
/**
 * Serializes the profile's functions (and the function references made by the two
 * ranking phases) into rank properties, then rewrites summary features that refer
 * to functions into their rankingExpression(...) form.
 */
private void derivePropertiesAndSummaryFeaturesFromFunctions(Map<String, RankProfile.RankingExpressionFunction> functions) {
    if (functions.isEmpty()) return;

    List<ExpressionFunction> functionExpressions = functions.values().stream()
                                                            .map(RankProfile.RankingExpressionFunction::function)
                                                            .collect(Collectors.toList());
    Map<String, String> functionProperties =
            new LinkedHashMap<>(deriveFunctionProperties(functions, functionExpressions));
    if (firstPhaseRanking != null)
        functionProperties.putAll(firstPhaseRanking.getRankProperties(functionExpressions));
    if (secondPhaseRanking != null)
        functionProperties.putAll(secondPhaseRanking.getRankProperties(functionExpressions));

    functionProperties.forEach((key, value) -> rankProperties.add(new RankProfile.RankProperty(key, value)));

    replaceFunctionSummaryFeatures(new SerializationContext(functionExpressions, null, functionProperties));
}
/**
 * Serializes each function body, its argument types and its return type (when declared)
 * into the serialization context, and returns the resulting serialized function properties.
 */
private Map<String, String> deriveFunctionProperties(Map<String, RankProfile.RankingExpressionFunction> functions,
                                                     List<ExpressionFunction> functionExpressions) {
    SerializationContext context = new SerializationContext(functionExpressions);
    functions.forEach((name, rankingFunction) -> {
        ExpressionFunction function = rankingFunction.function();
        String serialized = function.getBody().getRoot()
                                    .toString(new StringBuilder(), context, null, null).toString();
        context.addFunctionSerialization(RankingExpression.propertyName(name), serialized);
        function.argumentTypes().forEach((argument, type) ->
                context.addArgumentTypeSerialization(name, argument, type));
        function.returnType().ifPresent(returnType ->
                context.addFunctionTypeSerialization(name, returnType));
    });
    return context.serializedFunctions();
}
/**
 * Replaces every summary feature that names a function with an equivalent
 * rankingExpression(functionName) reference, serializing the function into the
 * context as a side effect. Non-function summary features are left untouched.
 */
private void replaceFunctionSummaryFeatures(SerializationContext context) {
    if (summaryFeatures == null) return;

    Map<String, ReferenceNode> replacements = new LinkedHashMap<>();
    for (Iterator<ReferenceNode> it = summaryFeatures.iterator(); it.hasNext(); ) {
        ReferenceNode node = it.next();
        String functionName = node.getName();
        if (context.getFunction(functionName) == null) continue; // not a function reference

        context.addFunctionSerialization(RankingExpression.propertyName(functionName),
                                         node.toString(new StringBuilder(), context, null, null).toString());
        replacements.put(functionName,
                         new ReferenceNode("rankingExpression(" + functionName + ")",
                                           node.getArguments().expressions(), node.getOutput()));
        it.remove(); // removed here, re-added below in rewritten form
    }
    summaryFeatures.addAll(replacements.values());
}
/** Exposes field weight settings as vespa.fieldweight.* rank properties for feature executors. */
private void deriveWeightProperties(RankProfile rankProfile) {
    for (RankProfile.RankSetting setting : rankProfile.rankSettings()) {
        if (setting.getType().equals(RankProfile.RankSetting.Type.WEIGHT)) {
            boostAndWeightRankProperties.add(
                    new RankProfile.RankProperty("vespa.fieldweight." + setting.getFieldName(),
                                                 String.valueOf(setting.getIntValue())));
        }
    }
}
/**
 * Adds the rank type settings from a rank profile, skipping everything that is
 * not a RANKTYPE setting.
 */
private void deriveRankTypeSetting(RankProfile rankProfile, AttributeFields attributeFields) {
    Iterator<RankProfile.RankSetting> settings = rankProfile.rankSettingIterator();
    while (settings.hasNext()) {
        RankProfile.RankSetting setting = settings.next();
        if ( ! setting.getType().equals(RankProfile.RankSetting.Type.RANKTYPE)) continue;
        deriveNativeRankTypeSetting(setting.getFieldName(),
                                    (RankType) setting.getValue(),
                                    attributeFields,
                                    hasDefaultRankTypeSetting(rankProfile, setting.getFieldName()));
    }
}
/**
 * Adds the native rank tables implementing the given rank type for a field,
 * picking attribute-field tables for attributes and index-field tables otherwise.
 * Default settings are skipped entirely.
 *
 * @throws IllegalArgumentException if the rank type has no native implementation
 */
private void deriveNativeRankTypeSetting(String fieldName, RankType rankType, AttributeFields attributeFields, boolean isDefaultSetting) {
    if (isDefaultSetting) return;

    NativeRankTypeDefinition definition = nativeRankTypeDefinitions.getRankTypeDefinition(rankType);
    if (definition == null) {
        throw new IllegalArgumentException("In field '" + fieldName + "': " + rankType +
                                           " is known but has no implementation. " +
                                           "Supported rank types: " + nativeRankTypeDefinitions.types().keySet());
    }

    FieldRankSettings settings = deriveFieldRankSettings(fieldName);
    boolean isAttributeField = attributeFields.getAttribute(fieldName) != null;
    Iterator<NativeTable> tables = definition.rankSettingIterator();
    while (tables.hasNext()) {
        NativeTable table = tables.next();
        boolean applies = isAttributeField ? FieldRankSettings.isAttributeFieldTable(table)
                                           : FieldRankSettings.isIndexFieldTable(table);
        if (applies)
            settings.addTable(table);
    }
}
/** Returns whether the given field has an explicit RANKTYPE setting whose value is the default rank type. */
private boolean hasDefaultRankTypeSetting(RankProfile rankProfile, String fieldName) {
    RankProfile.RankSetting setting = rankProfile.getRankSetting(fieldName, RankProfile.RankSetting.Type.RANKTYPE);
    if (setting == null) return false;
    return setting.getValue().equals(RankType.DEFAULT);
}
/**
 * Returns the {@link FieldRankSettings} registered for the given field,
 * creating and registering an empty one on first access.
 */
private FieldRankSettings deriveFieldRankSettings(String fieldName) {
    // computeIfAbsent replaces the manual get/null-check/put idiom with identical semantics
    return fieldRankSettings.computeIfAbsent(fieldName, FieldRankSettings::new);
}
/**
 * Derives the full list of rank property key/value pairs this profile produces.
 * Phase ranking scripts supplied as raw rank properties override the compiled
 * expressions; all other settings are emitted under their vespa.* property names.
 *
 * @throws IllegalArgumentException if a phase ranking script fails to parse
 */
public List<Pair<String, String>> derive() {
    List<Pair<String, String>> properties = new ArrayList<>();
    for (RankProfile.RankProperty property : rankProperties) {
        if ("rankingExpression(firstphase).rankingScript".equals(property.getName())) {
            try {
                firstPhaseRanking = new RankingExpression(property.getValue());
            } catch (ParseException e) {
                // Fixed copy-paste bug: this branch previously reported "second phase"
                throw new IllegalArgumentException("Could not parse first phase expression", e);
            }
        }
        else if ("rankingExpression(secondphase).rankingScript".equals(property.getName())) {
            try {
                secondPhaseRanking = new RankingExpression(property.getValue());
            } catch (ParseException e) {
                throw new IllegalArgumentException("Could not parse second phase expression", e);
            }
        }
        else {
            properties.add(new Pair<>(property.getName(), property.getValue()));
        }
    }
    properties.addAll(deriveRankingPhaseRankProperties(firstPhaseRanking, "firstphase"));
    properties.addAll(deriveRankingPhaseRankProperties(secondPhaseRanking, "secondphase"));
    for (FieldRankSettings settings : fieldRankSettings.values()) {
        properties.addAll(settings.deriveRankProperties());
    }
    for (RankProfile.RankProperty property : boostAndWeightRankProperties) {
        properties.add(new Pair<>(property.getName(), property.getValue()));
    }
    for (ReferenceNode feature : summaryFeatures) {
        properties.add(new Pair<>(summaryFeatureFefPropertyPrefix, feature.toString()));
    }
    for (ReferenceNode feature : rankFeatures) {
        properties.add(new Pair<>(rankFeatureFefPropertyPrefix, feature.toString()));
    }
    if (numThreadsPerSearch > 0) {
        properties.add(new Pair<>("vespa.matching.numthreadspersearch", numThreadsPerSearch + ""));
    }
    if (minHitsPerThread > 0) {
        properties.add(new Pair<>("vespa.matching.minhitsperthread", minHitsPerThread + ""));
    }
    if (numSearchPartitions >= 0) {
        properties.add(new Pair<>("vespa.matching.numsearchpartitions", numSearchPartitions + ""));
    }
    if (termwiseLimit < 1.0) { // 1.0 is the "disabled" default, so only emit when lowered
        properties.add(new Pair<>("vespa.matching.termwise_limit", termwiseLimit + ""));
    }
    if (matchPhaseSettings != null) {
        properties.add(new Pair<>("vespa.matchphase.degradation.attribute", matchPhaseSettings.getAttribute()));
        properties.add(new Pair<>("vespa.matchphase.degradation.ascendingorder", matchPhaseSettings.getAscending() + ""));
        properties.add(new Pair<>("vespa.matchphase.degradation.maxhits", matchPhaseSettings.getMaxHits() + ""));
        properties.add(new Pair<>("vespa.matchphase.degradation.maxfiltercoverage", matchPhaseSettings.getMaxFilterCoverage() + ""));
        properties.add(new Pair<>("vespa.matchphase.degradation.samplepercentage", matchPhaseSettings.getEvaluationPoint() + ""));
        properties.add(new Pair<>("vespa.matchphase.degradation.postfiltermultiplier", matchPhaseSettings.getPrePostFilterTippingPoint() + ""));
        RankProfile.DiversitySettings diversitySettings = matchPhaseSettings.getDiversity();
        if (diversitySettings != null) {
            properties.add(new Pair<>("vespa.matchphase.diversity.attribute", diversitySettings.getAttribute()));
            properties.add(new Pair<>("vespa.matchphase.diversity.mingroups", String.valueOf(diversitySettings.getMinGroups())));
            properties.add(new Pair<>("vespa.matchphase.diversity.cutoff.factor", String.valueOf(diversitySettings.getCutoffFactor())));
            properties.add(new Pair<>("vespa.matchphase.diversity.cutoff.strategy", String.valueOf(diversitySettings.getCutoffStrategy())));
        }
    }
    if (rerankCount > -1) {
        properties.add(new Pair<>("vespa.hitcollector.heapsize", rerankCount + ""));
    }
    if (keepRankCount > -1) {
        properties.add(new Pair<>("vespa.hitcollector.arraysize", keepRankCount + ""));
    }
    if (rankScoreDropLimit > -Double.MAX_VALUE) {
        properties.add(new Pair<>("vespa.hitcollector.rankscoredroplimit", rankScoreDropLimit + ""));
    }
    if (ignoreDefaultRankFeatures) {
        properties.add(new Pair<>("vespa.dump.ignoredefaultfeatures", String.valueOf(true)));
    }
    // Enhanced for replaces the previous raw-typed Iterator + cast; same LinkedHashSet order
    for (String fieldName : filterFields) {
        properties.add(new Pair<>("vespa.isfilterfield." + fieldName, String.valueOf(true)));
    }
    for (Map.Entry<String, String> attributeType : attributeTypes.entrySet()) {
        properties.add(new Pair<>("vespa.type.attribute." + attributeType.getKey(), attributeType.getValue()));
    }
    for (Map.Entry<String, String> queryFeatureType : queryFeatureTypes.entrySet()) {
        properties.add(new Pair<>("vespa.type.query." + queryFeatureType.getKey(), queryFeatureType.getValue()));
    }
    if (properties.size() >= 1000000) throw new RuntimeException("Too many rank properties");
    return properties;
}
/**
 * Returns the rank properties for one ranking phase: a direct feature reference
 * when the expression root is a plain ReferenceNode, otherwise a
 * rankingExpression(name) indirection plus the serialized script.
 */
private List<Pair<String, String>> deriveRankingPhaseRankProperties(RankingExpression expression, String phase) {
    List<Pair<String, String>> properties = new ArrayList<>();
    if (expression == null) return properties;

    String name = "".equals(expression.getName()) ? phase : expression.getName();
    if (expression.getRoot() instanceof ReferenceNode) {
        properties.add(new Pair<>("vespa.rank." + phase, expression.getRoot().toString()));
    }
    else {
        properties.add(new Pair<>("vespa.rank." + phase, "rankingExpression(" + name + ")"));
        properties.add(new Pair<>("rankingExpression(" + name + ").rankingScript", expression.getRoot().toString()));
    }
    return properties;
}
}
} |
Also only used for testing. | public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels, AttributeFields attributeFields) {
this.name = rankProfile.getName();
compressedProperties = compress(new Deriver(rankProfile, queryProfiles, importedModels, attributeFields, new TestProperties()).derive());
} | compressedProperties = compress(new Deriver(rankProfile, queryProfiles, importedModels, attributeFields, new TestProperties()).derive()); | public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels, AttributeFields attributeFields) {
this(rankProfile, queryProfiles, importedModels, attributeFields, new TestProperties());
} | class RawRankProfile implements RankProfilesConfig.Producer {
/** A reusable compressor with default settings */
private static final Compressor compressor = new Compressor();
private final String keyEndMarker = "\r=";
private final String valueEndMarker = "\r\n";
public final static String summaryFeatureFefPropertyPrefix = "vespa.summary.feature";
public final static String rankFeatureFefPropertyPrefix = "vespa.dump.feature";
private final String name;
private final Compressor.Compression compressedProperties;
/**
* Creates a raw rank profile from the given rank profile
*/
public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels, AttributeFields attributeFields, ModelContext.Properties deployProperties) {
this.name = rankProfile.getName();
compressedProperties = compress(new Deriver(rankProfile, queryProfiles, importedModels, attributeFields, deployProperties).derive());
}
/**
* Only for testing
*/
private Compressor.Compression compress(List<Pair<String, String>> properties) {
StringBuilder b = new StringBuilder();
for (Pair<String, String> property : properties)
b.append(property.getFirst()).append(keyEndMarker).append(property.getSecond()).append(valueEndMarker);
return compressor.compress(b.toString().getBytes(StandardCharsets.UTF_8));
}
private List<Pair<String, String>> decompress(Compressor.Compression compression) {
String propertiesString = new String(compressor.decompress(compression), StandardCharsets.UTF_8);
if (propertiesString.isEmpty()) return ImmutableList.of();
ImmutableList.Builder<Pair<String, String>> properties = new ImmutableList.Builder<>();
for (String propertyString : propertiesString.split(valueEndMarker)) {
String[] property = propertyString.split(keyEndMarker);
properties.add(new Pair<>(property[0], property[1]));
}
return properties.build();
}
public String getName() { return name; }
@Override
public String toString() {
return " rank profile " + name;
}
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
RankProfilesConfig.Rankprofile.Builder b = new RankProfilesConfig.Rankprofile.Builder().name(getName());
getRankProperties(b);
builder.rankprofile(b);
}
private void getRankProperties(RankProfilesConfig.Rankprofile.Builder b) {
RankProfilesConfig.Rankprofile.Fef.Builder fefB = new RankProfilesConfig.Rankprofile.Fef.Builder();
for (Pair<String, String> p : decompress(compressedProperties))
fefB.property(new RankProfilesConfig.Rankprofile.Fef.Property.Builder().name(p.getFirst()).value(p.getSecond()));
b.fef(fefB);
}
/**
* Returns the properties of this as an unmodifiable list.
* Note: This method is expensive.
*/
public List<Pair<String, String>> configProperties() { return decompress(compressedProperties); }
private static class Deriver {
/**
* The field rank settings of this profile
*/
private Map<String, FieldRankSettings> fieldRankSettings = new java.util.LinkedHashMap<>();
private RankingExpression firstPhaseRanking = null;
private RankingExpression secondPhaseRanking = null;
private Set<ReferenceNode> summaryFeatures = new LinkedHashSet<>();
private Set<ReferenceNode> rankFeatures = new LinkedHashSet<>();
private List<RankProfile.RankProperty> rankProperties = new ArrayList<>();
/**
* Rank properties for weight settings to make these available to feature executors
*/
private List<RankProfile.RankProperty> boostAndWeightRankProperties = new ArrayList<>();
private boolean ignoreDefaultRankFeatures = false;
private RankProfile.MatchPhaseSettings matchPhaseSettings = null;
private int rerankCount = -1;
private int keepRankCount = -1;
private int numThreadsPerSearch = -1;
private int minHitsPerThread = -1;
private int numSearchPartitions = -1;
private double termwiseLimit = 1.0;
private double rankScoreDropLimit = -Double.MAX_VALUE;
/**
* The rank type definitions used to derive settings for the native rank features
*/
private final NativeRankTypeDefinitionSet nativeRankTypeDefinitions = new NativeRankTypeDefinitionSet("default");
private final Map<String, String> attributeTypes;
private final Map<String, String> queryFeatureTypes;
private Set<String> filterFields = new java.util.LinkedHashSet<>();
/**
* Creates a raw rank profile from the given rank profile
*/
Deriver(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels,
AttributeFields attributeFields, ModelContext.Properties deployProperties)
{
RankProfile compiled = rankProfile.compile(queryProfiles, importedModels);
attributeTypes = compiled.getAttributeTypes();
queryFeatureTypes = compiled.getQueryFeatureTypes();
deriveRankingFeatures(compiled, deployProperties);
deriveRankTypeSetting(compiled, attributeFields);
deriveFilterFields(compiled);
deriveWeightProperties(compiled);
}
private void deriveFilterFields(RankProfile rp) {
filterFields.addAll(rp.allFilterFields());
}
private void deriveRankingFeatures(RankProfile rankProfile, ModelContext.Properties deployProperties) {
firstPhaseRanking = rankProfile.getFirstPhaseRanking();
secondPhaseRanking = rankProfile.getSecondPhaseRanking();
summaryFeatures = new LinkedHashSet<>(rankProfile.getSummaryFeatures());
rankFeatures = rankProfile.getRankFeatures();
rerankCount = rankProfile.getRerankCount();
matchPhaseSettings = rankProfile.getMatchPhaseSettings();
numThreadsPerSearch = rankProfile.getNumThreadsPerSearch();
minHitsPerThread = rankProfile.getMinHitsPerThread();
numSearchPartitions = rankProfile.getNumSearchPartitions();
termwiseLimit = rankProfile.getTermwiseLimit().orElse(deployProperties.defaultTermwiseLimit());
keepRankCount = rankProfile.getKeepRankCount();
rankScoreDropLimit = rankProfile.getRankScoreDropLimit();
ignoreDefaultRankFeatures = rankProfile.getIgnoreDefaultRankFeatures();
rankProperties = new ArrayList<>(rankProfile.getRankProperties());
derivePropertiesAndSummaryFeaturesFromFunctions(rankProfile.getFunctions());
}
private void derivePropertiesAndSummaryFeaturesFromFunctions(Map<String, RankProfile.RankingExpressionFunction> functions) {
if (functions.isEmpty()) return;
List<ExpressionFunction> functionExpressions = functions.values().stream().map(f -> f.function()).collect(Collectors.toList());
Map<String, String> functionProperties = new LinkedHashMap<>();
functionProperties.putAll(deriveFunctionProperties(functions, functionExpressions));
if (firstPhaseRanking != null) {
functionProperties.putAll(firstPhaseRanking.getRankProperties(functionExpressions));
}
if (secondPhaseRanking != null) {
functionProperties.putAll(secondPhaseRanking.getRankProperties(functionExpressions));
}
for (Map.Entry<String, String> e : functionProperties.entrySet()) {
rankProperties.add(new RankProfile.RankProperty(e.getKey(), e.getValue()));
}
SerializationContext context = new SerializationContext(functionExpressions, null, functionProperties);
replaceFunctionSummaryFeatures(context);
}
private Map<String, String> deriveFunctionProperties(Map<String, RankProfile.RankingExpressionFunction> functions,
List<ExpressionFunction> functionExpressions) {
SerializationContext context = new SerializationContext(functionExpressions);
for (Map.Entry<String, RankProfile.RankingExpressionFunction> e : functions.entrySet()) {
String expressionString = e.getValue().function().getBody().getRoot().toString(new StringBuilder(), context, null, null).toString();
context.addFunctionSerialization(RankingExpression.propertyName(e.getKey()), expressionString);
for (Map.Entry<String, TensorType> argumentType : e.getValue().function().argumentTypes().entrySet())
context.addArgumentTypeSerialization(e.getKey(), argumentType.getKey(), argumentType.getValue());
if (e.getValue().function().returnType().isPresent())
context.addFunctionTypeSerialization(e.getKey(), e.getValue().function().returnType().get());
}
return context.serializedFunctions();
}
private void replaceFunctionSummaryFeatures(SerializationContext context) {
if (summaryFeatures == null) return;
Map<String, ReferenceNode> functionSummaryFeatures = new LinkedHashMap<>();
for (Iterator<ReferenceNode> i = summaryFeatures.iterator(); i.hasNext(); ) {
ReferenceNode referenceNode = i.next();
if (context.getFunction(referenceNode.getName()) != null) {
context.addFunctionSerialization(RankingExpression.propertyName(referenceNode.getName()),
referenceNode.toString(new StringBuilder(), context, null, null).toString());
ReferenceNode newReferenceNode = new ReferenceNode("rankingExpression(" + referenceNode.getName() + ")", referenceNode.getArguments().expressions(), referenceNode.getOutput());
functionSummaryFeatures.put(referenceNode.getName(), newReferenceNode);
i.remove();
}
}
for (Map.Entry<String, ReferenceNode> e : functionSummaryFeatures.entrySet()) {
summaryFeatures.add(e.getValue());
}
}
private void deriveWeightProperties(RankProfile rankProfile) {
for (RankProfile.RankSetting setting : rankProfile.rankSettings()) {
if (!setting.getType().equals(RankProfile.RankSetting.Type.WEIGHT)) {
continue;
}
boostAndWeightRankProperties.add(new RankProfile.RankProperty("vespa.fieldweight." + setting.getFieldName(),
String.valueOf(setting.getIntValue())));
}
}
/**
* Adds the type boosts from a rank profile
*/
private void deriveRankTypeSetting(RankProfile rankProfile, AttributeFields attributeFields) {
for (Iterator<RankProfile.RankSetting> i = rankProfile.rankSettingIterator(); i.hasNext(); ) {
RankProfile.RankSetting setting = i.next();
if (!setting.getType().equals(RankProfile.RankSetting.Type.RANKTYPE)) continue;
deriveNativeRankTypeSetting(setting.getFieldName(), (RankType) setting.getValue(), attributeFields,
hasDefaultRankTypeSetting(rankProfile, setting.getFieldName()));
}
}
private void deriveNativeRankTypeSetting(String fieldName, RankType rankType, AttributeFields attributeFields, boolean isDefaultSetting) {
if (isDefaultSetting) return;
NativeRankTypeDefinition definition = nativeRankTypeDefinitions.getRankTypeDefinition(rankType);
if (definition == null) throw new IllegalArgumentException("In field '" + fieldName + "': " +
rankType + " is known but has no implementation. " +
"Supported rank types: " +
nativeRankTypeDefinitions.types().keySet());
FieldRankSettings settings = deriveFieldRankSettings(fieldName);
for (Iterator<NativeTable> i = definition.rankSettingIterator(); i.hasNext(); ) {
NativeTable table = i.next();
if ((FieldRankSettings.isIndexFieldTable(table) && attributeFields.getAttribute(fieldName) == null) ||
(FieldRankSettings.isAttributeFieldTable(table) && attributeFields.getAttribute(fieldName) != null)) {
settings.addTable(table);
}
}
}
private boolean hasDefaultRankTypeSetting(RankProfile rankProfile, String fieldName) {
RankProfile.RankSetting setting =
rankProfile.getRankSetting(fieldName, RankProfile.RankSetting.Type.RANKTYPE);
return setting != null && setting.getValue().equals(RankType.DEFAULT);
}
private FieldRankSettings deriveFieldRankSettings(String fieldName) {
FieldRankSettings settings = fieldRankSettings.get(fieldName);
if (settings == null) {
settings = new FieldRankSettings(fieldName);
fieldRankSettings.put(fieldName, settings);
}
return settings;
}
/** Derives the properties this produces */
public List<Pair<String, String>> derive() {
List<Pair<String, String>> properties = new ArrayList<>();
for (RankProfile.RankProperty property : rankProperties) {
if ("rankingExpression(firstphase).rankingScript".equals(property.getName())) {
try {
firstPhaseRanking = new RankingExpression(property.getValue());
} catch (ParseException e) {
throw new IllegalArgumentException("Could not parse second phase expression", e);
}
}
else if ("rankingExpression(secondphase).rankingScript".equals(property.getName())) {
try {
secondPhaseRanking = new RankingExpression(property.getValue());
} catch (ParseException e) {
throw new IllegalArgumentException("Could not parse second phase expression", e);
}
}
else {
properties.add(new Pair<>(property.getName(), property.getValue()));
}
}
properties.addAll(deriveRankingPhaseRankProperties(firstPhaseRanking, "firstphase"));
properties.addAll(deriveRankingPhaseRankProperties(secondPhaseRanking, "secondphase"));
for (FieldRankSettings settings : fieldRankSettings.values()) {
properties.addAll(settings.deriveRankProperties());
}
for (RankProfile.RankProperty property : boostAndWeightRankProperties) {
properties.add(new Pair<>(property.getName(), property.getValue()));
}
for (ReferenceNode feature : summaryFeatures) {
properties.add(new Pair<>(summaryFeatureFefPropertyPrefix, feature.toString()));
}
for (ReferenceNode feature : rankFeatures) {
properties.add(new Pair<>(rankFeatureFefPropertyPrefix, feature.toString()));
}
if (numThreadsPerSearch > 0) {
properties.add(new Pair<>("vespa.matching.numthreadspersearch", numThreadsPerSearch + ""));
}
if (minHitsPerThread > 0) {
properties.add(new Pair<>("vespa.matching.minhitsperthread", minHitsPerThread + ""));
}
if (numSearchPartitions >= 0) {
properties.add(new Pair<>("vespa.matching.numsearchpartitions", numSearchPartitions + ""));
}
if (termwiseLimit < 1.0) {
properties.add(new Pair<>("vespa.matching.termwise_limit", termwiseLimit + ""));
}
if (matchPhaseSettings != null) {
properties.add(new Pair<>("vespa.matchphase.degradation.attribute", matchPhaseSettings.getAttribute()));
properties.add(new Pair<>("vespa.matchphase.degradation.ascendingorder", matchPhaseSettings.getAscending() + ""));
properties.add(new Pair<>("vespa.matchphase.degradation.maxhits", matchPhaseSettings.getMaxHits() + ""));
properties.add(new Pair<>("vespa.matchphase.degradation.maxfiltercoverage", matchPhaseSettings.getMaxFilterCoverage() + ""));
properties.add(new Pair<>("vespa.matchphase.degradation.samplepercentage", matchPhaseSettings.getEvaluationPoint() + ""));
properties.add(new Pair<>("vespa.matchphase.degradation.postfiltermultiplier", matchPhaseSettings.getPrePostFilterTippingPoint() + ""));
RankProfile.DiversitySettings diversitySettings = matchPhaseSettings.getDiversity();
if (diversitySettings != null) {
properties.add(new Pair<>("vespa.matchphase.diversity.attribute", diversitySettings.getAttribute()));
properties.add(new Pair<>("vespa.matchphase.diversity.mingroups", String.valueOf(diversitySettings.getMinGroups())));
properties.add(new Pair<>("vespa.matchphase.diversity.cutoff.factor", String.valueOf(diversitySettings.getCutoffFactor())));
properties.add(new Pair<>("vespa.matchphase.diversity.cutoff.strategy", String.valueOf(diversitySettings.getCutoffStrategy())));
}
}
if (rerankCount > -1) {
properties.add(new Pair<>("vespa.hitcollector.heapsize", rerankCount + ""));
}
if (keepRankCount > -1) {
properties.add(new Pair<>("vespa.hitcollector.arraysize", keepRankCount + ""));
}
if (rankScoreDropLimit > -Double.MAX_VALUE) {
properties.add(new Pair<>("vespa.hitcollector.rankscoredroplimit", rankScoreDropLimit + ""));
}
if (ignoreDefaultRankFeatures) {
properties.add(new Pair<>("vespa.dump.ignoredefaultfeatures", String.valueOf(true)));
}
Iterator filterFieldsIterator = filterFields.iterator();
while (filterFieldsIterator.hasNext()) {
String fieldName = (String) filterFieldsIterator.next();
properties.add(new Pair<>("vespa.isfilterfield." + fieldName, String.valueOf(true)));
}
for (Map.Entry<String, String> attributeType : attributeTypes.entrySet()) {
properties.add(new Pair<>("vespa.type.attribute." + attributeType.getKey(), attributeType.getValue()));
}
for (Map.Entry<String, String> queryFeatureType : queryFeatureTypes.entrySet()) {
properties.add(new Pair<>("vespa.type.query." + queryFeatureType.getKey(), queryFeatureType.getValue()));
}
if (properties.size() >= 1000000) throw new RuntimeException("Too many rank properties");
return properties;
}
private List<Pair<String, String>> deriveRankingPhaseRankProperties(RankingExpression expression, String phase) {
List<Pair<String, String>> properties = new ArrayList<>();
if (expression == null) return properties;
String name = expression.getName();
if ("".equals(name))
name = phase;
if (expression.getRoot() instanceof ReferenceNode) {
properties.add(new Pair<>("vespa.rank." + phase, expression.getRoot().toString()));
} else {
properties.add(new Pair<>("vespa.rank." + phase, "rankingExpression(" + name + ")"));
properties.add(new Pair<>("rankingExpression(" + name + ").rankingScript", expression.getRoot().toString()));
}
return properties;
}
}
} | class RawRankProfile implements RankProfilesConfig.Producer {
/** A reusable compressor with default settings */
private static final Compressor compressor = new Compressor();
private final String keyEndMarker = "\r=";
private final String valueEndMarker = "\r\n";
public final static String summaryFeatureFefPropertyPrefix = "vespa.summary.feature";
public final static String rankFeatureFefPropertyPrefix = "vespa.dump.feature";
private final String name;
private final Compressor.Compression compressedProperties;
/**
* Creates a raw rank profile from the given rank profile
*/
public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels, AttributeFields attributeFields, ModelContext.Properties deployProperties) {
this.name = rankProfile.getName();
compressedProperties = compress(new Deriver(rankProfile, queryProfiles, importedModels, attributeFields, deployProperties).derive());
}
/**
* Only for testing
*/
private Compressor.Compression compress(List<Pair<String, String>> properties) {
StringBuilder b = new StringBuilder();
for (Pair<String, String> property : properties)
b.append(property.getFirst()).append(keyEndMarker).append(property.getSecond()).append(valueEndMarker);
return compressor.compress(b.toString().getBytes(StandardCharsets.UTF_8));
}
private List<Pair<String, String>> decompress(Compressor.Compression compression) {
String propertiesString = new String(compressor.decompress(compression), StandardCharsets.UTF_8);
if (propertiesString.isEmpty()) return ImmutableList.of();
ImmutableList.Builder<Pair<String, String>> properties = new ImmutableList.Builder<>();
for (String propertyString : propertiesString.split(valueEndMarker)) {
String[] property = propertyString.split(keyEndMarker);
properties.add(new Pair<>(property[0], property[1]));
}
return properties.build();
}
public String getName() { return name; }
@Override
public String toString() {
return " rank profile " + name;
}
@Override
public void getConfig(RankProfilesConfig.Builder builder) {
RankProfilesConfig.Rankprofile.Builder b = new RankProfilesConfig.Rankprofile.Builder().name(getName());
getRankProperties(b);
builder.rankprofile(b);
}
private void getRankProperties(RankProfilesConfig.Rankprofile.Builder b) {
RankProfilesConfig.Rankprofile.Fef.Builder fefB = new RankProfilesConfig.Rankprofile.Fef.Builder();
for (Pair<String, String> p : decompress(compressedProperties))
fefB.property(new RankProfilesConfig.Rankprofile.Fef.Property.Builder().name(p.getFirst()).value(p.getSecond()));
b.fef(fefB);
}
/**
* Returns the properties of this as an unmodifiable list.
* Note: This method is expensive.
*/
public List<Pair<String, String>> configProperties() { return decompress(compressedProperties); }
/**
 * Derives the low-level backend rank properties of a single rank profile.
 *
 * <p>One instance is created per rank profile; the constructor compiles the profile and
 * collects its settings, and {@link #derive()} produces the final list of
 * (name, value) property pairs consumed by the search backend.</p>
 */
private static class Deriver {

    /** The field rank settings of this profile, keyed by field name (insertion-ordered). */
    private Map<String, FieldRankSettings> fieldRankSettings = new java.util.LinkedHashMap<>();

    private RankingExpression firstPhaseRanking = null;
    private RankingExpression secondPhaseRanking = null;

    private Set<ReferenceNode> summaryFeatures = new LinkedHashSet<>();
    private Set<ReferenceNode> rankFeatures = new LinkedHashSet<>();
    private List<RankProfile.RankProperty> rankProperties = new ArrayList<>();

    /** Rank properties for weight settings, to make these available to feature executors. */
    private List<RankProfile.RankProperty> boostAndWeightRankProperties = new ArrayList<>();

    // -1 / 1.0 / -Double.MAX_VALUE act as "unset" sentinels; unset values are not emitted.
    private boolean ignoreDefaultRankFeatures = false;
    private RankProfile.MatchPhaseSettings matchPhaseSettings = null;
    private int rerankCount = -1;
    private int keepRankCount = -1;
    private int numThreadsPerSearch = -1;
    private int minHitsPerThread = -1;
    private int numSearchPartitions = -1;
    private double termwiseLimit = 1.0;
    private double rankScoreDropLimit = -Double.MAX_VALUE;

    /** The rank type definitions used to derive settings for the native rank features. */
    private final NativeRankTypeDefinitionSet nativeRankTypeDefinitions = new NativeRankTypeDefinitionSet("default");

    private final Map<String, String> attributeTypes;
    private final Map<String, String> queryFeatureTypes;

    private Set<String> filterFields = new java.util.LinkedHashSet<>();

    /**
     * Creates a raw rank profile from the given rank profile.
     *
     * @param rankProfile      the source profile; compiled here before derivation
     * @param queryProfiles    registry used when compiling the profile
     * @param importedModels   imported ML models available to the profile
     * @param attributeFields  the attribute fields of the search definition
     * @param deployProperties deployment-level defaults (e.g. default termwise limit)
     */
    Deriver(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels,
            AttributeFields attributeFields, ModelContext.Properties deployProperties)
    {
        RankProfile compiled = rankProfile.compile(queryProfiles, importedModels);
        attributeTypes = compiled.getAttributeTypes();
        queryFeatureTypes = compiled.getQueryFeatureTypes();
        deriveRankingFeatures(compiled, deployProperties);
        deriveRankTypeSetting(compiled, attributeFields);
        deriveFilterFields(compiled);
        deriveWeightProperties(compiled);
    }

    /** Collects all fields marked as filter fields in the profile. */
    private void deriveFilterFields(RankProfile rp) {
        filterFields.addAll(rp.allFilterFields());
    }

    /** Copies ranking-feature related settings from the compiled profile into this deriver. */
    private void deriveRankingFeatures(RankProfile rankProfile, ModelContext.Properties deployProperties) {
        firstPhaseRanking = rankProfile.getFirstPhaseRanking();
        secondPhaseRanking = rankProfile.getSecondPhaseRanking();
        summaryFeatures = new LinkedHashSet<>(rankProfile.getSummaryFeatures());
        rankFeatures = rankProfile.getRankFeatures();
        rerankCount = rankProfile.getRerankCount();
        matchPhaseSettings = rankProfile.getMatchPhaseSettings();
        numThreadsPerSearch = rankProfile.getNumThreadsPerSearch();
        minHitsPerThread = rankProfile.getMinHitsPerThread();
        numSearchPartitions = rankProfile.getNumSearchPartitions();
        // Fall back to the deployment-wide default when the profile does not set a limit
        termwiseLimit = rankProfile.getTermwiseLimit().orElse(deployProperties.defaultTermwiseLimit());
        keepRankCount = rankProfile.getKeepRankCount();
        rankScoreDropLimit = rankProfile.getRankScoreDropLimit();
        ignoreDefaultRankFeatures = rankProfile.getIgnoreDefaultRankFeatures();
        rankProperties = new ArrayList<>(rankProfile.getRankProperties());
        derivePropertiesAndSummaryFeaturesFromFunctions(rankProfile.getFunctions());
    }

    /** Serializes the profile's functions into rank properties and rewrites summary features referring to them. */
    private void derivePropertiesAndSummaryFeaturesFromFunctions(Map<String, RankProfile.RankingExpressionFunction> functions) {
        if (functions.isEmpty()) return;

        List<ExpressionFunction> functionExpressions = functions.values().stream().map(f -> f.function()).collect(Collectors.toList());
        Map<String, String> functionProperties = new LinkedHashMap<>();
        functionProperties.putAll(deriveFunctionProperties(functions, functionExpressions));
        if (firstPhaseRanking != null) {
            functionProperties.putAll(firstPhaseRanking.getRankProperties(functionExpressions));
        }
        if (secondPhaseRanking != null) {
            functionProperties.putAll(secondPhaseRanking.getRankProperties(functionExpressions));
        }
        for (Map.Entry<String, String> e : functionProperties.entrySet()) {
            rankProperties.add(new RankProfile.RankProperty(e.getKey(), e.getValue()));
        }
        SerializationContext context = new SerializationContext(functionExpressions, null, functionProperties);
        replaceFunctionSummaryFeatures(context);
    }

    /** Serializes each function's body, argument types and return type into rank properties. */
    private Map<String, String> deriveFunctionProperties(Map<String, RankProfile.RankingExpressionFunction> functions,
                                                         List<ExpressionFunction> functionExpressions) {
        SerializationContext context = new SerializationContext(functionExpressions);
        for (Map.Entry<String, RankProfile.RankingExpressionFunction> e : functions.entrySet()) {
            String expressionString = e.getValue().function().getBody().getRoot().toString(new StringBuilder(), context, null, null).toString();
            context.addFunctionSerialization(RankingExpression.propertyName(e.getKey()), expressionString);
            for (Map.Entry<String, TensorType> argumentType : e.getValue().function().argumentTypes().entrySet())
                context.addArgumentTypeSerialization(e.getKey(), argumentType.getKey(), argumentType.getValue());
            if (e.getValue().function().returnType().isPresent())
                context.addFunctionTypeSerialization(e.getKey(), e.getValue().function().returnType().get());
        }
        return context.serializedFunctions();
    }

    /**
     * Replaces each summary feature that names a function with a
     * {@code rankingExpression(<name>)} reference to that function's serialization.
     */
    private void replaceFunctionSummaryFeatures(SerializationContext context) {
        if (summaryFeatures == null) return;
        Map<String, ReferenceNode> functionSummaryFeatures = new LinkedHashMap<>();
        for (Iterator<ReferenceNode> i = summaryFeatures.iterator(); i.hasNext(); ) {
            ReferenceNode referenceNode = i.next();
            if (context.getFunction(referenceNode.getName()) != null) { // the feature is a function
                context.addFunctionSerialization(RankingExpression.propertyName(referenceNode.getName()),
                                                 referenceNode.toString(new StringBuilder(), context, null, null).toString());
                ReferenceNode newReferenceNode = new ReferenceNode("rankingExpression(" + referenceNode.getName() + ")", referenceNode.getArguments().expressions(), referenceNode.getOutput());
                functionSummaryFeatures.put(referenceNode.getName(), newReferenceNode);
                i.remove(); // removed here, re-added under the rewritten name below
            }
        }
        for (Map.Entry<String, ReferenceNode> e : functionSummaryFeatures.entrySet()) {
            summaryFeatures.add(e.getValue());
        }
    }

    /** Derives field weight settings into rank properties, making them available to feature executors. */
    private void deriveWeightProperties(RankProfile rankProfile) {
        for (RankProfile.RankSetting setting : rankProfile.rankSettings()) {
            if (!setting.getType().equals(RankProfile.RankSetting.Type.WEIGHT)) {
                continue;
            }
            boostAndWeightRankProperties.add(new RankProfile.RankProperty("vespa.fieldweight." + setting.getFieldName(),
                                                                          String.valueOf(setting.getIntValue())));
        }
    }

    /** Adds the type boosts from a rank profile. */
    private void deriveRankTypeSetting(RankProfile rankProfile, AttributeFields attributeFields) {
        for (Iterator<RankProfile.RankSetting> i = rankProfile.rankSettingIterator(); i.hasNext(); ) {
            RankProfile.RankSetting setting = i.next();
            if (!setting.getType().equals(RankProfile.RankSetting.Type.RANKTYPE)) continue;
            deriveNativeRankTypeSetting(setting.getFieldName(), (RankType) setting.getValue(), attributeFields,
                                        hasDefaultRankTypeSetting(rankProfile, setting.getFieldName()));
        }
    }

    /**
     * Derives native rank tables for one field's rank type.
     * Index tables apply to non-attribute fields, attribute tables to attribute fields.
     *
     * @throws IllegalArgumentException if the rank type has no native implementation
     */
    private void deriveNativeRankTypeSetting(String fieldName, RankType rankType, AttributeFields attributeFields, boolean isDefaultSetting) {
        if (isDefaultSetting) return;

        NativeRankTypeDefinitionSet.RankTypeDefinition unused = null; // (placeholder removed)
        NativeRankTypeDefinition definition = nativeRankTypeDefinitions.getRankTypeDefinition(rankType);
        if (definition == null)
            throw new IllegalArgumentException("In field '" + fieldName + "': " +
                                               rankType + " is known but has no implementation. " +
                                               "Supported rank types: " +
                                               nativeRankTypeDefinitions.types().keySet());

        FieldRankSettings settings = deriveFieldRankSettings(fieldName);
        for (Iterator<NativeTable> i = definition.rankSettingIterator(); i.hasNext(); ) {
            NativeTable table = i.next();
            // Add the table if the field is an index field, or if it is an attribute field and
            // we are using the native rank profile.
            if ((FieldRankSettings.isIndexFieldTable(table) && attributeFields.getAttribute(fieldName) == null) ||
                (FieldRankSettings.isAttributeFieldTable(table) && attributeFields.getAttribute(fieldName) != null)) {
                settings.addTable(table);
            }
        }
    }

    /** Returns whether the profile's rank-type setting for the field is the default type. */
    private boolean hasDefaultRankTypeSetting(RankProfile rankProfile, String fieldName) {
        RankProfile.RankSetting setting =
                rankProfile.getRankSetting(fieldName, RankProfile.RankSetting.Type.RANKTYPE);
        return setting != null && setting.getValue().equals(RankType.DEFAULT);
    }

    /** Returns the (possibly newly created) field rank settings for the given field. */
    private FieldRankSettings deriveFieldRankSettings(String fieldName) {
        FieldRankSettings settings = fieldRankSettings.get(fieldName);
        if (settings == null) {
            settings = new FieldRankSettings(fieldName);
            fieldRankSettings.put(fieldName, settings);
        }
        return settings;
    }

    /**
     * Derives the properties this produces.
     *
     * @return the derived (name, value) property pairs
     * @throws IllegalArgumentException if a phase expression arriving as a rank property cannot be parsed
     */
    public List<Pair<String, String>> derive() {
        List<Pair<String, String>> properties = new ArrayList<>();
        for (RankProfile.RankProperty property : rankProperties) {
            // Phase expressions arriving as serialized properties are parsed back into
            // expressions instead of being passed through verbatim.
            if ("rankingExpression(firstphase).rankingScript".equals(property.getName())) {
                try {
                    firstPhaseRanking = new RankingExpression(property.getValue());
                } catch (ParseException e) {
                    // Fixed: previously said "second phase" (copy/paste error)
                    throw new IllegalArgumentException("Could not parse first phase expression", e);
                }
            }
            else if ("rankingExpression(secondphase).rankingScript".equals(property.getName())) {
                try {
                    secondPhaseRanking = new RankingExpression(property.getValue());
                } catch (ParseException e) {
                    throw new IllegalArgumentException("Could not parse second phase expression", e);
                }
            }
            else {
                properties.add(new Pair<>(property.getName(), property.getValue()));
            }
        }
        properties.addAll(deriveRankingPhaseRankProperties(firstPhaseRanking, "firstphase"));
        properties.addAll(deriveRankingPhaseRankProperties(secondPhaseRanking, "secondphase"));
        for (FieldRankSettings settings : fieldRankSettings.values()) {
            properties.addAll(settings.deriveRankProperties());
        }
        for (RankProfile.RankProperty property : boostAndWeightRankProperties) {
            properties.add(new Pair<>(property.getName(), property.getValue()));
        }
        for (ReferenceNode feature : summaryFeatures) {
            properties.add(new Pair<>(summaryFeatureFefPropertyPrefix, feature.toString()));
        }
        for (ReferenceNode feature : rankFeatures) {
            properties.add(new Pair<>(rankFeatureFefPropertyPrefix, feature.toString()));
        }
        // Only emit values the profile actually set (see the "unset" sentinels above)
        if (numThreadsPerSearch > 0) {
            properties.add(new Pair<>("vespa.matching.numthreadspersearch", numThreadsPerSearch + ""));
        }
        if (minHitsPerThread > 0) {
            properties.add(new Pair<>("vespa.matching.minhitsperthread", minHitsPerThread + ""));
        }
        if (numSearchPartitions >= 0) {
            properties.add(new Pair<>("vespa.matching.numsearchpartitions", numSearchPartitions + ""));
        }
        if (termwiseLimit < 1.0) {
            properties.add(new Pair<>("vespa.matching.termwise_limit", termwiseLimit + ""));
        }
        if (matchPhaseSettings != null) {
            properties.add(new Pair<>("vespa.matchphase.degradation.attribute", matchPhaseSettings.getAttribute()));
            properties.add(new Pair<>("vespa.matchphase.degradation.ascendingorder", matchPhaseSettings.getAscending() + ""));
            properties.add(new Pair<>("vespa.matchphase.degradation.maxhits", matchPhaseSettings.getMaxHits() + ""));
            properties.add(new Pair<>("vespa.matchphase.degradation.maxfiltercoverage", matchPhaseSettings.getMaxFilterCoverage() + ""));
            properties.add(new Pair<>("vespa.matchphase.degradation.samplepercentage", matchPhaseSettings.getEvaluationPoint() + ""));
            properties.add(new Pair<>("vespa.matchphase.degradation.postfiltermultiplier", matchPhaseSettings.getPrePostFilterTippingPoint() + ""));
            RankProfile.DiversitySettings diversitySettings = matchPhaseSettings.getDiversity();
            if (diversitySettings != null) {
                properties.add(new Pair<>("vespa.matchphase.diversity.attribute", diversitySettings.getAttribute()));
                properties.add(new Pair<>("vespa.matchphase.diversity.mingroups", String.valueOf(diversitySettings.getMinGroups())));
                properties.add(new Pair<>("vespa.matchphase.diversity.cutoff.factor", String.valueOf(diversitySettings.getCutoffFactor())));
                properties.add(new Pair<>("vespa.matchphase.diversity.cutoff.strategy", String.valueOf(diversitySettings.getCutoffStrategy())));
            }
        }
        if (rerankCount > -1) {
            properties.add(new Pair<>("vespa.hitcollector.heapsize", rerankCount + ""));
        }
        if (keepRankCount > -1) {
            properties.add(new Pair<>("vespa.hitcollector.arraysize", keepRankCount + ""));
        }
        if (rankScoreDropLimit > -Double.MAX_VALUE) {
            properties.add(new Pair<>("vespa.hitcollector.rankscoredroplimit", rankScoreDropLimit + ""));
        }
        if (ignoreDefaultRankFeatures) {
            properties.add(new Pair<>("vespa.dump.ignoredefaultfeatures", String.valueOf(true)));
        }
        for (String fieldName : filterFields) { // was a raw Iterator with an explicit cast
            properties.add(new Pair<>("vespa.isfilterfield." + fieldName, String.valueOf(true)));
        }
        for (Map.Entry<String, String> attributeType : attributeTypes.entrySet()) {
            properties.add(new Pair<>("vespa.type.attribute." + attributeType.getKey(), attributeType.getValue()));
        }
        for (Map.Entry<String, String> queryFeatureType : queryFeatureTypes.entrySet()) {
            properties.add(new Pair<>("vespa.type.query." + queryFeatureType.getKey(), queryFeatureType.getValue()));
        }
        if (properties.size() >= 1000000) throw new RuntimeException("Too many rank properties");
        return properties;
    }

    /**
     * Derives the properties selecting the ranking expression of one phase
     * ("firstphase" or "secondphase"). A bare feature reference is used directly;
     * any other expression is registered under a rankingExpression(name) script.
     */
    private List<Pair<String, String>> deriveRankingPhaseRankProperties(RankingExpression expression, String phase) {
        List<Pair<String, String>> properties = new ArrayList<>();
        if (expression == null) return properties;

        String name = expression.getName();
        if ("".equals(name))
            name = phase;

        if (expression.getRoot() instanceof ReferenceNode) {
            properties.add(new Pair<>("vespa.rank." + phase, expression.getRoot().toString()));
        } else {
            properties.add(new Pair<>("vespa.rank." + phase, "rankingExpression(" + name + ")"));
            properties.add(new Pair<>("rankingExpression(" + name + ").rankingScript", expression.getRoot().toString()));
        }
        return properties;
    }

}
} |
// NOTE(review): this region contained two near-identical copies of testDocumentUpdate;
// the first used the document id "id:myspace:test::adocnything" — a rogue paste of
// "a doc" into "anything". The copies are merged into this single corrected test.
public void testDocumentUpdate() throws ParseException {
    // A field-less update matches on document type only; selections referencing
    // field values cannot be decided from an update and evaluate to INVALID.
    DocumentUpdate upd = new DocumentUpdate(manager.getDocumentType("test"), new DocumentId("id:myspace:test::anything"));
    assertEquals(Result.TRUE, evaluate("test", upd));
    assertEquals(Result.FALSE, evaluate("music", upd));
    assertEquals(Result.TRUE, evaluate("test or music", upd));
    assertEquals(Result.FALSE, evaluate("test and music", upd));
    assertEquals(Result.INVALID, evaluate("test.hint", upd));
    assertEquals(Result.INVALID, evaluate("test.anything", upd));
    assertEquals(Result.INVALID, evaluate("test.hint < 24", upd));
}

class DocumentSelectorTestCase {
private static DocumentTypeManager manager = new DocumentTypeManager();

@Before
public void setUp() {
    // Register the "test" document type with one field of every kind the selector
    // tests exercise: primitives, struct, struct array, weighted set and maps.
    DocumentType docType = new DocumentType("test");
    docType.addHeaderField("hint", DataType.INT);
    docType.addHeaderField("hfloat", DataType.FLOAT);
    docType.addHeaderField("hstring", DataType.STRING);
    docType.addField("content", DataType.STRING);

    StructDataType structType = new StructDataType("mystruct");
    structType.addField(new Field("key", DataType.INT, false));
    structType.addField(new Field("value", DataType.STRING, false));
    docType.addHeaderField("mystruct", structType);

    ArrayDataType structArrayType = new ArrayDataType(structType);
    docType.addField("structarray", structArrayType);
    docType.addField("stringweightedset", new WeightedSetDataType(DataType.STRING, false, false));
    docType.addField("mymap", new MapDataType(DataType.INT, DataType.STRING));
    docType.addField("structarrmap", new MapDataType(DataType.STRING, structArrayType));

    ArrayDataType intArrayType = new ArrayDataType(DataType.INT);
    docType.addField("intarray", intArrayType);
    manager.registerDocumentType(docType);

    // Additional (empty) document types referenced by the parser tests.
    manager.registerDocumentType(new DocumentType("notandor"));
    manager.registerDocumentType(new DocumentType("ornotand"));
    manager.registerDocumentType(new DocumentType("andornot"));
    manager.registerDocumentType(new DocumentType("idid"));
    manager.registerDocumentType(new DocumentType("usergroup"));
}
@Test
public void testParsing() throws ParseException {
    // assertParse(expected, input) checks that 'input' parses and renders back as
    // 'expected'; the single-argument form expects the text to round-trip unchanged.

    // Numeric and string literals
    assertParse("3.14 > 0");
    assertParse("-999 > 0");
    assertParse("150000.0 > 0", "15e4 > 0");
    assertParse("3.4E-4 > 0", "3.4e-4 > 0");
    assertParse("\" Test \" = \"*\"");
    assertParse("id = \"*\"", "id = '*'");

    // Document id components and functions
    assertParse("id.group == 3");
    assertParse("id.namespace = \"*\"");
    assertParse("id.hash() > 0");
    assertParse("id.namespace.hash() > 0");

    // Field access, including underscored document-type names
    assertParse("music.artist = \"*\"");
    assertParse("music.artist.lowercase() = \"*\"");
    assertParse("music_.artist = \"*\"");
    assertParse("music_foo.artist = \"*\"");
    assertParse("music_foo_.artist = \"*\"");

    // Arithmetic normalization (whitespace and signs)
    assertParse("(4 + 3) > 0", "(4+3) > 0");
    assertParse("1 + 1 > 0", "1 +1 > 0");
    assertParse("1 + -1 > 0", "1 + -1 > 0");
    assertParse("1 + 1.0 > 0", "1 + +1.0 > 0");
    assertParse("1 - 1 > 0", "1 -1 > 0");
    assertParse("1 - -1 > 0", "1 - -1 > 0");
    assertParse("1 - 1.0 > 0", "1 - +1.0 > 0");
    assertParse("1 + 2 * 3 - 10 % 2 / 3 > 0", "1 +2 * 3- 10%2 /3 > 0");
    assertParse("((43 + 14) / 34) > 0");
    assertParse("(34 * ((3 - 1) % 4)) > 0");

    // Boolean combinations and case-insensitive keywords
    assertParse("true");
    assertParse("false");
    assertParse("music");
    assertParse("(music or book)");
    assertParse("music or book", "music or book");
    assertParse("(music or (book and video))");
    assertParse("music or (book and video)", "music or (book and video)");
    assertParse("((music or book) and video)");
    assertParse("(music or book) and video", "(music or book) and video");
    assertParse("music.test > 0");
    assertParse("music.artist = \"*john*\"");
    assertParse("music.length >= 180");
    assertParse("true or not false and true", "true oR nOt false And true");
    assertParse("(true or false) and true", "(true oR false) aNd true");

    // now() in expressions
    assertParse("music.expire > now()");
    assertParse("music.expire > now() - 300");
    assertParse("now or now_search");
    assertParse("(music.expire / 1000) > (now() - 300)");
}
@Test
public void testReservedWords() throws ParseException {
    // Reserved words must remain usable as quoted values and as prefixes of
    // ordinary identifiers (e.g. id_t, idtype).
    String[] expressions = {
            "id == 'id' or id_t or idtype",
            "id.scheme == 'scheme' or scheme_t or schemetype",
            "id.namespace == 'namespace' or namespace_t or namespacetype",
            "id.specific == 'specific' or specific_t or specifictype",
            "id.user == 'user' or user_t or usertype",
            "id.group == 'group' or group_t or grouptype",
            "id.bucket == 'bucket' or bucket_t or buckettype",
            "null == 'null' or null_t or nulltype",
            "true or true_t or truetype",
            "false or false_t or falsetype",
            "true or and_t or andtype",
            "true or or_t or ortype"
    };
    for (String expression : expressions) {
        assertParse(null, expression);
    }
}
@Test
public void testCjkParsing() throws ParseException {
    // CJK characters in string literals must render back as \-u escapes, whether
    // the input contains the raw characters or already-escaped sequences.
    assertParse("music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"",
                "music.artist = \"\u4f73\u80fd\u7d22\u5c3c\u60e0\u666e\"");
    assertParse("music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"",
                "music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"");
}
@Test
public void testParseTerminals() throws ParseException {
    // Boolean and numeric terminals (note normalization of signs and exponents)
    assertParse("true");
    assertParse("music.hmm == 123");
    assertParse("music.hmm == 123.53", "music.hmm == +123.53");
    assertParse("music.hmm == -123.5");
    assertParse("music.hmm == 2.3412352E8", "music.hmm == 234123.52e3");
    assertParse("music.hmm == -234.12352", "music.hmm == -234123.52E-3");
    assertParse("music.hmm < aaa");

    // String literals, escapes and field paths (map/array access)
    assertParse("music.hmm == \"test\"");
    assertParse("music.hmm{test} == \"test\"");
    assertParse("music.hmm{test}.foo[3].key == \"test\"");
    assertParse("music.hmm == \"te st \"");
    assertParse("music.hmm == \"test\"", " \t music.hmm\t== \t \"test\"\t");
    assertParse("music.hmm == \"tab\\ttest\"");
    assertParse("music.hmm == \"tab\\u0666test\"", "music.hmm == \"tab\\u0666test\"");
    assertParse("music.hmm == \"tabcomplete\"", "music.hmm == \"tabcomplete\"");
    assertParse("music.hmm == \"tabysf\"", "music.hmm == \"tab\\ysf\"");
    assertParse("music.h == \"\\ttx48 \\n\"", "music.h == \"\\tt\\x48 \\n\"");
    assertParseError("music.hmm <> 12", "Exception parsing document selector 'music.hmm <> 12': Encountered \" \">\" \">\"\" at line 1, column 12.");

    // Comparison operators
    assertParse("music.hmm >= 123");
    assertParse("music.hmm > 123");
    assertParse("music.hmm <= 123");
    assertParse("music.hmm < 123");
    assertParse("music.hmm != 123");
    assertParse("music.hmm");

    // Keyword case-insensitivity
    assertParse("true", "TRUE");
    assertParse("false", "FALSE");
    assertParse("true", "true");
    assertParse("false", "false");
    assertParse("false", "faLSe");

    // Document id components; hex bucket ids normalize to signed decimal
    assertParse("mytype");
    assertParse("id == \"id:ns:mytype::mytest\"");
    assertParse("id.namespace == \"myspace\"");
    assertParse("id.scheme == \"id\"");
    assertParse("id.type == \"mytype\"");
    assertParse("id.user == 1234");
    assertParse("id.bucket == 8388608", "id.bucket == 0x800000");
    assertParse("id.bucket == 8429568", "id.bucket == 0x80a000");
    assertParse("id.bucket == -9223372036854775566",
                "id.bucket == 0x80000000000000f2");
    assertParse("id.group == \"yahoo.com\"");
    assertParse("id.specific == \"mypart\"");
    assertParse("id.scheme = \"*doc\"");

    // Regex, hash(), lowercase() and version() functions
    assertParse("music.artist =~ \"(john|barry|shrek)\"");
    assertParse("id.hash() == 124");
    assertParse("id.specific.hash() == 124");
    assertParse("music.artist.lowercase() == \"chang\"");
    assertParse("music.artist.lowercase().hash() == 124");
    assertParse("music.version() == 8");
    assertParse("music == 8");

    // Parenthesized and arithmetic comparisons
    assertParse("(123) < (200)", "(123) < (200)");
    assertParse("(\"hmm\") < (id.scheme)", "(\"hmm\") < (id.scheme)");
    assertParse("(1 + 2) > 1");
    assertParse("1 + 2 > 1", "1 + 2 > 1");
    assertParse("(1 - 2) > 1");
    assertParse("(1 * 2) > 1");
    assertParse("(1 / 2) > 1");
    assertParse("(1 % 2) > 1");
    assertParse("((1 + 2) * (4 - 2)) == 1");
    assertParse("(1 + 2) * (4 - 2) == 1", "(1 + 2) * (4 - 2) == 1");
    assertParse("((23 + 643) / (34 % 10)) > 34");
    assertParse("23 + 643 / 34 % 10 > 34", "23 + 643 / 34 % 10 > 34");
}
@Test
public void testParseReservedTokens() throws ParseException {
    // "user" and "group" are id keywords but must also work as plain
    // document-type and field names.
    String[] expressions = {
            "user.fieldName == \"fieldValue\"",
            "documentName.user == \"fieldValue\"",
            "group.fieldName == \"fieldValue\"",
            "documentName.group == \"fieldValue\""
    };
    for (String expression : expressions) {
        assertParse(expression);
    }
}
@Test
public void testParseBranches() throws ParseException {
    // Nested boolean combinations must round-trip through the parser unchanged.
    String[] expressions = {
            "((true or false) and (false or true))",
            "(true or (not false and not true))",
            "((243) < 300 and (\"FOO\").lowercase() == (\"foo\"))"
    };
    for (String expression : expressions) {
        assertParse(expression);
    }
}
// NOTE(review): this method carried a duplicated @Test annotation (likely left over
// from a deleted test above); @Test is not a repeatable annotation, so the stray
// copy is removed.
@Test
public void testDocumentRemove() throws ParseException {
    // A remove carries only the document id: type/id selections can be decided,
    // but selections referencing field values evaluate to INVALID (or FALSE when
    // the type itself does not match, as pinned below).
    assertEquals(Result.TRUE, evaluate("test", createRemove("id:ns:test::1")));
    assertEquals(Result.FALSE, evaluate("test", createRemove("id:ns:null::1")));
    assertEquals(Result.TRUE, evaluate("test", createRemove("id:ns:test:n=1234:1")));
    assertEquals(Result.INVALID, evaluate("test.hint", createRemove("id:ns:test::1")));
    assertEquals(Result.FALSE, evaluate("test.hint", createRemove("id:ns:null::1")));
    assertEquals(Result.INVALID, evaluate("test.hint == 0", createRemove("id:ns:test::1")));
    assertEquals(Result.INVALID, evaluate("test.anything", createRemove("id:ns:test::1")));
    assertEquals(Result.INVALID, evaluate("test and test.hint == 0", createRemove("id:ns:test::1")));
}
/** Wraps the given document id string in a DocumentRemove operation. */
private DocumentRemove createRemove(String docId) {
    DocumentId documentId = new DocumentId(docId);
    return new DocumentRemove(documentId);
}
@Test
public void testDocumentGet() throws ParseException {
    // A get carries only the document id: type/id selections can be decided, but
    // selections referencing field values evaluate to INVALID (or FALSE when the
    // type itself does not match, as pinned below).
    assertEquals(Result.TRUE, evaluate("test", createGet("id:ns:test::1")));
    assertEquals(Result.FALSE, evaluate("test", createGet("id:ns:null::1")));
    assertEquals(Result.TRUE, evaluate("test", createGet("id:ns:test:n=1234:1")));
    assertEquals(Result.INVALID, evaluate("test.hint", createGet("id:ns:test::1")));
    assertEquals(Result.FALSE, evaluate("test.hint", createGet("id:ns:null::1")));
    assertEquals(Result.INVALID, evaluate("test.hint == 0", createGet("id:ns:test::1")));
    assertEquals(Result.INVALID, evaluate("test.anything", createGet("id:ns:test::1")));
    assertEquals(Result.INVALID, evaluate("test and test.hint == 0", createGet("id:ns:test::1")));
}
/** Wraps the given document id string in a DocumentGet operation. */
private DocumentGet createGet(String docId) {
    DocumentId documentId = new DocumentId(docId);
    return new DocumentGet(documentId);
}
@Test
public void testInvalidLogic() throws ParseException {
    // A reference to an unset field is FALSE for a put but INVALID for an update.
    // These asserts pin how INVALID propagates through and/or, e.g.
    // INVALID and false -> FALSE, INVALID or true -> TRUE, INVALID or false -> INVALID.
    DocumentPut put = new DocumentPut(manager.getDocumentType("test"), new DocumentId("id:ns:test::"));
    DocumentUpdate upd = new DocumentUpdate(manager.getDocumentType("test"), new DocumentId("id:ns:test::"));
    assertEquals(Result.FALSE, evaluate("test.content", put));
    assertEquals(Result.INVALID, evaluate("test.content", upd));
    assertEquals(Result.FALSE, evaluate("test.content = 1", put));
    assertEquals(Result.INVALID, evaluate("test.content = 1", upd));
    assertEquals(Result.FALSE, evaluate("test.content = 1 and true", put));
    assertEquals(Result.INVALID, evaluate("test.content = 1 and true", upd));
    assertEquals(Result.TRUE, evaluate("test.content = 1 or true", put));
    assertEquals(Result.TRUE, evaluate("test.content = 1 or true", upd));
    assertEquals(Result.FALSE, evaluate("test.content = 1 and false", put));
    assertEquals(Result.FALSE, evaluate("test.content = 1 and false", upd));
    assertEquals(Result.FALSE, evaluate("test.content = 1 or false", put));
    assertEquals(Result.INVALID, evaluate("test.content = 1 or false", upd));
    assertEquals(Result.FALSE, evaluate("true and test.content = 1", put));
    assertEquals(Result.INVALID, evaluate("true and test.content = 1", upd));
    assertEquals(Result.TRUE, evaluate("true or test.content = 1", put));
    assertEquals(Result.TRUE, evaluate("true or test.content = 1", upd));
    assertEquals(Result.FALSE, evaluate("false and test.content = 1", put));
    assertEquals(Result.FALSE, evaluate("false and test.content = 1", upd));
    assertEquals(Result.FALSE, evaluate("false or test.content = 1", put));
    assertEquals(Result.INVALID, evaluate("false or test.content = 1", upd));
}
/**
 * Builds the fixture documents used by the evaluation tests.
 * Document 1 additionally gets struct, struct-array, map and weighted-set values;
 * document 2 gets empty struct/array values; documents 0 and 1 get int arrays.
 * NOTE(review): later asserts depend on these exact indices and values — do not reorder.
 */
List<DocumentPut> createDocs() {
    List<DocumentPut> documents = new ArrayList<>();
    documents.add(createDocument("id:myspace:test::anything", 24, 2.0f, "foo", "bar"));
    documents.add(createDocument("id:anotherspace:test::foo", 13, 4.1f, "bar", "foo"));
    documents.add(createDocument("id:myspace:test:n=1234:mail1", 15, 1.0f, "some", "some"));
    documents.add(createDocument("id:myspace:test:n=5678:bar", 14, 2.4f, "Yet", "More"));
    documents.add(createDocument("id:myspace:test:n=2345:mail2", 15, 1.0f, "bar", "baz"));
    documents.add(createDocument("id:myspace:test:g=mygroup:qux", 15, 1.0f, "quux", "corge"));
    documents.add(createDocument("id:myspace:test::missingint", null, 2.0f, null, "bar"));

    // Document 1: single struct value
    Struct sval = new Struct(documents.get(1).getDocument().getField("mystruct").getDataType());
    sval.setFieldValue("key", new IntegerFieldValue(14));
    sval.setFieldValue("value", new StringFieldValue("structval"));
    documents.get(1).getDocument().setFieldValue("mystruct", sval);

    // Document 1: array of two structs
    Array<Struct> aval = new Array<>(documents.get(1).getDocument().getField("structarray").getDataType());
    {
        Struct sval1 = new Struct(aval.getDataType().getNestedType());
        sval1.setFieldValue("key", new IntegerFieldValue(15));
        sval1.setFieldValue("value", new StringFieldValue("structval1"));
        Struct sval2 = new Struct(aval.getDataType().getNestedType());
        sval2.setFieldValue("key", new IntegerFieldValue(16));
        sval2.setFieldValue("value", new StringFieldValue("structval2"));
        aval.add(sval1);
        aval.add(sval2);
    }
    documents.get(1).getDocument().setFieldValue("structarray", aval);

    // Document 1: int -> string map
    MapFieldValue<IntegerFieldValue, StringFieldValue> mval =
            new MapFieldValue<>((MapDataType)documents.get(1).getDocument().getField("mymap").getDataType());
    mval.put(new IntegerFieldValue(3), new StringFieldValue("a"));
    mval.put(new IntegerFieldValue(5), new StringFieldValue("b"));
    mval.put(new IntegerFieldValue(7), new StringFieldValue("c"));
    documents.get(1).getDocument().setFieldValue("mymap", mval);

    // Document 1: string -> struct-array map ("foo" reuses aval; "bar" gets a new array)
    MapFieldValue<StringFieldValue, Array> amval =
            new MapFieldValue<>((MapDataType)documents.get(1).getDocument().getField("structarrmap").getDataType());
    amval.put(new StringFieldValue("foo"), aval);
    Array<Struct> abval = new Array<>(documents.get(1).getDocument().getField("structarray").getDataType());
    {
        Struct sval1 = new Struct(aval.getDataType().getNestedType());
        sval1.setFieldValue("key", new IntegerFieldValue(17));
        sval1.setFieldValue("value", new StringFieldValue("structval3"));
        Struct sval2 = new Struct(aval.getDataType().getNestedType());
        sval2.setFieldValue("key", new IntegerFieldValue(18));
        sval2.setFieldValue("value", new StringFieldValue("structval4"));
        abval.add(sval1);
        abval.add(sval2);
    }
    amval.put(new StringFieldValue("bar"), abval);
    documents.get(1).getDocument().setFieldValue("structarrmap", amval);

    // Document 1: weighted set of strings
    WeightedSet<StringFieldValue> wsval = new WeightedSet<>(documents.get(1).getDocument().getField("stringweightedset").getDataType());
    wsval.add(new StringFieldValue("foo"));
    wsval.add(new StringFieldValue("val1"));
    wsval.add(new StringFieldValue("val2"));
    wsval.add(new StringFieldValue("val3"));
    wsval.add(new StringFieldValue("val4"));
    documents.get(1).getDocument().setFieldValue("stringweightedset", wsval);

    // Document 2: empty struct and empty struct array
    Struct sval3 = new Struct(documents.get(2).getDocument().getField("mystruct").getDataType());
    documents.get(2).getDocument().setFieldValue("mystruct", sval3);
    Array aval2 = new Array(documents.get(2).getDocument().getField("structarray").getDataType());
    documents.get(2).getDocument().setFieldValue("structarray", aval2);

    // Documents 0 and 1: int arrays
    Array<IntegerFieldValue> intvals1 = new Array<>(documents.get(0).getDocument().getField("intarray").getDataType());
    intvals1.add(new IntegerFieldValue(12));
    intvals1.add(new IntegerFieldValue(40));
    intvals1.add(new IntegerFieldValue(60));
    intvals1.add(new IntegerFieldValue(84));
    documents.get(0).getDocument().setFieldValue("intarray", intvals1);
    Array<IntegerFieldValue> intvals2 = new Array<>(documents.get(1).getDocument().getField("intarray").getDataType());
    intvals2.add(new IntegerFieldValue(3));
    intvals2.add(new IntegerFieldValue(56));
    intvals2.add(new IntegerFieldValue(23));
    intvals2.add(new IntegerFieldValue(9));
    documents.get(1).getDocument().setFieldValue("intarray", intvals2);
    return documents;
}
// Exhaustive coverage of selection-language operators evaluated against the fixture documents
// from createDocs(): comparisons, globbing, regex, null semantics, id accessors, boolean logic,
// functions (hash/lowercase/abs/now), arithmetic, and struct/array/map/weighted-set traversal.
@Test
public void testOperators() throws ParseException {
List<DocumentPut> documents = createDocs();
// The empty selection matches every document.
assertEquals(Result.TRUE, evaluate("", documents.get(0)));
// Numeric comparison operators on constants.
assertEquals(Result.FALSE, evaluate("30 < 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 < 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 < 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 < 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 <= 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 <= 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 <= 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("10 >= 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 >= 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 >= 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("10 > 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 > 10", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 == 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 == 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 != 10", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 != 30", documents.get(0)));
// String equality; single and double quotes are interchangeable.
assertEquals(Result.TRUE, evaluate("\"foo\" != \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\" != \"foo\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("'foo' == \"bar\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("'foo' == \"foo\"", documents.get(0)));
// '=' is glob matching ('*' wildcard); '=~' is regex matching.
assertEquals(Result.FALSE, evaluate("\"bar\" = \"a\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" = \"*a*\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" = \"\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"\" = \"\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" =~ \"^a$\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" =~ \"a\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" =~ \"\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"\" =~ \"\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 = 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 = 30", documents.get(0)));
// Mixed int/float comparisons; ordering a number against a string is INVALID, inequality is TRUE.
assertEquals(Result.FALSE, evaluate("30 < 10.2", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10.2 < 30", documents.get(0)));
assertEquals(Result.INVALID, evaluate("30 < \"foo\"", documents.get(0)));
assertEquals(Result.INVALID, evaluate("30 > \"foo\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 != \"foo\"", documents.get(0)));
assertEquals(Result.INVALID, evaluate("14.2 <= \"foo\"", documents.get(0)));
// null literal semantics: null only equals null.
assertEquals(Result.TRUE, evaluate("null == null", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null = null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null != null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null == \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("14.3 == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null = 0", documents.get(0)));
// Comparing document field values against constants.
assertEquals(Result.TRUE, evaluate("test.hint = 24", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint = 24", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hint = 13", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hint = 13", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.hfloat = 2.0", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hfloat = 1.0", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hfloat = 4.1", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hfloat > 4.09 and test.hfloat < 4.11", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.content = \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = \"bar\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.content = \"foo\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.content = \"foo\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hstring == test.content", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hstring == test.content", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.hint + 1 > 13", documents.get(1)));
// doc1234 (documents.get(6)) has no hint value set: comparisons against the unset field
// are INVALID for ordering, FALSE for ==, TRUE for !=, and INVALID propagates through and/or.
DocumentPut doc1234 = documents.get(6);
assertEquals(Result.TRUE, evaluate("test.hint != 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint == 1234", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint < 1234 and false", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234 and true", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint < 1234 or true", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234 or false", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint and test.hint < 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint != null and test.hint < 1234", doc1234));
// Bare document-type / field references test for existence.
assertEquals(Result.TRUE, evaluate("test", documents.get(0)));
assertEquals(Result.FALSE, evaluate("nonexisting", documents.get(0)));
assertEquals(Result.FALSE, evaluate("nonexisting.reallynonexisting", documents.get(0)));
assertEquals(Result.INVALID, evaluate("nonexisting.reallynonexisting > 13", documents.get(0)));
assertEquals(Result.FALSE, evaluate("true.foo", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hint", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hstring", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint", doc1234));
assertEquals(Result.FALSE, evaluate("test.hstring", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hstring", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint != null", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null != test.hint", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null == test.hint", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null == test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint == null", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint != null", doc1234));
assertEquals(Result.FALSE, evaluate("null != test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint or true", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hint and true", doc1234));
assertEquals(Result.FALSE, evaluate("not test.hint and false", doc1234));
// Document id and its components; keywords are case-insensitive.
assertEquals(Result.TRUE, evaluate("id == \"id:myspace:test::anything\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate(" iD== \"id:myspace:test::anything\" ", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id == \"id:myspa:test::nything\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("Id.scHeme == \"xyz\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.scheme == \"id\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.type == \"test\"", documents.get(4)));
assertEquals(Result.FALSE, evaluate("id.type == \"wrong\"", documents.get(4)));
assertEquals(Result.TRUE, evaluate("Id.namespaCe == \"myspace\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id.NaMespace == \"pace\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific == \"anything\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.user=1234", documents.get(2)));
assertEquals(Result.TRUE, evaluate("id.user=2345", documents.get(4)));
assertEquals(Result.TRUE, evaluate("id.group=\"mygroup\"", documents.get(5)));
// Accessing user/group on ids without those components is an error.
assertError("id.user == 1234", documents.get(0), "User identifier is null.");
assertError("id.group == 1234", documents.get(3), "Group identifier is null.");
assertError("id.group == \"yahoo\"", documents.get(3), "Group identifier is null.");
// Boolean connectives, precedence (and binds tighter than or) and not.
assertEquals(Result.FALSE, evaluate("true and false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true or false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("false and true or true and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("false or true and true or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("not false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("not true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true and not false or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("((243 < 300) and (\"FOO\".lowercase() == \"foo\"))", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.content = 1 or true", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 and false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 or false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("true and test.content = 1", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true or test.content = 1", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false and test.content = 1", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false or test.content = 1", documents.get(0)));
// Value functions: lowercase(), hash(), abs().
assertEquals(Result.FALSE, evaluate("test.hstring.lowercase() == \"Yet\"", documents.get(3)));
assertEquals(Result.TRUE, evaluate("test.hstring.lowercase() == \"yet\"", documents.get(3)));
assertEquals(Result.FALSE, evaluate("test.hfloat.lowercase() == \"yet\"", documents.get(3)));
assertEquals(Result.TRUE, evaluate("\"bar\".hash() == -270124981", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\".hash().abs() == 270124981", documents.get(0)));
assertError("null.hash() == 22460089", documents.get(0), "Can not invoke 'hash()' on 'null' because that term evaluated to null");
assertEquals(Result.FALSE, evaluate("(0.234).hash() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("(0.234).lowercase() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\".hash() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("(234).hash() == 123", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific.hash() = 596580044", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific.hash() % 10 = 4", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id.specific.hash() % 10 = 2", documents.get(0)));
// Arithmetic: string concatenation, modulo, and now().
assertEquals(Result.TRUE, evaluate("\"foo\" + \"bar\" = \"foobar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\" + 4 = 25", documents.get(0)));
assertEquals(Result.FALSE, evaluate("34.0 % 4 = 4", documents.get(0)));
assertEquals(Result.TRUE, evaluate("-6 % 10 = -6", documents.get(0)));
assertEquals(Result.FALSE, evaluate("0 > now()", documents.get(0)));
assertEquals(Result.TRUE, evaluate("0 < now()", documents.get(0)));
assertEquals(Result.TRUE, evaluate("0 < now() - 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("now() - 20 < now() - 10", documents.get(0)));
long secondsNow = System.currentTimeMillis() / 1000;
assertEquals(Result.TRUE, evaluate("now() - " + secondsNow + " < 2", documents.get(0)));
// Struct fields: set only on documents.get(1).
assertEquals(Result.FALSE, evaluate("test.mystruct", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.mystruct == test.mystruct", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct == test.mystruct", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct != test.mystruct", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.mystruct != test.mystruct", documents.get(1)));
assertEquals(Result.INVALID, evaluate("test.mystruct < test.mystruct", documents.get(0)));
// Arrays of structs: bare subfield access matches any element; [n] indexes a specific one.
assertEquals(Result.FALSE, evaluate("test.structarray", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray == test.structarray", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray == test.structarray", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.key == 15", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray[4].key == 15", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray.key == 15", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].key == 16", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].key = 16", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value == \"structval1\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray[4].value == \"structval1\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray.value == \"structval1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[0].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.key = 15", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray.key = 15", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray.key = 15", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value = \"structval2\"", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray.value = \"*ctval*\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].value = \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray[1].value = \"batman\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray.value =~ \"structval[1-9]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value =~ \"structval[a-z]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct.value = \"struc?val\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct.value = \"struc?val\"", documents.get(1)));
assertEquals(Result.INVALID, evaluate("test.mystruct.value =~ \"struct.*\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct.value =~ \"struct.*\"", documents.get(1)));
// Int arrays: comparisons match if any element matches.
assertEquals(Result.FALSE, evaluate("test.intarray < 5", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray < 5", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray > 80", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.intarray > 80", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray >= 84", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray <= 3", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray == 84", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray != 84", documents.get(0)));
// $x/$y variables must bind consistently across all terms that use the same variable.
assertEquals(Result.TRUE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$x].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$x].value == \"structval2\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$y].value == \"structval2\"", documents.get(1)));
// Maps: {key} looks up a key; .value iterates values; bare comparison matches keys.
assertEquals(Result.FALSE, evaluate("test.mymap", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mymap", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3}", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{9}", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} == \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} == \"b\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{9} == \"b\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap == \"a\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} = \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} = \"b\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} =~ \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} =~ \"b\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap.value = \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap.value = \"d\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap.value =~ \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap.value =~ \"d\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap == 3", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap == 4", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap = 3", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap = 4", documents.get(1)));
// Map of struct arrays combined with variables.
assertEquals(Result.TRUE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarrmap.value[$y].key == 15 AND test.structarrmap.value[$y].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap.value[$y].key == 15 AND test.structarrmap.value[$y].value == \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$y}[$x].value == \"structval2\"", documents.get(1)));
// Weighted sets: {key} tests membership, and its value is the weight.
assertEquals(Result.TRUE, evaluate("test.stringweightedset", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset{val1}", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset{val1} == 1", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset{val1} == 2", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset == \"val1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset = \"val*\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset =~ \"val[0-9]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset == \"val5\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset = \"val5\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset =~ \"val5\"", documents.get(1)));
// Variables may bind across different collection types.
assertEquals(Result.TRUE, evaluate("test.structarrmap{$x}.key == 15 AND test.stringweightedset{$x}", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}.key == 17 AND test.stringweightedset{$x}", documents.get(1)));
}
@Test
public void testTicket1769674() {
    // Regression test for ticket 1769674: an unterminated string literal must surface
    // as a lexical error rather than some other failure mode.
    String selection = "music.uri=\"junk";
    String expectedError = "Lexical error at line -1, column 17. Encountered: <EOF> after : \"\\\"junk\"";
    assertParseError(selection, expectedError);
}
@Test
public void testThatVisitingReportsCorrectResult() throws ParseException {
    // Expressions with a supported now() comparison must be reported as convertible
    // and must convert without throwing.
    String[] convertibleExpressions = {
            "music.expire > now()",
            "music.expire > now() and video.expire > now()",
            "music.expire > now() or video",
            "music.expire > now() or video.date < 300",
            "video.date < 300 or music.expire > now()",
            "video.date < 300 and music.expire > now()",
            "music.insertdate > now() - 300 and video.expire > now() - 3600"
    };
    for (String expression : convertibleExpressions) {
        assertVisitWithValidNowWorks(expression);
    }
    // Expressions without now() require no conversion at all.
    assertVisitWithoutNowWorks("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval1\"");
    assertVisitWithoutNowWorks("music.artist.lowercase() == \"chang\"");
    // Unsupported now() constructs must fail conversion with a descriptive error.
    assertVisitWithInvalidNowFails("music.expire > now() + 300", "Arithmetic operator '+' is not supported");
    assertVisitWithInvalidNowFails("music.expire < now()", "Comparison operator '<' is not supported");
    assertVisitWithInvalidNowFails("music.expire >= now()", "Comparison operator '>=' is not supported");
    assertVisitWithInvalidNowFails("now() > now()", "Left hand side of comparison must be a document field");
    assertVisitWithInvalidNowFails("music.name.hash() > now()", "Only attribute items are supported");
}
@Test
public void testThatSelectionIsConvertedToQueries() throws ParseException {
    // now()-based selections are rewritten into one query per document type
    // of the form "field:>now(offset)".
    assertThatQueriesAreCreated("music.expire > now()",
                                Arrays.asList("music"),
                                Arrays.asList("expire:>now(0)"));
    assertThatQueriesAreCreated("music.expire > now() - 300",
                                Arrays.asList("music"),
                                Arrays.asList("expire:>now(300)"));
    assertThatQueriesAreCreated("music.expire > now() - 300 and video.expire > now() - 3600",
                                Arrays.asList("music", "video"),
                                Arrays.asList("expire:>now(300)", "expire:>now(3600)"));
    assertThatQueriesAreCreated("music.expire > now() - 300 or video",
                                Arrays.asList("music"),
                                Arrays.asList("expire:>now(300)"));
    // Conversion rejects a document type mentioned more than once, and now() on the left-hand side.
    assertVisitWithInvalidNowFails("music.field1 > now() - 300 and music.field2 > now() - 300",
                                   "Specifying multiple document types is not allowed");
    assertVisitWithInvalidNowFails("music.field1 > now() - 300 and video.field1 > now() and music.field2 > now() - 300",
                                   "Specifying multiple document types is not allowed");
    assertVisitWithInvalidNowFails("now() > music.field",
                                   "Left hand side of comparison must be a document field");
}
/**
 * Asserts that converting {@code selection} produces exactly the expected per-doctype queries.
 * {@code expectedDoctypes} and {@code expectedQueries} are parallel lists.
 */
public void assertThatQueriesAreCreated(String selection, List<String> expectedDoctypes, List<String> expectedQueries) throws ParseException {
    DocumentSelector selector = new DocumentSelector(selection);

    // The selection must first report itself as requiring now() conversion.
    NowCheckVisitor nowChecker = new NowCheckVisitor();
    selector.visit(nowChecker);
    assertTrue(nowChecker.requiresConversion());

    SelectionExpressionConverter converter = new SelectionExpressionConverter();
    selector.visit(converter);
    Map<String, String> queriesByDoctype = converter.getQueryMap();
    assertEquals(expectedQueries.size(), queriesByDoctype.size());
    for (int i = 0; i < expectedQueries.size(); ++i) {
        String doctype = expectedDoctypes.get(i);
        assertTrue(queriesByDoctype.containsKey(doctype));
        assertEquals(expectedQueries.get(i), queriesByDoctype.get(doctype));
    }
}
/** Asserts that the given expression parses and is reported as NOT requiring now() conversion. */
public void assertVisitWithoutNowWorks(String expression) throws ParseException {
    NowCheckVisitor nowChecker = new NowCheckVisitor();
    new DocumentSelector(expression).visit(nowChecker);
    assertFalse(nowChecker.requiresConversion());
}
/**
 * Asserts that the given expression is reported as requiring now() conversion and that
 * the conversion itself completes without throwing.
 *
 * @param expression the document selection expression to check
 * @throws ParseException if the expression cannot be parsed at all
 */
public void assertVisitWithValidNowWorks(String expression) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expression);
    NowCheckVisitor visitor = new NowCheckVisitor();
    selector.visit(visitor);
    assertTrue(visitor.requiresConversion());
    SelectionExpressionConverter converter = new SelectionExpressionConverter();
    try {
        selector.visit(converter);
    } catch (Exception e) {
        // fail() states the intent directly; the previous assertFalse(msg, true) idiom
        // was an always-failing assertion used as a substitute for fail().
        fail("Converter throws exception : " + e.getMessage());
    }
}
/**
 * Asserts that the given expression is reported as requiring now() conversion but that
 * the conversion fails with exactly {@code expectedError}.
 *
 * @param expression    the document selection expression to convert
 * @param expectedError the exact exception message conversion must produce
 * @throws ParseException if the expression cannot be parsed at all
 */
public void assertVisitWithInvalidNowFails(String expression, String expectedError) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expression);
    NowCheckVisitor visitor = new NowCheckVisitor();
    selector.visit(visitor);
    assertTrue(visitor.requiresConversion());
    SelectionExpressionConverter converter = new SelectionExpressionConverter();
    try {
        selector.visit(converter);
        // fail() states the intent directly; the previous assertFalse(msg, true) idiom
        // was an always-failing assertion used as a substitute for fail().
        fail("Should not be able to convert " + expression + " query");
    } catch (Exception e) {
        assertEquals(expectedError, e.getMessage());
    }
}
/**
 * Builds a "test" document wrapped in a put operation. The hint and hstring fields
 * are left unset when the corresponding argument is null.
 */
private static DocumentPut createDocument(String id, Integer hInt, float hFloat, String hString, String content) {
    Document document = new Document(manager.getDocumentType("test"), new DocumentId(id));
    if (hInt != null) {
        document.setFieldValue("hint", new IntegerFieldValue(hInt));
    }
    document.setFieldValue("hfloat", new FloatFieldValue(hFloat));
    if (hString != null) {
        document.setFieldValue("hstring", new StringFieldValue(hString));
    }
    document.setFieldValue("content", new StringFieldValue(content));
    return new DocumentPut(document);
}
// Convenience overload: the expression must parse and round-trip to exactly itself.
private static void assertParse(String expression) throws ParseException {
assertParse(expression, expression);
}
/**
 * Parses the expression; when expectedString is non-null, also verifies that the
 * parsed selector's canonical toString() form equals it.
 */
private static void assertParse(String expectedString, String expressionString) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expressionString);
    if (expectedString == null) {
        return; // Only checking that parsing succeeds.
    }
    assertEquals(expectedString, selector.toString());
}
/**
 * Asserts that parsing the expression fails and that the reported message starts with
 * {@code expectedError}. Lexical errors are unwrapped from their TokenMgrException cause.
 */
private static void assertParseError(String expressionString, String expectedError) {
    try {
        new DocumentSelector(expressionString);
    }
    catch (ParseException e) {
        Throwable reported = (e.getCause() instanceof TokenMgrException) ? e.getCause() : e;
        assertEquals(expectedError, Exceptions.toMessageString(reported).substring(0, expectedError.length()));
        return;
    }
    fail("The expression '" + expressionString + "' should throw an exception.");
}
/** Parses the selection expression and evaluates it against the given document operation. */
private static Result evaluate(String expressionString, DocumentOperation op) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expressionString);
    return selector.accepts(op);
}
/**
 * Asserts that evaluating the selection against the operation throws a RuntimeException
 * whose message starts with {@code expectedError}. A ParseException instead means the
 * expression itself was malformed, which is also a test failure.
 */
private static void assertError(String expressionString, DocumentOperation op, String expectedError) {
    try {
        evaluate(expressionString, op);
        fail("The evaluation of '" + expressionString + "' should throw an exception.");
    } catch (ParseException e) {
        fail("The expression '" + expressionString + "' should assertEquals ok.");
    } catch (RuntimeException e) {
        // startsWith replaces the previous length pre-check + substring comparison and the
        // stray System.err debug print; on mismatch the failure message shows both strings.
        assertTrue("Expected error starting with '" + expectedError + "', got: " + e.getMessage(),
                   e.getMessage().startsWith(expectedError));
    }
}
} | class DocumentSelectorTestCase {
private static DocumentTypeManager manager = new DocumentTypeManager();
@Before
public void setUp() {
    // Register the "test" document type carrying one field of every kind the tests exercise.
    DocumentType testType = new DocumentType("test");
    testType.addHeaderField("hint", DataType.INT);
    testType.addHeaderField("hfloat", DataType.FLOAT);
    testType.addHeaderField("hstring", DataType.STRING);
    testType.addField("content", DataType.STRING);

    StructDataType structType = new StructDataType("mystruct");
    structType.addField(new Field("key", DataType.INT, false));
    structType.addField(new Field("value", DataType.STRING, false));
    testType.addHeaderField("mystruct", structType);

    ArrayDataType structArrayType = new ArrayDataType(structType);
    testType.addField("structarray", structArrayType);
    testType.addField("stringweightedset", new WeightedSetDataType(DataType.STRING, false, false));
    testType.addField("mymap", new MapDataType(DataType.INT, DataType.STRING));
    testType.addField("structarrmap", new MapDataType(DataType.STRING, structArrayType));
    testType.addField("intarray", new ArrayDataType(DataType.INT));
    manager.registerDocumentType(testType);

    // Additional empty document types used only for type-name matching tests.
    for (String typeName : new String[] { "notandor", "ornotand", "andornot", "idid", "usergroup" }) {
        manager.registerDocumentType(new DocumentType(typeName));
    }
}
// General parsing coverage: each assertParse(expected, input) checks that input parses and
// canonicalizes to expected; the one-argument form requires the expression to round-trip unchanged.
@Test
public void testParsing() throws ParseException {
// Numeric literals, including scientific notation.
assertParse("3.14 > 0");
assertParse("-999 > 0");
assertParse("150000.0 > 0", "15e4 > 0");
assertParse("3.4E-4 > 0", "3.4e-4 > 0");
// String literals and id component access.
assertParse("\" Test \" = \"*\"");
assertParse("id = \"*\"", "id = '*'");
assertParse("id.group == 3");
assertParse("id.namespace = \"*\"");
assertParse("id.hash() > 0");
assertParse("id.namespace.hash() > 0");
// Field access on document types, including underscores in type names.
assertParse("music.artist = \"*\"");
assertParse("music.artist.lowercase() = \"*\"");
assertParse("music_.artist = \"*\"");
assertParse("music_foo.artist = \"*\"");
assertParse("music_foo_.artist = \"*\"");
// Arithmetic normalization: spacing and unary signs are canonicalized.
assertParse("(4 + 3) > 0", "(4+3) > 0");
assertParse("1 + 1 > 0", "1 +1 > 0");
assertParse("1 + -1 > 0", "1 + -1 > 0");
assertParse("1 + 1.0 > 0", "1 + +1.0 > 0");
assertParse("1 - 1 > 0", "1 -1 > 0");
assertParse("1 - -1 > 0", "1 - -1 > 0");
assertParse("1 - 1.0 > 0", "1 - +1.0 > 0");
assertParse("1 + 2 * 3 - 10 % 2 / 3 > 0", "1 +2 * 3- 10%2 /3 > 0");
assertParse("((43 + 14) / 34) > 0");
assertParse("(34 * ((3 - 1) % 4)) > 0");
// Boolean literals, type names and nested connectives.
assertParse("true");
assertParse("false");
assertParse("music");
assertParse("(music or book)");
assertParse("music or book", "music or book");
assertParse("(music or (book and video))");
assertParse("music or (book and video)", "music or (book and video)");
assertParse("((music or book) and video)");
assertParse("(music or book) and video", "(music or book) and video");
assertParse("music.test > 0");
assertParse("music.artist = \"*john*\"");
assertParse("music.length >= 180");
// Keywords are case-insensitive and canonicalized to lower case.
assertParse("true or not false and true", "true oR nOt false And true");
assertParse("(true or false) and true", "(true oR false) aNd true");
// now() expressions.
assertParse("music.expire > now()");
assertParse("music.expire > now() - 300");
assertParse("now or now_search");
assertParse("(music.expire / 1000) > (now() - 300)");
}
// Reserved words (id, scheme, namespace, null, true, and, or, ...) must still be usable
// as prefixes of ordinary identifiers; null expected-string means "must parse" only.
@Test
public void testReservedWords() throws ParseException {
assertParse(null, "id == 'id' or id_t or idtype");
assertParse(null, "id.scheme == 'scheme' or scheme_t or schemetype");
assertParse(null, "id.namespace == 'namespace' or namespace_t or namespacetype");
assertParse(null, "id.specific == 'specific' or specific_t or specifictype");
assertParse(null, "id.user == 'user' or user_t or usertype");
assertParse(null, "id.group == 'group' or group_t or grouptype");
assertParse(null, "id.bucket == 'bucket' or bucket_t or buckettype");
assertParse(null, "null == 'null' or null_t or nulltype");
assertParse(null, "true or true_t or truetype");
assertParse(null, "false or false_t or falsetype");
assertParse(null, "true or and_t or andtype");
assertParse(null, "true or or_t or ortype");
}
// Non-ASCII (CJK) characters in string literals must be canonicalized to \-u escapes,
// whether supplied as raw characters or as already-escaped sequences.
@Test
public void testCjkParsing() throws ParseException {
assertParse("music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"",
"music.artist = \"\u4f73\u80fd\u7d22\u5c3c\u60e0\u666e\"");
assertParse("music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"",
"music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"");
}
// Terminal-symbol parsing: literals of every kind, field paths with map/array subscripts,
// escape sequences, comparison operators, id components, functions and parenthesization.
@Test
public void testParseTerminals() throws ParseException {
assertParse("true");
// Numeric literal normalization (sign, exponent).
assertParse("music.hmm == 123");
assertParse("music.hmm == 123.53", "music.hmm == +123.53");
assertParse("music.hmm == -123.5");
assertParse("music.hmm == 2.3412352E8", "music.hmm == 234123.52e3");
assertParse("music.hmm == -234.12352", "music.hmm == -234123.52E-3");
assertParse("music.hmm < aaa");
// String literals, whitespace tolerance and escape handling.
assertParse("music.hmm == \"test\"");
assertParse("music.hmm{test} == \"test\"");
assertParse("music.hmm{test}.foo[3].key == \"test\"");
assertParse("music.hmm == \"te st \"");
assertParse("music.hmm == \"test\"", " \t music.hmm\t== \t \"test\"\t");
assertParse("music.hmm == \"tab\\ttest\"");
assertParse("music.hmm == \"tab\\u0666test\"", "music.hmm == \"tab\\u0666test\"");
assertParse("music.hmm == \"tabcomplete\"", "music.hmm == \"tabcomplete\"");
assertParse("music.hmm == \"tabysf\"", "music.hmm == \"tab\\ysf\"");
assertParse("music.h == \"\\ttx48 \\n\"", "music.h == \"\\tt\\x48 \\n\"");
// '<>' is not a valid operator.
assertParseError("music.hmm <> 12", "Exception parsing document selector 'music.hmm <> 12': Encountered \" \">\" \">\"\" at line 1, column 12.");
// All comparison operators and bare field existence.
assertParse("music.hmm >= 123");
assertParse("music.hmm > 123");
assertParse("music.hmm <= 123");
assertParse("music.hmm < 123");
assertParse("music.hmm != 123");
assertParse("music.hmm");
// Boolean literals canonicalize to lower case.
assertParse("true", "TRUE");
assertParse("false", "FALSE");
assertParse("true", "true");
assertParse("false", "false");
assertParse("false", "faLSe");
// Document ids and their components; bucket values accept hex notation.
assertParse("mytype");
assertParse("id == \"id:ns:mytype::mytest\"");
assertParse("id.namespace == \"myspace\"");
assertParse("id.scheme == \"id\"");
assertParse("id.type == \"mytype\"");
assertParse("id.user == 1234");
assertParse("id.bucket == 8388608", "id.bucket == 0x800000");
assertParse("id.bucket == 8429568", "id.bucket == 0x80a000");
assertParse("id.bucket == -9223372036854775566",
"id.bucket == 0x80000000000000f2");
assertParse("id.group == \"yahoo.com\"");
assertParse("id.specific == \"mypart\"");
assertParse("id.scheme = \"*doc\"");
assertParse("music.artist =~ \"(john|barry|shrek)\"");
// Function invocation and chaining.
assertParse("id.hash() == 124");
assertParse("id.specific.hash() == 124");
assertParse("music.artist.lowercase() == \"chang\"");
assertParse("music.artist.lowercase().hash() == 124");
assertParse("music.version() == 8");
assertParse("music == 8");
// Parenthesization is preserved where written, dropped where redundant.
assertParse("(123) < (200)", "(123) < (200)");
assertParse("(\"hmm\") < (id.scheme)", "(\"hmm\") < (id.scheme)");
assertParse("(1 + 2) > 1");
assertParse("1 + 2 > 1", "1 + 2 > 1");
assertParse("(1 - 2) > 1");
assertParse("(1 * 2) > 1");
assertParse("(1 / 2) > 1");
assertParse("(1 % 2) > 1");
assertParse("((1 + 2) * (4 - 2)) == 1");
assertParse("(1 + 2) * (4 - 2) == 1", "(1 + 2) * (4 - 2) == 1");
assertParse("((23 + 643) / (34 % 10)) > 34");
assertParse("23 + 643 / 34 % 10 > 34", "23 + 643 / 34 % 10 > 34");
}
@Test
public void testParseReservedTokens() throws ParseException {
    // Identifiers that collide with reserved words ("user", "group") must still
    // parse, both as a leading document/field name and as a dotted field position.
    String[] expressions = {
            "user.fieldName == \"fieldValue\"",
            "documentName.user == \"fieldValue\"",
            "group.fieldName == \"fieldValue\"",
            "documentName.group == \"fieldValue\""
    };
    for (String expression : expressions) {
        assertParse(expression);
    }
}
@Test
public void testParseBranches() throws ParseException {
    // Parenthesized boolean sub-expressions must survive a parse/toString round trip.
    for (String expression : new String[] {
            "((true or false) and (false or true))",
            "(true or (not false and not true))",
            "((243) < 300 and (\"FOO\").lowercase() == (\"foo\"))" }) {
        assertParse(expression);
    }
}
// Fix: the @Test annotation was duplicated here. org.junit.Test is not
// @Repeatable, so annotating the method twice is a compile error.
@Test
public void testDocumentRemove() throws ParseException {
    // A remove carries only the document id: type-level selections decide to
    // TRUE/FALSE, while selections needing field values are INVALID (undecidable).
    assertEquals(Result.TRUE, evaluate("test", createRemove("id:ns:test::1")));
    assertEquals(Result.FALSE, evaluate("test", createRemove("id:ns:null::1")));
    assertEquals(Result.TRUE, evaluate("test", createRemove("id:ns:test:n=1234:1")));
    assertEquals(Result.INVALID, evaluate("test.hint", createRemove("id:ns:test::1")));
    assertEquals(Result.FALSE, evaluate("test.hint", createRemove("id:ns:null::1")));
    assertEquals(Result.INVALID, evaluate("test.hint == 0", createRemove("id:ns:test::1")));
    assertEquals(Result.INVALID, evaluate("test.anything", createRemove("id:ns:test::1")));
    assertEquals(Result.INVALID, evaluate("test and test.hint == 0", createRemove("id:ns:test::1")));
}
/** Wraps the given document id string in a remove operation. */
private DocumentRemove createRemove(String docId) {
    DocumentId id = new DocumentId(docId);
    return new DocumentRemove(id);
}
@Test
public void testDocumentGet() throws ParseException {
    // A get, like a remove, carries only the id: field-dependent selections on a
    // matching type are INVALID, while type mismatches short-circuit to FALSE.
    DocumentGet matching = createGet("id:ns:test::1");
    DocumentGet wrongType = createGet("id:ns:null::1");
    assertEquals(Result.TRUE, evaluate("test", matching));
    assertEquals(Result.FALSE, evaluate("test", wrongType));
    assertEquals(Result.TRUE, evaluate("test", createGet("id:ns:test:n=1234:1")));
    assertEquals(Result.INVALID, evaluate("test.hint", matching));
    assertEquals(Result.FALSE, evaluate("test.hint", wrongType));
    assertEquals(Result.INVALID, evaluate("test.hint == 0", matching));
    assertEquals(Result.INVALID, evaluate("test.anything", matching));
    assertEquals(Result.INVALID, evaluate("test and test.hint == 0", matching));
}
/** Wraps the given document id string in a get operation. */
private DocumentGet createGet(String docId) {
    DocumentId id = new DocumentId(docId);
    return new DocumentGet(id);
}
@Test
// Verifies three-valued logic around field references: on a put the field is
// readable (here absent, so FALSE), while on an update no field values exist, so
// the reference evaluates to INVALID. INVALID propagates through and/or except
// where the other operand alone decides the result ("x or true", "x and false").
public void testInvalidLogic() throws ParseException {
DocumentPut put = new DocumentPut(manager.getDocumentType("test"), new DocumentId("id:ns:test::"));
DocumentUpdate upd = new DocumentUpdate(manager.getDocumentType("test"), new DocumentId("id:ns:test::"));
// Bare field reference and glob comparison: FALSE on put, INVALID on update.
assertEquals(Result.FALSE, evaluate("test.content", put));
assertEquals(Result.INVALID, evaluate("test.content", upd));
assertEquals(Result.FALSE, evaluate("test.content = 1", put));
assertEquals(Result.INVALID, evaluate("test.content = 1", upd));
// "and true" / "or false" keep the INVALID alive; "or true" / "and false" decide it.
assertEquals(Result.FALSE, evaluate("test.content = 1 and true", put));
assertEquals(Result.INVALID, evaluate("test.content = 1 and true", upd));
assertEquals(Result.TRUE, evaluate("test.content = 1 or true", put));
assertEquals(Result.TRUE, evaluate("test.content = 1 or true", upd));
assertEquals(Result.FALSE, evaluate("test.content = 1 and false", put));
assertEquals(Result.FALSE, evaluate("test.content = 1 and false", upd));
assertEquals(Result.FALSE, evaluate("test.content = 1 or false", put));
assertEquals(Result.INVALID, evaluate("test.content = 1 or false", upd));
// Same cases with the constant on the left-hand side.
assertEquals(Result.FALSE, evaluate("true and test.content = 1", put));
assertEquals(Result.INVALID, evaluate("true and test.content = 1", upd));
assertEquals(Result.TRUE, evaluate("true or test.content = 1", put));
assertEquals(Result.TRUE, evaluate("true or test.content = 1", upd));
assertEquals(Result.FALSE, evaluate("false and test.content = 1", put));
assertEquals(Result.FALSE, evaluate("false and test.content = 1", upd));
assertEquals(Result.FALSE, evaluate("false or test.content = 1", put));
assertEquals(Result.INVALID, evaluate("false or test.content = 1", upd));
}
// Builds the fixture documents used by testOperators():
//   0: id:myspace:test::anything      hint=24, hfloat=2.0, hstring="foo", content="bar", intarray set below
//   1: id:anotherspace:test::foo      hint=13, hfloat=4.1; gets struct/array/map/weightedset values below
//   2: id:myspace:test:n=1234:mail1   gets an EMPTY struct and an EMPTY struct array below
//   3: id:myspace:test:n=5678:bar     location variant
//   4: id:myspace:test:n=2345:mail2   location variant
//   5: id:myspace:test:g=mygroup:qux  group variant
//   6: id:myspace:test::missingint    has no "hint" and no "hstring" value
List<DocumentPut> createDocs() {
List<DocumentPut> documents = new ArrayList<>();
documents.add(createDocument("id:myspace:test::anything", 24, 2.0f, "foo", "bar"));
documents.add(createDocument("id:anotherspace:test::foo", 13, 4.1f, "bar", "foo"));
documents.add(createDocument("id:myspace:test:n=1234:mail1", 15, 1.0f, "some", "some"));
documents.add(createDocument("id:myspace:test:n=5678:bar", 14, 2.4f, "Yet", "More"));
documents.add(createDocument("id:myspace:test:n=2345:mail2", 15, 1.0f, "bar", "baz"));
documents.add(createDocument("id:myspace:test:g=mygroup:qux", 15, 1.0f, "quux", "corge"));
documents.add(createDocument("id:myspace:test::missingint", null, 2.0f, null, "bar"));
// Document 1: mystruct = { key: 14, value: "structval" }
Struct sval = new Struct(documents.get(1).getDocument().getField("mystruct").getDataType());
sval.setFieldValue("key", new IntegerFieldValue(14));
sval.setFieldValue("value", new StringFieldValue("structval"));
documents.get(1).getDocument().setFieldValue("mystruct", sval);
// Document 1: structarray = [ {15, "structval1"}, {16, "structval2"} ]
Array<Struct> aval = new Array<>(documents.get(1).getDocument().getField("structarray").getDataType());
{
Struct sval1 = new Struct(aval.getDataType().getNestedType());
sval1.setFieldValue("key", new IntegerFieldValue(15));
sval1.setFieldValue("value", new StringFieldValue("structval1"));
Struct sval2 = new Struct(aval.getDataType().getNestedType());
sval2.setFieldValue("key", new IntegerFieldValue(16));
sval2.setFieldValue("value", new StringFieldValue("structval2"));
aval.add(sval1);
aval.add(sval2);
}
documents.get(1).getDocument().setFieldValue("structarray", aval);
// Document 1: mymap = { 3: "a", 5: "b", 7: "c" }
MapFieldValue<IntegerFieldValue, StringFieldValue> mval =
new MapFieldValue<>((MapDataType)documents.get(1).getDocument().getField("mymap").getDataType());
mval.put(new IntegerFieldValue(3), new StringFieldValue("a"));
mval.put(new IntegerFieldValue(5), new StringFieldValue("b"));
mval.put(new IntegerFieldValue(7), new StringFieldValue("c"));
documents.get(1).getDocument().setFieldValue("mymap", mval);
// Document 1: structarrmap = { "foo": aval (above), "bar": [ {17, "structval3"}, {18, "structval4"} ] }
MapFieldValue<StringFieldValue, Array> amval =
new MapFieldValue<>((MapDataType)documents.get(1).getDocument().getField("structarrmap").getDataType());
amval.put(new StringFieldValue("foo"), aval);
Array<Struct> abval = new Array<>(documents.get(1).getDocument().getField("structarray").getDataType());
{
Struct sval1 = new Struct(aval.getDataType().getNestedType());
sval1.setFieldValue("key", new IntegerFieldValue(17));
sval1.setFieldValue("value", new StringFieldValue("structval3"));
Struct sval2 = new Struct(aval.getDataType().getNestedType());
sval2.setFieldValue("key", new IntegerFieldValue(18));
sval2.setFieldValue("value", new StringFieldValue("structval4"));
abval.add(sval1);
abval.add(sval2);
}
amval.put(new StringFieldValue("bar"), abval);
documents.get(1).getDocument().setFieldValue("structarrmap", amval);
// Document 1: stringweightedset = { foo, val1, val2, val3, val4 } (default weights)
WeightedSet<StringFieldValue> wsval = new WeightedSet<>(documents.get(1).getDocument().getField("stringweightedset").getDataType());
wsval.add(new StringFieldValue("foo"));
wsval.add(new StringFieldValue("val1"));
wsval.add(new StringFieldValue("val2"));
wsval.add(new StringFieldValue("val3"));
wsval.add(new StringFieldValue("val4"));
documents.get(1).getDocument().setFieldValue("stringweightedset", wsval);
// Document 2: empty struct and empty struct array (present but without content).
Struct sval3 = new Struct(documents.get(2).getDocument().getField("mystruct").getDataType());
documents.get(2).getDocument().setFieldValue("mystruct", sval3);
Array aval2 = new Array(documents.get(2).getDocument().getField("structarray").getDataType());
documents.get(2).getDocument().setFieldValue("structarray", aval2);
// Documents 0 and 1: integer arrays used for the array-comparison assertions.
Array<IntegerFieldValue> intvals1 = new Array<>(documents.get(0).getDocument().getField("intarray").getDataType());
intvals1.add(new IntegerFieldValue(12));
intvals1.add(new IntegerFieldValue(40));
intvals1.add(new IntegerFieldValue(60));
intvals1.add(new IntegerFieldValue(84));
documents.get(0).getDocument().setFieldValue("intarray", intvals1);
Array<IntegerFieldValue> intvals2 = new Array<>(documents.get(1).getDocument().getField("intarray").getDataType());
intvals2.add(new IntegerFieldValue(3));
intvals2.add(new IntegerFieldValue(56));
intvals2.add(new IntegerFieldValue(23));
intvals2.add(new IntegerFieldValue(9));
documents.get(1).getDocument().setFieldValue("intarray", intvals2);
return documents;
}
@Test
// Exhaustive evaluation tests for the selector language's operators, run against
// the fixture documents from createDocs(). Grouped by feature below.
public void testOperators() throws ParseException {
List<DocumentPut> documents = createDocs();
// The empty selection matches everything.
assertEquals(Result.TRUE, evaluate("", documents.get(0)));
// Numeric comparison operators on literals.
assertEquals(Result.FALSE, evaluate("30 < 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 < 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 < 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 < 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 <= 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 <= 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 <= 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("10 >= 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 >= 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 >= 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("10 > 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 > 10", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 == 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 == 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 != 10", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 != 30", documents.get(0)));
// String equality (single or double quotes), glob (=) and regex (=~) matching.
assertEquals(Result.TRUE, evaluate("\"foo\" != \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\" != \"foo\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("'foo' == \"bar\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("'foo' == \"foo\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" = \"a\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" = \"*a*\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" = \"\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"\" = \"\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" =~ \"^a$\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" =~ \"a\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" =~ \"\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"\" =~ \"\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 = 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 = 30", documents.get(0)));
// Mixed int/float comparisons work; comparing a number against a string with
// an ordering operator is INVALID, while != holds.
assertEquals(Result.FALSE, evaluate("30 < 10.2", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10.2 < 30", documents.get(0)));
assertEquals(Result.INVALID, evaluate("30 < \"foo\"", documents.get(0)));
assertEquals(Result.INVALID, evaluate("30 > \"foo\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 != \"foo\"", documents.get(0)));
assertEquals(Result.INVALID, evaluate("14.2 <= \"foo\"", documents.get(0)));
// null literal comparisons.
assertEquals(Result.TRUE, evaluate("null == null", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null = null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null != null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null == \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("14.3 == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null = 0", documents.get(0)));
// Field value comparisons against the fixture documents.
assertEquals(Result.TRUE, evaluate("test.hint = 24", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint = 24", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hint = 13", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hint = 13", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.hfloat = 2.0", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hfloat = 1.0", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hfloat = 4.1", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hfloat > 4.09 and test.hfloat < 4.11", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.content = \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = \"bar\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.content = \"foo\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.content = \"foo\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hstring == test.content", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hstring == test.content", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.hint + 1 > 13", documents.get(1)));
// documents.get(6) is the "missingint" document: its "hint" field is unset,
// so ordering comparisons against it are INVALID while !=/== still decide.
DocumentPut doc1234 = documents.get(6);
assertEquals(Result.TRUE, evaluate("test.hint != 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint == 1234", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint < 1234 and false", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234 and true", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint < 1234 or true", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234 or false", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint and test.hint < 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint != null and test.hint < 1234", doc1234));
// Bare type/field references: TRUE when the type matches and the field has a value.
assertEquals(Result.TRUE, evaluate("test", documents.get(0)));
assertEquals(Result.FALSE, evaluate("nonexisting", documents.get(0)));
assertEquals(Result.FALSE, evaluate("nonexisting.reallynonexisting", documents.get(0)));
assertEquals(Result.INVALID, evaluate("nonexisting.reallynonexisting > 13", documents.get(0)));
assertEquals(Result.FALSE, evaluate("true.foo", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hint", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hstring", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint", doc1234));
assertEquals(Result.FALSE, evaluate("test.hstring", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hstring", doc1234));
// Field-vs-null comparisons (both operand orders).
assertEquals(Result.TRUE, evaluate("test.hint != null", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null != test.hint", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null == test.hint", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null == test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint == null", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint != null", doc1234));
assertEquals(Result.FALSE, evaluate("null != test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint or true", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hint and true", doc1234));
assertEquals(Result.FALSE, evaluate("not test.hint and false", doc1234));
// id and its sub-fields; keywords are case-insensitive, values are not.
assertEquals(Result.TRUE, evaluate("id == \"id:myspace:test::anything\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate(" iD== \"id:myspace:test::anything\" ", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id == \"id:myspa:test::nything\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("Id.scHeme == \"xyz\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.scheme == \"id\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.type == \"test\"", documents.get(4)));
assertEquals(Result.FALSE, evaluate("id.type == \"wrong\"", documents.get(4)));
assertEquals(Result.TRUE, evaluate("Id.namespaCe == \"myspace\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id.NaMespace == \"pace\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific == \"anything\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.user=1234", documents.get(2)));
assertEquals(Result.TRUE, evaluate("id.user=2345", documents.get(4)));
assertEquals(Result.TRUE, evaluate("id.group=\"mygroup\"", documents.get(5)));
// Asking for user/group on ids without them throws at evaluation time.
assertError("id.user == 1234", documents.get(0), "User identifier is null.");
assertError("id.group == 1234", documents.get(3), "Group identifier is null.");
assertError("id.group == \"yahoo\"", documents.get(3), "Group identifier is null.");
// Boolean operators and precedence ("and" binds tighter than "or").
assertEquals(Result.FALSE, evaluate("true and false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true or false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("false and true or true and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("false or true and true or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("not false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("not true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true and not false or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("((243 < 300) and (\"FOO\".lowercase() == \"foo\"))", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.content = 1 or true", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 and false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 or false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("true and test.content = 1", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true or test.content = 1", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false and test.content = 1", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false or test.content = 1", documents.get(0)));
// Functions: lowercase(), hash(), abs().
assertEquals(Result.FALSE, evaluate("test.hstring.lowercase() == \"Yet\"", documents.get(3)));
assertEquals(Result.TRUE, evaluate("test.hstring.lowercase() == \"yet\"", documents.get(3)));
assertEquals(Result.FALSE, evaluate("test.hfloat.lowercase() == \"yet\"", documents.get(3)));
assertEquals(Result.TRUE, evaluate("\"bar\".hash() == -270124981", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\".hash().abs() == 270124981", documents.get(0)));
assertError("null.hash() == 22460089", documents.get(0), "Can not invoke 'hash()' on 'null' because that term evaluated to null");
assertEquals(Result.FALSE, evaluate("(0.234).hash() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("(0.234).lowercase() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\".hash() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("(234).hash() == 123", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific.hash() = 596580044", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific.hash() % 10 = 4", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id.specific.hash() % 10 = 2", documents.get(0)));
// Arithmetic: string concatenation with +, modulo semantics (sign follows dividend).
assertEquals(Result.TRUE, evaluate("\"foo\" + \"bar\" = \"foobar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\" + 4 = 25", documents.get(0)));
assertEquals(Result.FALSE, evaluate("34.0 % 4 = 4", documents.get(0)));
assertEquals(Result.TRUE, evaluate("-6 % 10 = -6", documents.get(0)));
// now() returns the current time in seconds.
assertEquals(Result.FALSE, evaluate("0 > now()", documents.get(0)));
assertEquals(Result.TRUE, evaluate("0 < now()", documents.get(0)));
assertEquals(Result.TRUE, evaluate("0 < now() - 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("now() - 20 < now() - 10", documents.get(0)));
long secondsNow = System.currentTimeMillis() / 1000;
assertEquals(Result.TRUE, evaluate("now() - " + secondsNow + " < 2", documents.get(0)));
// Structs: only document 1 has a non-empty mystruct (see createDocs()).
assertEquals(Result.FALSE, evaluate("test.mystruct", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.mystruct == test.mystruct", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct == test.mystruct", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct != test.mystruct", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.mystruct != test.mystruct", documents.get(1)));
assertEquals(Result.INVALID, evaluate("test.mystruct < test.mystruct", documents.get(0)));
// Struct arrays: sub-field access matches any element; [n] indexes a specific one.
assertEquals(Result.FALSE, evaluate("test.structarray", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray == test.structarray", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray == test.structarray", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.key == 15", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray[4].key == 15", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray.key == 15", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].key == 16", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].key = 16", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value == \"structval1\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray[4].value == \"structval1\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray.value == \"structval1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[0].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.key = 15", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray.key = 15", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray.key = 15", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value = \"structval2\"", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray.value = \"*ctval*\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].value = \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray[1].value = \"batman\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray.value =~ \"structval[1-9]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value =~ \"structval[a-z]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct.value = \"struc?val\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct.value = \"struc?val\"", documents.get(1)));
assertEquals(Result.INVALID, evaluate("test.mystruct.value =~ \"struct.*\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct.value =~ \"struct.*\"", documents.get(1)));
// Int arrays: a comparison holds if ANY element satisfies it.
assertEquals(Result.FALSE, evaluate("test.intarray < 5", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray < 5", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray > 80", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.intarray > 80", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray >= 84", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray <= 3", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray == 84", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray != 84", documents.get(0)));
// Iterator variables: [$x] binds the same element index across both terms.
assertEquals(Result.TRUE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$x].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$x].value == \"structval2\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$y].value == \"structval2\"", documents.get(1)));
// Maps: {key} addresses a value; bare comparison matches against the keys.
assertEquals(Result.FALSE, evaluate("test.mymap", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mymap", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3}", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{9}", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} == \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} == \"b\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{9} == \"b\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap == \"a\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} = \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} = \"b\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} =~ \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} =~ \"b\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap.value = \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap.value = \"d\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap.value =~ \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap.value =~ \"d\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap == 3", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap == 4", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap = 3", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap = 4", documents.get(1)));
// Map of struct arrays: {$x}[$y] binds map key and array index consistently.
assertEquals(Result.TRUE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarrmap.value[$y].key == 15 AND test.structarrmap.value[$y].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap.value[$y].key == 15 AND test.structarrmap.value[$y].value == \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$y}[$x].value == \"structval2\"", documents.get(1)));
// Weighted sets: {key} tests membership, comparisons match elements/weights.
assertEquals(Result.TRUE, evaluate("test.stringweightedset", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset{val1}", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset{val1} == 1", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset{val1} == 2", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset == \"val1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset = \"val*\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset =~ \"val[0-9]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset == \"val5\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset = \"val5\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset =~ \"val5\"", documents.get(1)));
// $x may even span different container types within one expression.
assertEquals(Result.TRUE, evaluate("test.structarrmap{$x}.key == 15 AND test.stringweightedset{$x}", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}.key == 17 AND test.stringweightedset{$x}", documents.get(1)));
}
@Test
public void testTicket1769674() {
    // Regression test: an unterminated string literal must surface as a lexical error.
    String expression = "music.uri=\"junk";
    String expectedError = "Lexical error at line -1, column 17. Encountered: <EOF> after : \"\\\"junk\"";
    assertParseError(expression, expectedError);
}
@Test
public void testThatVisitingReportsCorrectResult() throws ParseException {
    // Selections comparing a field against now() (optionally offset) are convertible.
    for (String expression : new String[] {
            "music.expire > now()",
            "music.expire > now() and video.expire > now()",
            "music.expire > now() or video",
            "music.expire > now() or video.date < 300",
            "video.date < 300 or music.expire > now()",
            "video.date < 300 and music.expire > now()",
            "music.insertdate > now() - 300 and video.expire > now() - 3600" }) {
        assertVisitWithValidNowWorks(expression);
    }
    // Selections without now() require no conversion at all.
    assertVisitWithoutNowWorks("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval1\"");
    assertVisitWithoutNowWorks("music.artist.lowercase() == \"chang\"");
    // Unsupported shapes around now() must fail conversion with a precise message.
    String[][] failing = {
            { "music.expire > now() + 300", "Arithmetic operator '+' is not supported" },
            { "music.expire < now()", "Comparison operator '<' is not supported" },
            { "music.expire >= now()", "Comparison operator '>=' is not supported" },
            { "now() > now()", "Left hand side of comparison must be a document field" },
            { "music.name.hash() > now()", "Only attribute items are supported" }
    };
    for (String[] expressionAndError : failing) {
        assertVisitWithInvalidNowFails(expressionAndError[0], expressionAndError[1]);
    }
}
@Test
public void testThatSelectionIsConvertedToQueries() throws ParseException {
    // "field > now() - N" converts to a per-doctype "field:>now(N)" query.
    List<String> musicOnly = Arrays.asList("music");
    assertThatQueriesAreCreated("music.expire > now()", musicOnly, Arrays.asList("expire:>now(0)"));
    assertThatQueriesAreCreated("music.expire > now() - 300", musicOnly, Arrays.asList("expire:>now(300)"));
    assertThatQueriesAreCreated("music.expire > now() - 300 and video.expire > now() - 3600",
                                Arrays.asList("music", "video"),
                                Arrays.asList("expire:>now(300)", "expire:>now(3600)"));
    assertThatQueriesAreCreated("music.expire > now() - 300 or video", musicOnly, Arrays.asList("expire:>now(300)"));
    // Conversion limitations: one now() term per doctype, field on the left-hand side.
    assertVisitWithInvalidNowFails("music.field1 > now() - 300 and music.field2 > now() - 300",
                                   "Specifying multiple document types is not allowed");
    assertVisitWithInvalidNowFails("music.field1 > now() - 300 and video.field1 > now() and music.field2 > now() - 300",
                                   "Specifying multiple document types is not allowed");
    assertVisitWithInvalidNowFails("now() > music.field", "Left hand side of comparison must be a document field");
}
/**
 * Asserts that converting the given selection yields exactly the expected
 * doctype-to-query mapping (paired element-wise from the two lists).
 */
public void assertThatQueriesAreCreated(String selection, List<String> expectedDoctypes, List<String> expectedQueries) throws ParseException {
    DocumentSelector selector = new DocumentSelector(selection);
    NowCheckVisitor nowChecker = new NowCheckVisitor();
    selector.visit(nowChecker);
    assertTrue(nowChecker.requiresConversion());
    SelectionExpressionConverter converter = new SelectionExpressionConverter();
    selector.visit(converter);
    Map<String, String> queries = converter.getQueryMap();
    assertEquals(expectedQueries.size(), queries.size());
    for (int i = 0; i < expectedQueries.size(); i++) {
        String doctype = expectedDoctypes.get(i);
        assertTrue(queries.containsKey(doctype));
        assertEquals(expectedQueries.get(i), queries.get(doctype));
    }
}
/** Asserts that visiting the expression concludes that no now()-conversion is required. */
public void assertVisitWithoutNowWorks(String expression) throws ParseException {
    NowCheckVisitor nowChecker = new NowCheckVisitor();
    new DocumentSelector(expression).visit(nowChecker);
    assertFalse(nowChecker.requiresConversion());
}
/**
 * Asserts that the expression requires now()-conversion AND that conversion
 * succeeds without throwing.
 */
public void assertVisitWithValidNowWorks(String expression) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expression);
    NowCheckVisitor visitor = new NowCheckVisitor();
    selector.visit(visitor);
    assertTrue(visitor.requiresConversion());
    SelectionExpressionConverter converter = new SelectionExpressionConverter();
    try {
        selector.visit(converter);
    } catch (Exception e) {
        // fail() states the intent directly; the original assertFalse(msg, true)
        // was an obfuscated unconditional failure.
        fail("Converter throws exception : " + e.getMessage());
    }
}
/**
 * Asserts that the expression requires now()-conversion but that conversion
 * fails with exactly the expected error message.
 */
public void assertVisitWithInvalidNowFails(String expression, String expectedError) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expression);
    NowCheckVisitor visitor = new NowCheckVisitor();
    selector.visit(visitor);
    assertTrue(visitor.requiresConversion());
    SelectionExpressionConverter converter = new SelectionExpressionConverter();
    try {
        selector.visit(converter);
        // fail() states the intent directly; the original assertFalse(msg, true)
        // was an obfuscated unconditional failure. Its AssertionError is not an
        // Exception, so the catch below does not swallow it.
        fail("Should not be able to convert " + expression + " query");
    } catch (Exception e) {
        assertEquals(expectedError, e.getMessage());
    }
}
/**
 * Builds a put for a "test" document with the given field values.
 * hInt and hString may be null, which leaves the corresponding field unset.
 */
private static DocumentPut createDocument(String id, Integer hInt, float hFloat, String hString, String content) {
    Document document = new Document(manager.getDocumentType("test"), new DocumentId(id));
    if (hInt != null) {
        document.setFieldValue("hint", new IntegerFieldValue(hInt));
    }
    document.setFieldValue("hfloat", new FloatFieldValue(hFloat));
    if (hString != null) {
        document.setFieldValue("hstring", new StringFieldValue(hString));
    }
    document.setFieldValue("content", new StringFieldValue(content));
    return new DocumentPut(document);
}
/** Asserts that the expression parses and that its parsed form round-trips to itself. */
private static void assertParse(String expression) throws ParseException {
    assertParse(expression, expression);
}
/**
 * Parses expressionString; when expectedString is non-null, also asserts that
 * the parsed selector's canonical toString() equals it. A null expectedString
 * makes this a parse-only check.
 */
private static void assertParse(String expectedString, String expressionString) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expressionString);
    if (expectedString == null) {
        return; // parse-only
    }
    assertEquals(expectedString, selector.toString());
}
/**
 * Asserts that parsing expressionString fails and that the resulting error
 * message starts with expectedError. Unwraps TokenMgrException causes so
 * lexer errors are compared the same way as parser errors.
 */
private static void assertParseError(String expressionString, String expectedError) {
    try {
        new DocumentSelector(expressionString);
        fail("The expression '" + expressionString + "' should throw an exception.");
    }
    catch (ParseException e) {
        Throwable t = e;
        if (t.getCause() instanceof TokenMgrException) {
            t = t.getCause();
        }
        String message = Exceptions.toMessageString(t);
        // startsWith avoids the StringIndexOutOfBoundsException the previous
        // substring(0, expectedError.length()) threw when the actual message
        // was shorter than the expected prefix.
        assertTrue("Expected error starting with '" + expectedError + "', got '" + message + "'",
                   message.startsWith(expectedError));
    }
}
/** Parses expressionString as a selector and evaluates it against the given operation. */
private static Result evaluate(String expressionString, DocumentOperation op) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expressionString);
    return selector.accepts(op);
}
/**
 * Asserts that evaluating expressionString against op throws a
 * RuntimeException whose message starts with expectedError. Parsing itself
 * must succeed; a ParseException is a test failure.
 */
private static void assertError(String expressionString, DocumentOperation op, String expectedError) {
    try {
        evaluate(expressionString, op);
        fail("The evaluation of '" + expressionString + "' should throw an exception.");
    } catch (ParseException e) {
        // Message fixed: the original "should assertEquals ok." was a botched
        // mechanical search/replace of "should parse ok."
        fail("The expression '" + expressionString + "' should parse ok.");
    } catch (RuntimeException e) {
        System.err.println("Error was : " + e);
        // Length guard keeps the substring below from throwing on short messages.
        assertTrue(e.getMessage().length() >= expectedError.length());
        assertEquals(expectedError, e.getMessage().substring(0, expectedError.length()));
    }
}
} |
Fixed | public void testDocumentUpdate() throws ParseException {
DocumentUpdate upd = new DocumentUpdate(manager.getDocumentType("test"), new DocumentId("id:myspace:test::adocnything"));
assertEquals(Result.TRUE, evaluate("test", upd));
assertEquals(Result.FALSE, evaluate("music", upd));
assertEquals(Result.TRUE, evaluate("test or music", upd));
assertEquals(Result.FALSE, evaluate("test and music", upd));
assertEquals(Result.INVALID, evaluate("test.hint", upd));
assertEquals(Result.INVALID, evaluate("test.anything", upd));
assertEquals(Result.INVALID, evaluate("test.hint < 24", upd));
} | DocumentUpdate upd = new DocumentUpdate(manager.getDocumentType("test"), new DocumentId("id:myspace:test::adocnything")); | public void testDocumentUpdate() throws ParseException {
DocumentUpdate upd = new DocumentUpdate(manager.getDocumentType("test"), new DocumentId("id:myspace:test::anything"));
assertEquals(Result.TRUE, evaluate("test", upd));
assertEquals(Result.FALSE, evaluate("music", upd));
assertEquals(Result.TRUE, evaluate("test or music", upd));
assertEquals(Result.FALSE, evaluate("test and music", upd));
assertEquals(Result.INVALID, evaluate("test.hint", upd));
assertEquals(Result.INVALID, evaluate("test.anything", upd));
assertEquals(Result.INVALID, evaluate("test.hint < 24", upd));
} | class DocumentSelectorTestCase {
// Shared type manager; (re)populated by setUp() before each test.
private static DocumentTypeManager manager = new DocumentTypeManager();
@Before
public void setUp() {
// Define the "test" document type with the primitive, struct, array,
// weighted-set and map fields referenced by the selection tests below.
DocumentType type = new DocumentType("test");
type.addHeaderField("hint", DataType.INT);
type.addHeaderField("hfloat", DataType.FLOAT);
type.addHeaderField("hstring", DataType.STRING);
type.addField("content", DataType.STRING);
// mystruct: { key: int, value: string }
StructDataType mystruct = new StructDataType("mystruct");
mystruct.addField(new Field("key", DataType.INT, false));
mystruct.addField(new Field("value", DataType.STRING, false));
type.addHeaderField("mystruct", mystruct);
ArrayDataType structarray = new ArrayDataType(mystruct);
type.addField("structarray", structarray);
type.addField("stringweightedset", new WeightedSetDataType(DataType.STRING, false, false));
type.addField("mymap", new MapDataType(DataType.INT, DataType.STRING));
type.addField("structarrmap", new MapDataType(DataType.STRING, structarray));
ArrayDataType intarray = new ArrayDataType(DataType.INT);
type.addField("intarray", intarray);
manager.registerDocumentType(type);
// Field-less types used by the parsing / reserved-word tests.
manager.registerDocumentType(new DocumentType("notandor"));
manager.registerDocumentType(new DocumentType("ornotand"));
manager.registerDocumentType(new DocumentType("andornot"));
manager.registerDocumentType(new DocumentType("idid"));
manager.registerDocumentType(new DocumentType("usergroup"));
}
@Test
public void testParsing() throws ParseException {
// Numeric literals, incl. scientific notation; the single-arg form asserts
// the expression round-trips through the selector's toString().
assertParse("3.14 > 0");
assertParse("-999 > 0");
assertParse("150000.0 > 0", "15e4 > 0");
assertParse("3.4E-4 > 0", "3.4e-4 > 0");
// Strings, id fields and hash() on id parts.
assertParse("\" Test \" = \"*\"");
assertParse("id = \"*\"", "id = '*'");
assertParse("id.group == 3");
assertParse("id.namespace = \"*\"");
assertParse("id.hash() > 0");
assertParse("id.namespace.hash() > 0");
// Document-type field access, including underscores in type names.
assertParse("music.artist = \"*\"");
assertParse("music.artist.lowercase() = \"*\"");
assertParse("music_.artist = \"*\"");
assertParse("music_foo.artist = \"*\"");
assertParse("music_foo_.artist = \"*\"");
// Arithmetic normalization: spacing, unary signs, operator precedence.
assertParse("(4 + 3) > 0", "(4+3) > 0");
assertParse("1 + 1 > 0", "1 +1 > 0");
assertParse("1 + -1 > 0", "1 + -1 > 0");
assertParse("1 + 1.0 > 0", "1 + +1.0 > 0");
assertParse("1 - 1 > 0", "1 -1 > 0");
assertParse("1 - -1 > 0", "1 - -1 > 0");
assertParse("1 - 1.0 > 0", "1 - +1.0 > 0");
assertParse("1 + 2 * 3 - 10 % 2 / 3 > 0", "1 +2 * 3- 10%2 /3 > 0");
assertParse("((43 + 14) / 34) > 0");
assertParse("(34 * ((3 - 1) % 4)) > 0");
// Boolean logic over document-type tests; parenthesization is preserved.
assertParse("true");
assertParse("false");
assertParse("music");
assertParse("(music or book)");
assertParse("music or book", "music or book");
assertParse("(music or (book and video))");
assertParse("music or (book and video)", "music or (book and video)");
assertParse("((music or book) and video)");
assertParse("(music or book) and video", "(music or book) and video");
assertParse("music.test > 0");
assertParse("music.artist = \"*john*\"");
assertParse("music.length >= 180");
// Logical keywords are case-insensitive.
assertParse("true or not false and true", "true oR nOt false And true");
assertParse("(true or false) and true", "(true oR false) aNd true");
// now() in comparisons; "now" alone is an ordinary type name.
assertParse("music.expire > now()");
assertParse("music.expire > now() - 300");
assertParse("now or now_search");
assertParse("(music.expire / 1000) > (now() - 300)");
}
@Test
public void testReservedWords() throws ParseException {
// Reserved words must still work as identifiers/prefixes of type names.
// expectedString is null, so these are parse-only checks.
assertParse(null, "id == 'id' or id_t or idtype");
assertParse(null, "id.scheme == 'scheme' or scheme_t or schemetype");
assertParse(null, "id.namespace == 'namespace' or namespace_t or namespacetype");
assertParse(null, "id.specific == 'specific' or specific_t or specifictype");
assertParse(null, "id.user == 'user' or user_t or usertype");
assertParse(null, "id.group == 'group' or group_t or grouptype");
assertParse(null, "id.bucket == 'bucket' or bucket_t or buckettype");
assertParse(null, "null == 'null' or null_t or nulltype");
assertParse(null, "true or true_t or truetype");
assertParse(null, "false or false_t or falsetype");
assertParse(null, "true or and_t or andtype");
assertParse(null, "true or or_t or ortype");
}
@Test
public void testCjkParsing() throws ParseException {
// CJK characters in string literals are canonicalized to \'u' escapes,
// whether given as raw characters or as escape sequences.
assertParse("music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"",
"music.artist = \"\u4f73\u80fd\u7d22\u5c3c\u60e0\u666e\"");
assertParse("music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"",
"music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"");
}
@Test
public void testParseTerminals() throws ParseException {
// Numeric terminals and their canonical rendering.
assertParse("true");
assertParse("music.hmm == 123");
assertParse("music.hmm == 123.53", "music.hmm == +123.53");
assertParse("music.hmm == -123.5");
assertParse("music.hmm == 2.3412352E8", "music.hmm == 234123.52e3");
assertParse("music.hmm == -234.12352", "music.hmm == -234123.52E-3");
assertParse("music.hmm < aaa");
// String terminals, map/array subscripts, whitespace and escape handling.
assertParse("music.hmm == \"test\"");
assertParse("music.hmm{test} == \"test\"");
assertParse("music.hmm{test}.foo[3].key == \"test\"");
assertParse("music.hmm == \"te st \"");
assertParse("music.hmm == \"test\"", " \t music.hmm\t== \t \"test\"\t");
assertParse("music.hmm == \"tab\\ttest\"");
assertParse("music.hmm == \"tab\\u0666test\"", "music.hmm == \"tab\\u0666test\"");
assertParse("music.hmm == \"tabcomplete\"", "music.hmm == \"tabcomplete\"");
// Unknown escapes drop the backslash; \x is not a recognized escape.
assertParse("music.hmm == \"tabysf\"", "music.hmm == \"tab\\ysf\"");
assertParse("music.h == \"\\ttx48 \\n\"", "music.h == \"\\tt\\x48 \\n\"");
// "<>" is not a valid operator.
assertParseError("music.hmm <> 12", "Exception parsing document selector 'music.hmm <> 12': Encountered \" \">\" \">\"\" at line 1, column 12.");
// Comparison operators.
assertParse("music.hmm >= 123");
assertParse("music.hmm > 123");
assertParse("music.hmm <= 123");
assertParse("music.hmm < 123");
assertParse("music.hmm != 123");
assertParse("music.hmm");
// Boolean literals are case-insensitive and normalized to lowercase.
assertParse("true", "TRUE");
assertParse("false", "FALSE");
assertParse("true", "true");
assertParse("false", "false");
assertParse("false", "faLSe");
// id components; hex bucket literals normalize to signed decimal.
assertParse("mytype");
assertParse("id == \"id:ns:mytype::mytest\"");
assertParse("id.namespace == \"myspace\"");
assertParse("id.scheme == \"id\"");
assertParse("id.type == \"mytype\"");
assertParse("id.user == 1234");
assertParse("id.bucket == 8388608", "id.bucket == 0x800000");
assertParse("id.bucket == 8429568", "id.bucket == 0x80a000");
assertParse("id.bucket == -9223372036854775566",
"id.bucket == 0x80000000000000f2");
assertParse("id.group == \"yahoo.com\"");
assertParse("id.specific == \"mypart\"");
assertParse("id.scheme = \"*doc\"");
assertParse("music.artist =~ \"(john|barry|shrek)\"");
// Function invocations on fields and literals.
assertParse("id.hash() == 124");
assertParse("id.specific.hash() == 124");
assertParse("music.artist.lowercase() == \"chang\"");
assertParse("music.artist.lowercase().hash() == 124");
assertParse("music.version() == 8");
assertParse("music == 8");
// Parenthesized operands and arithmetic grouping.
assertParse("(123) < (200)", "(123) < (200)");
assertParse("(\"hmm\") < (id.scheme)", "(\"hmm\") < (id.scheme)");
assertParse("(1 + 2) > 1");
assertParse("1 + 2 > 1", "1 + 2 > 1");
assertParse("(1 - 2) > 1");
assertParse("(1 * 2) > 1");
assertParse("(1 / 2) > 1");
assertParse("(1 % 2) > 1");
assertParse("((1 + 2) * (4 - 2)) == 1");
assertParse("(1 + 2) * (4 - 2) == 1", "(1 + 2) * (4 - 2) == 1");
assertParse("((23 + 643) / (34 % 10)) > 34");
assertParse("23 + 643 / 34 % 10 > 34", "23 + 643 / 34 % 10 > 34");
}
@Test
public void testParseReservedTokens() throws ParseException {
// "user" and "group" are id keywords but must still parse as ordinary
// document-type and field names.
assertParse("user.fieldName == \"fieldValue\"");
assertParse("documentName.user == \"fieldValue\"");
assertParse("group.fieldName == \"fieldValue\"");
assertParse("documentName.group == \"fieldValue\"");
}
@Test
public void testParseBranches() throws ParseException {
// Nested boolean branches and function calls inside parentheses.
assertParse("((true or false) and (false or true))");
assertParse("(true or (not false and not true))");
assertParse("((243) < 300 and (\"FOO\").lowercase() == (\"foo\"))");
}
/**
 * A DocumentRemove carries only an id: type selections evaluate normally,
 * field-dependent selections are INVALID (FALSE if the type cannot match).
 * Fixed: the method carried a duplicated {@code @Test} annotation, which does
 * not compile since {@code @Test} is not repeatable.
 */
@Test
public void testDocumentRemove() throws ParseException {
    assertEquals(Result.TRUE, evaluate("test", createRemove("id:ns:test::1")));
    assertEquals(Result.FALSE, evaluate("test", createRemove("id:ns:null::1")));
    assertEquals(Result.TRUE, evaluate("test", createRemove("id:ns:test:n=1234:1")));
    assertEquals(Result.INVALID, evaluate("test.hint", createRemove("id:ns:test::1")));
    assertEquals(Result.FALSE, evaluate("test.hint", createRemove("id:ns:null::1")));
    assertEquals(Result.INVALID, evaluate("test.hint == 0", createRemove("id:ns:test::1")));
    assertEquals(Result.INVALID, evaluate("test.anything", createRemove("id:ns:test::1")));
    assertEquals(Result.INVALID, evaluate("test and test.hint == 0", createRemove("id:ns:test::1")));
}
/** Convenience factory for a DocumentRemove of the given document id. */
private DocumentRemove createRemove(String docId) {
    DocumentId documentId = new DocumentId(docId);
    return new DocumentRemove(documentId);
}
@Test
public void testDocumentGet() throws ParseException {
// A DocumentGet carries only an id, so it behaves like remove:
// type selections evaluate, field selections are INVALID.
assertEquals(Result.TRUE, evaluate("test", createGet("id:ns:test::1")));
assertEquals(Result.FALSE, evaluate("test", createGet("id:ns:null::1")));
assertEquals(Result.TRUE, evaluate("test", createGet("id:ns:test:n=1234:1")));
assertEquals(Result.INVALID, evaluate("test.hint", createGet("id:ns:test::1")));
assertEquals(Result.FALSE, evaluate("test.hint", createGet("id:ns:null::1")));
assertEquals(Result.INVALID, evaluate("test.hint == 0", createGet("id:ns:test::1")));
assertEquals(Result.INVALID, evaluate("test.anything", createGet("id:ns:test::1")));
assertEquals(Result.INVALID, evaluate("test and test.hint == 0", createGet("id:ns:test::1")));
}
/** Convenience factory for a DocumentGet of the given document id. */
private DocumentGet createGet(String docId) {
    DocumentId documentId = new DocumentId(docId);
    return new DocumentGet(documentId);
}
@Test
public void testInvalidLogic() throws ParseException {
// Same missing-field selection against a put (field simply unset -> FALSE)
// and an update (no field values at all -> INVALID), combined with and/or
// to exercise three-valued logic short-circuiting in both operand orders.
DocumentPut put = new DocumentPut(manager.getDocumentType("test"), new DocumentId("id:ns:test::"));
DocumentUpdate upd = new DocumentUpdate(manager.getDocumentType("test"), new DocumentId("id:ns:test::"));
assertEquals(Result.FALSE, evaluate("test.content", put));
assertEquals(Result.INVALID, evaluate("test.content", upd));
assertEquals(Result.FALSE, evaluate("test.content = 1", put));
assertEquals(Result.INVALID, evaluate("test.content = 1", upd));
assertEquals(Result.FALSE, evaluate("test.content = 1 and true", put));
assertEquals(Result.INVALID, evaluate("test.content = 1 and true", upd));
assertEquals(Result.TRUE, evaluate("test.content = 1 or true", put));
assertEquals(Result.TRUE, evaluate("test.content = 1 or true", upd));
assertEquals(Result.FALSE, evaluate("test.content = 1 and false", put));
assertEquals(Result.FALSE, evaluate("test.content = 1 and false", upd));
assertEquals(Result.FALSE, evaluate("test.content = 1 or false", put));
assertEquals(Result.INVALID, evaluate("test.content = 1 or false", upd));
assertEquals(Result.FALSE, evaluate("true and test.content = 1", put));
assertEquals(Result.INVALID, evaluate("true and test.content = 1", upd));
assertEquals(Result.TRUE, evaluate("true or test.content = 1", put));
assertEquals(Result.TRUE, evaluate("true or test.content = 1", upd));
assertEquals(Result.FALSE, evaluate("false and test.content = 1", put));
assertEquals(Result.FALSE, evaluate("false and test.content = 1", upd));
assertEquals(Result.FALSE, evaluate("false or test.content = 1", put));
assertEquals(Result.INVALID, evaluate("false or test.content = 1", upd));
}
/**
 * Builds the fixture puts used by the operator tests. Index 1 additionally
 * carries struct, struct-array, map, struct-array-map and weighted-set
 * payloads; index 2 carries empty struct/array values; indices 0 and 1 get
 * int arrays; index 6 leaves hint and hstring unset.
 */
List<DocumentPut> createDocs() {
List<DocumentPut> documents = new ArrayList<>();
documents.add(createDocument("id:myspace:test::anything", 24, 2.0f, "foo", "bar"));
documents.add(createDocument("id:anotherspace:test::foo", 13, 4.1f, "bar", "foo"));
documents.add(createDocument("id:myspace:test:n=1234:mail1", 15, 1.0f, "some", "some"));
documents.add(createDocument("id:myspace:test:n=5678:bar", 14, 2.4f, "Yet", "More"));
documents.add(createDocument("id:myspace:test:n=2345:mail2", 15, 1.0f, "bar", "baz"));
documents.add(createDocument("id:myspace:test:g=mygroup:qux", 15, 1.0f, "quux", "corge"));
documents.add(createDocument("id:myspace:test::missingint", null, 2.0f, null, "bar"));
// Struct value on document 1.
Struct sval = new Struct(documents.get(1).getDocument().getField("mystruct").getDataType());
sval.setFieldValue("key", new IntegerFieldValue(14));
sval.setFieldValue("value", new StringFieldValue("structval"));
documents.get(1).getDocument().setFieldValue("mystruct", sval);
// Struct array [ {15, "structval1"}, {16, "structval2"} ] on document 1.
Array<Struct> aval = new Array<>(documents.get(1).getDocument().getField("structarray").getDataType());
{
Struct sval1 = new Struct(aval.getDataType().getNestedType());
sval1.setFieldValue("key", new IntegerFieldValue(15));
sval1.setFieldValue("value", new StringFieldValue("structval1"));
Struct sval2 = new Struct(aval.getDataType().getNestedType());
sval2.setFieldValue("key", new IntegerFieldValue(16));
sval2.setFieldValue("value", new StringFieldValue("structval2"));
aval.add(sval1);
aval.add(sval2);
}
documents.get(1).getDocument().setFieldValue("structarray", aval);
// int->string map {3:"a", 5:"b", 7:"c"} on document 1.
MapFieldValue<IntegerFieldValue, StringFieldValue> mval =
new MapFieldValue<>((MapDataType)documents.get(1).getDocument().getField("mymap").getDataType());
mval.put(new IntegerFieldValue(3), new StringFieldValue("a"));
mval.put(new IntegerFieldValue(5), new StringFieldValue("b"));
mval.put(new IntegerFieldValue(7), new StringFieldValue("c"));
documents.get(1).getDocument().setFieldValue("mymap", mval);
// string -> struct-array map: "foo" reuses aval, "bar" gets fresh structs.
MapFieldValue<StringFieldValue, Array> amval =
new MapFieldValue<>((MapDataType)documents.get(1).getDocument().getField("structarrmap").getDataType());
amval.put(new StringFieldValue("foo"), aval);
Array<Struct> abval = new Array<>(documents.get(1).getDocument().getField("structarray").getDataType());
{
Struct sval1 = new Struct(aval.getDataType().getNestedType());
sval1.setFieldValue("key", new IntegerFieldValue(17));
sval1.setFieldValue("value", new StringFieldValue("structval3"));
Struct sval2 = new Struct(aval.getDataType().getNestedType());
sval2.setFieldValue("key", new IntegerFieldValue(18));
sval2.setFieldValue("value", new StringFieldValue("structval4"));
abval.add(sval1);
abval.add(sval2);
}
amval.put(new StringFieldValue("bar"), abval);
documents.get(1).getDocument().setFieldValue("structarrmap", amval);
// Weighted set of strings on document 1.
WeightedSet<StringFieldValue> wsval = new WeightedSet<>(documents.get(1).getDocument().getField("stringweightedset").getDataType());
wsval.add(new StringFieldValue("foo"));
wsval.add(new StringFieldValue("val1"));
wsval.add(new StringFieldValue("val2"));
wsval.add(new StringFieldValue("val3"));
wsval.add(new StringFieldValue("val4"));
documents.get(1).getDocument().setFieldValue("stringweightedset", wsval);
// Document 2 gets an EMPTY struct and struct array (distinct from unset).
Struct sval3 = new Struct(documents.get(2).getDocument().getField("mystruct").getDataType());
documents.get(2).getDocument().setFieldValue("mystruct", sval3);
Array aval2 = new Array(documents.get(2).getDocument().getField("structarray").getDataType());
documents.get(2).getDocument().setFieldValue("structarray", aval2);
// Int arrays on documents 0 and 1 for the array-comparison tests.
Array<IntegerFieldValue> intvals1 = new Array<>(documents.get(0).getDocument().getField("intarray").getDataType());
intvals1.add(new IntegerFieldValue(12));
intvals1.add(new IntegerFieldValue(40));
intvals1.add(new IntegerFieldValue(60));
intvals1.add(new IntegerFieldValue(84));
documents.get(0).getDocument().setFieldValue("intarray", intvals1);
Array<IntegerFieldValue> intvals2 = new Array<>(documents.get(1).getDocument().getField("intarray").getDataType());
intvals2.add(new IntegerFieldValue(3));
intvals2.add(new IntegerFieldValue(56));
intvals2.add(new IntegerFieldValue(23));
intvals2.add(new IntegerFieldValue(9));
documents.get(1).getDocument().setFieldValue("intarray", intvals2);
return documents;
}
@Test
public void testOperators() throws ParseException {
List<DocumentPut> documents = createDocs();
assertEquals(Result.TRUE, evaluate("", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 < 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 < 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 < 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 < 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 <= 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 <= 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 <= 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("10 >= 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 >= 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 >= 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("10 > 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 > 10", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 == 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 == 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 != 10", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 != 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"foo\" != \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\" != \"foo\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("'foo' == \"bar\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("'foo' == \"foo\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" = \"a\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" = \"*a*\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" = \"\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"\" = \"\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" =~ \"^a$\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" =~ \"a\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" =~ \"\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"\" =~ \"\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 = 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 = 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 < 10.2", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10.2 < 30", documents.get(0)));
assertEquals(Result.INVALID, evaluate("30 < \"foo\"", documents.get(0)));
assertEquals(Result.INVALID, evaluate("30 > \"foo\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 != \"foo\"", documents.get(0)));
assertEquals(Result.INVALID, evaluate("14.2 <= \"foo\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null == null", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null = null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null != null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null == \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("14.3 == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null = 0", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hint = 24", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint = 24", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hint = 13", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hint = 13", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.hfloat = 2.0", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hfloat = 1.0", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hfloat = 4.1", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hfloat > 4.09 and test.hfloat < 4.11", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.content = \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = \"bar\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.content = \"foo\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.content = \"foo\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hstring == test.content", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hstring == test.content", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.hint + 1 > 13", documents.get(1)));
DocumentPut doc1234 = documents.get(6);
assertEquals(Result.TRUE, evaluate("test.hint != 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint == 1234", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint < 1234 and false", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234 and true", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint < 1234 or true", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234 or false", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint and test.hint < 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint != null and test.hint < 1234", doc1234));
assertEquals(Result.TRUE, evaluate("test", documents.get(0)));
assertEquals(Result.FALSE, evaluate("nonexisting", documents.get(0)));
assertEquals(Result.FALSE, evaluate("nonexisting.reallynonexisting", documents.get(0)));
assertEquals(Result.INVALID, evaluate("nonexisting.reallynonexisting > 13", documents.get(0)));
assertEquals(Result.FALSE, evaluate("true.foo", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hint", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hstring", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint", doc1234));
assertEquals(Result.FALSE, evaluate("test.hstring", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hstring", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint != null", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null != test.hint", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null == test.hint", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null == test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint == null", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint != null", doc1234));
assertEquals(Result.FALSE, evaluate("null != test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint or true", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hint and true", doc1234));
assertEquals(Result.FALSE, evaluate("not test.hint and false", doc1234));
assertEquals(Result.TRUE, evaluate("id == \"id:myspace:test::anything\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate(" iD== \"id:myspace:test::anything\" ", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id == \"id:myspa:test::nything\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("Id.scHeme == \"xyz\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.scheme == \"id\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.type == \"test\"", documents.get(4)));
assertEquals(Result.FALSE, evaluate("id.type == \"wrong\"", documents.get(4)));
assertEquals(Result.TRUE, evaluate("Id.namespaCe == \"myspace\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id.NaMespace == \"pace\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific == \"anything\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.user=1234", documents.get(2)));
assertEquals(Result.TRUE, evaluate("id.user=2345", documents.get(4)));
assertEquals(Result.TRUE, evaluate("id.group=\"mygroup\"", documents.get(5)));
assertError("id.user == 1234", documents.get(0), "User identifier is null.");
assertError("id.group == 1234", documents.get(3), "Group identifier is null.");
assertError("id.group == \"yahoo\"", documents.get(3), "Group identifier is null.");
assertEquals(Result.FALSE, evaluate("true and false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true or false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("false and true or true and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("false or true and true or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("not false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("not true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true and not false or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("((243 < 300) and (\"FOO\".lowercase() == \"foo\"))", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.content = 1 or true", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 and false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 or false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("true and test.content = 1", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true or test.content = 1", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false and test.content = 1", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false or test.content = 1", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hstring.lowercase() == \"Yet\"", documents.get(3)));
assertEquals(Result.TRUE, evaluate("test.hstring.lowercase() == \"yet\"", documents.get(3)));
assertEquals(Result.FALSE, evaluate("test.hfloat.lowercase() == \"yet\"", documents.get(3)));
assertEquals(Result.TRUE, evaluate("\"bar\".hash() == -270124981", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\".hash().abs() == 270124981", documents.get(0)));
assertError("null.hash() == 22460089", documents.get(0), "Can not invoke 'hash()' on 'null' because that term evaluated to null");
assertEquals(Result.FALSE, evaluate("(0.234).hash() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("(0.234).lowercase() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\".hash() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("(234).hash() == 123", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific.hash() = 596580044", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific.hash() % 10 = 4", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id.specific.hash() % 10 = 2", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"foo\" + \"bar\" = \"foobar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\" + 4 = 25", documents.get(0)));
assertEquals(Result.FALSE, evaluate("34.0 % 4 = 4", documents.get(0)));
assertEquals(Result.TRUE, evaluate("-6 % 10 = -6", documents.get(0)));
assertEquals(Result.FALSE, evaluate("0 > now()", documents.get(0)));
assertEquals(Result.TRUE, evaluate("0 < now()", documents.get(0)));
assertEquals(Result.TRUE, evaluate("0 < now() - 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("now() - 20 < now() - 10", documents.get(0)));
long secondsNow = System.currentTimeMillis() / 1000;
assertEquals(Result.TRUE, evaluate("now() - " + secondsNow + " < 2", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.mystruct", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.mystruct == test.mystruct", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct == test.mystruct", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct != test.mystruct", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.mystruct != test.mystruct", documents.get(1)));
assertEquals(Result.INVALID, evaluate("test.mystruct < test.mystruct", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray == test.structarray", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray == test.structarray", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.key == 15", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray[4].key == 15", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray.key == 15", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].key == 16", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].key = 16", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value == \"structval1\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray[4].value == \"structval1\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray.value == \"structval1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[0].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.key = 15", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray.key = 15", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray.key = 15", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value = \"structval2\"", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray.value = \"*ctval*\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].value = \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray[1].value = \"batman\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray.value =~ \"structval[1-9]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value =~ \"structval[a-z]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct.value = \"struc?val\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct.value = \"struc?val\"", documents.get(1)));
assertEquals(Result.INVALID, evaluate("test.mystruct.value =~ \"struct.*\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct.value =~ \"struct.*\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.intarray < 5", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray < 5", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray > 80", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.intarray > 80", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray >= 84", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray <= 3", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray == 84", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray != 84", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$x].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$x].value == \"structval2\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$y].value == \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mymap", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3}", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{9}", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} == \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} == \"b\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{9} == \"b\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap == \"a\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} = \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} = \"b\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} =~ \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} =~ \"b\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap.value = \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap.value = \"d\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap.value =~ \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap.value =~ \"d\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap == 3", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap == 4", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap = 3", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap = 4", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarrmap.value[$y].key == 15 AND test.structarrmap.value[$y].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap.value[$y].key == 15 AND test.structarrmap.value[$y].value == \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$y}[$x].value == \"structval2\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset{val1}", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset{val1} == 1", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset{val1} == 2", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset == \"val1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset = \"val*\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset =~ \"val[0-9]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset == \"val5\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset = \"val5\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset =~ \"val5\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarrmap{$x}.key == 15 AND test.stringweightedset{$x}", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}.key == 17 AND test.stringweightedset{$x}", documents.get(1)));
}
@Test
public void testTicket1769674() {
    // Regression test: an unterminated string literal must surface as a lexical error.
    String selection = "music.uri=\"junk";
    String expectedError = "Lexical error at line -1, column 17. Encountered: <EOF> after : \"\\\"junk\"";
    assertParseError(selection, expectedError);
}
@Test
public void testThatVisitingReportsCorrectResult() throws ParseException {
// Expressions containing a supported now() comparison must require conversion
// and be accepted by the converter.
assertVisitWithValidNowWorks("music.expire > now()");
assertVisitWithValidNowWorks("music.expire > now() and video.expire > now()");
assertVisitWithValidNowWorks("music.expire > now() or video");
assertVisitWithValidNowWorks("music.expire > now() or video.date < 300");
assertVisitWithValidNowWorks("video.date < 300 or music.expire > now()");
assertVisitWithValidNowWorks("video.date < 300 and music.expire > now()");
assertVisitWithValidNowWorks("music.insertdate > now() - 300 and video.expire > now() - 3600");
// Expressions without now() must not require conversion.
assertVisitWithoutNowWorks("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval1\"");
assertVisitWithoutNowWorks("music.artist.lowercase() == \"chang\"");
// Unsupported operators/operand shapes around now() must fail conversion
// with the exact error message shown.
assertVisitWithInvalidNowFails("music.expire > now() + 300", "Arithmetic operator '+' is not supported");
assertVisitWithInvalidNowFails("music.expire < now()", "Comparison operator '<' is not supported");
assertVisitWithInvalidNowFails("music.expire >= now()", "Comparison operator '>=' is not supported");
assertVisitWithInvalidNowFails("now() > now()", "Left hand side of comparison must be a document field");
assertVisitWithInvalidNowFails("music.name.hash() > now()", "Only attribute items are supported");
}
@Test
public void testThatSelectionIsConvertedToQueries() throws ParseException {
// now()-based selections are converted into per-doctype attribute queries,
// e.g. "expire > now() - 300" becomes "expire:>now(300)".
assertThatQueriesAreCreated("music.expire > now()", Arrays.asList("music"), Arrays.asList("expire:>now(0)"));
assertThatQueriesAreCreated("music.expire > now() - 300", Arrays.asList("music"), Arrays.asList("expire:>now(300)"));
assertThatQueriesAreCreated("music.expire > now() - 300 and video.expire > now() - 3600", Arrays.asList("music", "video"), Arrays.asList("expire:>now(300)", "expire:>now(3600)"));
assertThatQueriesAreCreated("music.expire > now() - 300 or video", Arrays.asList("music"), Arrays.asList("expire:>now(300)"));
// Conversion is limited to one document type per sub-expression.
assertVisitWithInvalidNowFails("music.field1 > now() - 300 and music.field2 > now() - 300", "Specifying multiple document types is not allowed");
assertVisitWithInvalidNowFails("music.field1 > now() - 300 and video.field1 > now() and music.field2 > now() - 300", "Specifying multiple document types is not allowed");
assertVisitWithInvalidNowFails("now() > music.field", "Left hand side of comparison must be a document field");
}
/**
 * Asserts that the given now()-based selection requires conversion and that the
 * converter produces exactly the expected query string per expected document type.
 */
public void assertThatQueriesAreCreated(String selection, List<String> expectedDoctypes, List<String> expectedQueries) throws ParseException {
    DocumentSelector selector = new DocumentSelector(selection);
    NowCheckVisitor nowChecker = new NowCheckVisitor();
    selector.visit(nowChecker);
    assertTrue(nowChecker.requiresConversion());
    SelectionExpressionConverter queryConverter = new SelectionExpressionConverter();
    selector.visit(queryConverter);
    Map<String, String> queriesByDoctype = queryConverter.getQueryMap();
    assertEquals(expectedQueries.size(), queriesByDoctype.size());
    for (int i = 0; i < expectedQueries.size(); i++) {
        String doctype = expectedDoctypes.get(i);
        assertTrue(queriesByDoctype.containsKey(doctype));
        assertEquals(expectedQueries.get(i), queriesByDoctype.get(doctype));
    }
}
/** Asserts that visiting the expression concludes that no now() conversion is needed. */
public void assertVisitWithoutNowWorks(String expression) throws ParseException {
    NowCheckVisitor nowChecker = new NowCheckVisitor();
    new DocumentSelector(expression).visit(nowChecker);
    assertFalse(nowChecker.requiresConversion());
}
/**
 * Asserts that the expression contains now() (i.e. requires conversion) and that
 * the SelectionExpressionConverter accepts it without throwing.
 */
public void assertVisitWithValidNowWorks(String expression) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expression);
    NowCheckVisitor visitor = new NowCheckVisitor();
    selector.visit(visitor);
    assertTrue(visitor.requiresConversion());
    SelectionExpressionConverter converter = new SelectionExpressionConverter();
    try {
        selector.visit(converter);
    } catch (Exception e) {
        // fail(msg) is the idiomatic form of the original assertFalse(msg, true).
        fail("Converter throws exception : " + e.getMessage());
    }
}
/**
 * Asserts that the expression requires now() conversion but that the converter
 * rejects it with exactly the expected error message.
 */
public void assertVisitWithInvalidNowFails(String expression, String expectedError) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expression);
    NowCheckVisitor visitor = new NowCheckVisitor();
    selector.visit(visitor);
    assertTrue(visitor.requiresConversion());
    SelectionExpressionConverter converter = new SelectionExpressionConverter();
    try {
        selector.visit(converter);
        // fail(msg) replaces the original assertFalse(msg, true) anti-idiom; its
        // AssertionError is an Error, so the catch (Exception) below does not swallow it.
        fail("Should not be able to convert " + expression + " query");
    } catch (Exception e) {
        assertEquals(expectedError, e.getMessage());
    }
}
/**
 * Builds a DocumentPut for the "test" doctype with the given field values;
 * "hint" and "hstring" are left unset when their argument is null.
 */
private static DocumentPut createDocument(String id, Integer hInt, float hFloat, String hString, String content) {
    Document document = new Document(manager.getDocumentType("test"), new DocumentId(id));
    if (hInt != null) {
        document.setFieldValue("hint", new IntegerFieldValue(hInt));
    }
    document.setFieldValue("hfloat", new FloatFieldValue(hFloat));
    if (hString != null) {
        document.setFieldValue("hstring", new StringFieldValue(hString));
    }
    document.setFieldValue("content", new StringFieldValue(content));
    return new DocumentPut(document);
}
/** Parses the expression and asserts it round-trips unchanged through toString(). */
private static void assertParse(String expression) throws ParseException {
assertParse(expression, expression);
}
/**
 * Parses expressionString and, unless expectedString is null (parse-only check),
 * asserts that the parsed selector prints back as expectedString.
 */
private static void assertParse(String expectedString, String expressionString) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expressionString);
    if (expectedString == null) return;
    assertEquals(expectedString, selector.toString());
}
/**
 * Asserts that parsing the expression throws, and that the (possibly lexer-wrapped)
 * error message starts with expectedError.
 */
private static void assertParseError(String expressionString, String expectedError) {
    try {
        new DocumentSelector(expressionString);
    }
    catch (ParseException e) {
        // Lexical errors arrive wrapped; compare against the underlying cause then.
        Throwable reported = (e.getCause() instanceof TokenMgrException) ? e.getCause() : e;
        assertEquals(expectedError, Exceptions.toMessageString(reported).substring(0, expectedError.length()));
        return;
    }
    fail("The expression '" + expressionString + "' should throw an exception.");
}
/** Parses the selection expression and evaluates it against the given document operation. */
private static Result evaluate(String expressionString, DocumentOperation op) throws ParseException {
return new DocumentSelector(expressionString).accepts(op);
}
/**
 * Asserts that evaluating the expression against the operation fails at runtime
 * with a message that starts with expectedError. Parsing itself must succeed.
 */
private static void assertError(String expressionString, DocumentOperation op, String expectedError) {
    try {
        evaluate(expressionString, op);
        fail("The evaluation of '" + expressionString + "' should throw an exception.");
    } catch (ParseException e) {
        // Original message said "should assertEquals ok." — a search-replace artifact.
        fail("The expression '" + expressionString + "' should parse ok.");
    } catch (RuntimeException e) {
        System.err.println("Error was : " + e);
        assertTrue(e.getMessage().length() >= expectedError.length());
        assertEquals(expectedError, e.getMessage().substring(0, expectedError.length()));
    }
}
} | class DocumentSelectorTestCase {
private static DocumentTypeManager manager = new DocumentTypeManager();
@Before
public void setUp() {
// Build the "test" document type used by most tests: simple header fields plus
// struct, array, weighted-set and map fields for the container-access tests.
DocumentType type = new DocumentType("test");
type.addHeaderField("hint", DataType.INT);
type.addHeaderField("hfloat", DataType.FLOAT);
type.addHeaderField("hstring", DataType.STRING);
type.addField("content", DataType.STRING);
// Struct with an int key and string value; reused as the element type of structarray.
StructDataType mystruct = new StructDataType("mystruct");
mystruct.addField(new Field("key", DataType.INT, false));
mystruct.addField(new Field("value", DataType.STRING, false));
type.addHeaderField("mystruct", mystruct);
ArrayDataType structarray = new ArrayDataType(mystruct);
type.addField("structarray", structarray);
type.addField("stringweightedset", new WeightedSetDataType(DataType.STRING, false, false));
type.addField("mymap", new MapDataType(DataType.INT, DataType.STRING));
// Map from string to array-of-struct, exercising nested {key}[index].field access.
type.addField("structarrmap", new MapDataType(DataType.STRING, structarray));
ArrayDataType intarray = new ArrayDataType(DataType.INT);
type.addField("intarray", intarray);
manager.registerDocumentType(type);
// Extra types whose names collide with selection keywords (and/or/not, id, user, group).
manager.registerDocumentType(new DocumentType("notandor"));
manager.registerDocumentType(new DocumentType("ornotand"));
manager.registerDocumentType(new DocumentType("andornot"));
manager.registerDocumentType(new DocumentType("idid"));
manager.registerDocumentType(new DocumentType("usergroup"));
}
@Test
public void testParsing() throws ParseException {
// Numeric literals, including scientific notation; one-arg assertParse checks
// that the expression round-trips unchanged through toString().
assertParse("3.14 > 0");
assertParse("-999 > 0");
assertParse("150000.0 > 0", "15e4 > 0");
assertParse("3.4E-4 > 0", "3.4e-4 > 0");
// String literals and id/field accessors.
assertParse("\" Test \" = \"*\"");
assertParse("id = \"*\"", "id = '*'");
assertParse("id.group == 3");
assertParse("id.namespace = \"*\"");
assertParse("id.hash() > 0");
assertParse("id.namespace.hash() > 0");
assertParse("music.artist = \"*\"");
assertParse("music.artist.lowercase() = \"*\"");
assertParse("music_.artist = \"*\"");
assertParse("music_foo.artist = \"*\"");
assertParse("music_foo_.artist = \"*\"");
// Arithmetic expressions and normalization of signs/whitespace.
assertParse("(4 + 3) > 0", "(4+3) > 0");
assertParse("1 + 1 > 0", "1 +1 > 0");
assertParse("1 + -1 > 0", "1 + -1 > 0");
assertParse("1 + 1.0 > 0", "1 + +1.0 > 0");
assertParse("1 - 1 > 0", "1 -1 > 0");
assertParse("1 - -1 > 0", "1 - -1 > 0");
assertParse("1 - 1.0 > 0", "1 - +1.0 > 0");
assertParse("1 + 2 * 3 - 10 % 2 / 3 > 0", "1 +2 * 3- 10%2 /3 > 0");
assertParse("((43 + 14) / 34) > 0");
assertParse("(34 * ((3 - 1) % 4)) > 0");
// Boolean literals, doctype references and logical connectives.
assertParse("true");
assertParse("false");
assertParse("music");
assertParse("(music or book)");
assertParse("music or book", "music or book");
assertParse("(music or (book and video))");
assertParse("music or (book and video)", "music or (book and video)");
assertParse("((music or book) and video)");
assertParse("(music or book) and video", "(music or book) and video");
assertParse("music.test > 0");
assertParse("music.artist = \"*john*\"");
assertParse("music.length >= 180");
// Keywords are case-insensitive and normalized to lowercase.
assertParse("true or not false and true", "true oR nOt false And true");
assertParse("(true or false) and true", "(true oR false) aNd true");
// now() expressions, and identifiers that merely contain "now".
assertParse("music.expire > now()");
assertParse("music.expire > now() - 300");
assertParse("now or now_search");
assertParse("(music.expire / 1000) > (now() - 300)");
}
@Test
public void testReservedWords() throws ParseException {
// Reserved words must still be usable as prefixes/parts of identifiers;
// null expectedString means "must parse", with no toString() round-trip check.
assertParse(null, "id == 'id' or id_t or idtype");
assertParse(null, "id.scheme == 'scheme' or scheme_t or schemetype");
assertParse(null, "id.namespace == 'namespace' or namespace_t or namespacetype");
assertParse(null, "id.specific == 'specific' or specific_t or specifictype");
assertParse(null, "id.user == 'user' or user_t or usertype");
assertParse(null, "id.group == 'group' or group_t or grouptype");
assertParse(null, "id.bucket == 'bucket' or bucket_t or buckettype");
assertParse(null, "null == 'null' or null_t or nulltype");
assertParse(null, "true or true_t or truetype");
assertParse(null, "false or false_t or falsetype");
assertParse(null, "true or and_t or andtype");
assertParse(null, "true or or_t or ortype");
}
@Test
public void testCjkParsing() throws ParseException {
// Both raw CJK characters and already-escaped \\uXXXX sequences must
// normalize to the same escaped form in toString().
assertParse("music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"",
"music.artist = \"\u4f73\u80fd\u7d22\u5c3c\u60e0\u666e\"");
assertParse("music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"",
"music.artist = \"\\u4f73\\u80fd\\u7d22\\u5c3c\\u60e0\\u666e\"");
}
@Test
public void testParseTerminals() throws ParseException {
// Numeric comparisons and normalization of float/exponent forms.
assertParse("true");
assertParse("music.hmm == 123");
assertParse("music.hmm == 123.53", "music.hmm == +123.53");
assertParse("music.hmm == -123.5");
assertParse("music.hmm == 2.3412352E8", "music.hmm == 234123.52e3");
assertParse("music.hmm == -234.12352", "music.hmm == -234123.52E-3");
assertParse("music.hmm < aaa");
// String literals, map/array access syntax and escape handling.
assertParse("music.hmm == \"test\"");
assertParse("music.hmm{test} == \"test\"");
assertParse("music.hmm{test}.foo[3].key == \"test\"");
assertParse("music.hmm == \"te st \"");
assertParse("music.hmm == \"test\"", " \t music.hmm\t== \t \"test\"\t");
assertParse("music.hmm == \"tab\\ttest\"");
assertParse("music.hmm == \"tab\\u0666test\"", "music.hmm == \"tab\\u0666test\"");
assertParse("music.hmm == \"tabcomplete\"", "music.hmm == \"tabcomplete\"");
// Unknown escape \\y is dropped, \\x48 becomes literal "x48".
assertParse("music.hmm == \"tabysf\"", "music.hmm == \"tab\\ysf\"");
assertParse("music.h == \"\\ttx48 \\n\"", "music.h == \"\\tt\\x48 \\n\"");
// "<>" is not a valid operator.
assertParseError("music.hmm <> 12", "Exception parsing document selector 'music.hmm <> 12': Encountered \" \">\" \">\"\" at line 1, column 12.");
assertParse("music.hmm >= 123");
assertParse("music.hmm > 123");
assertParse("music.hmm <= 123");
assertParse("music.hmm < 123");
assertParse("music.hmm != 123");
assertParse("music.hmm");
// Boolean keywords are case-insensitive.
assertParse("true", "TRUE");
assertParse("false", "FALSE");
assertParse("true", "true");
assertParse("false", "false");
assertParse("false", "faLSe");
assertParse("mytype");
// id accessors, including hex bucket literals (0x... normalized to decimal).
assertParse("id == \"id:ns:mytype::mytest\"");
assertParse("id.namespace == \"myspace\"");
assertParse("id.scheme == \"id\"");
assertParse("id.type == \"mytype\"");
assertParse("id.user == 1234");
assertParse("id.bucket == 8388608", "id.bucket == 0x800000");
assertParse("id.bucket == 8429568", "id.bucket == 0x80a000");
assertParse("id.bucket == -9223372036854775566",
"id.bucket == 0x80000000000000f2");
assertParse("id.group == \"yahoo.com\"");
assertParse("id.specific == \"mypart\"");
assertParse("id.scheme = \"*doc\"");
assertParse("music.artist =~ \"(john|barry|shrek)\"");
// Function-call terminals.
assertParse("id.hash() == 124");
assertParse("id.specific.hash() == 124");
assertParse("music.artist.lowercase() == \"chang\"");
assertParse("music.artist.lowercase().hash() == 124");
assertParse("music.version() == 8");
assertParse("music == 8");
// Parenthesized operands and arithmetic precedence normalization.
assertParse("(123) < (200)", "(123) < (200)");
assertParse("(\"hmm\") < (id.scheme)", "(\"hmm\") < (id.scheme)");
assertParse("(1 + 2) > 1");
assertParse("1 + 2 > 1", "1 + 2 > 1");
assertParse("(1 - 2) > 1");
assertParse("(1 * 2) > 1");
assertParse("(1 / 2) > 1");
assertParse("(1 % 2) > 1");
assertParse("((1 + 2) * (4 - 2)) == 1");
assertParse("(1 + 2) * (4 - 2) == 1", "(1 + 2) * (4 - 2) == 1");
assertParse("((23 + 643) / (34 % 10)) > 34");
assertParse("23 + 643 / 34 % 10 > 34", "23 + 643 / 34 % 10 > 34");
}
@Test
public void testParseReservedTokens() throws ParseException {
// "user" and "group" are reserved in id expressions but must still work as
// plain field names and as document type names.
assertParse("user.fieldName == \"fieldValue\"");
assertParse("documentName.user == \"fieldValue\"");
assertParse("group.fieldName == \"fieldValue\"");
assertParse("documentName.group == \"fieldValue\"");
}
@Test
public void testParseBranches() throws ParseException {
// Nested boolean branches with parentheses must round-trip through toString().
assertParse("((true or false) and (false or true))");
assertParse("(true or (not false and not true))");
assertParse("((243) < 300 and (\"FOO\").lowercase() == (\"foo\"))");
}
// NOTE(review): the source carried a duplicated @Test annotation here, which does
// not compile (JUnit 4's @Test is not repeatable); exactly one is kept.
@Test
public void testDocumentRemove() throws ParseException {
    // A remove carries only the document id, so type-name selections resolve from the id.
    assertEquals(Result.TRUE, evaluate("test", createRemove("id:ns:test::1")));
    assertEquals(Result.FALSE, evaluate("test", createRemove("id:ns:null::1")));
    assertEquals(Result.TRUE, evaluate("test", createRemove("id:ns:test:n=1234:1")));
    // Field references cannot be resolved for a remove, so they evaluate to INVALID
    // unless the document type already rules the selection out (then FALSE).
    assertEquals(Result.INVALID, evaluate("test.hint", createRemove("id:ns:test::1")));
    assertEquals(Result.FALSE, evaluate("test.hint", createRemove("id:ns:null::1")));
    assertEquals(Result.INVALID, evaluate("test.hint == 0", createRemove("id:ns:test::1")));
    assertEquals(Result.INVALID, evaluate("test.anything", createRemove("id:ns:test::1")));
    assertEquals(Result.INVALID, evaluate("test and test.hint == 0", createRemove("id:ns:test::1")));
}
/** Convenience factory for a DocumentRemove operation on the given document id. */
private DocumentRemove createRemove(String docId) {
return new DocumentRemove(new DocumentId(docId));
}
@Test
public void testDocumentGet() throws ParseException {
// Gets behave like removes: only the id is available, so type selections work
// while field references yield INVALID (or FALSE when the type mismatches).
assertEquals(Result.TRUE, evaluate("test", createGet("id:ns:test::1")));
assertEquals(Result.FALSE, evaluate("test", createGet("id:ns:null::1")));
assertEquals(Result.TRUE, evaluate("test", createGet("id:ns:test:n=1234:1")));
assertEquals(Result.INVALID, evaluate("test.hint", createGet("id:ns:test::1")));
assertEquals(Result.FALSE, evaluate("test.hint", createGet("id:ns:null::1")));
assertEquals(Result.INVALID, evaluate("test.hint == 0", createGet("id:ns:test::1")));
assertEquals(Result.INVALID, evaluate("test.anything", createGet("id:ns:test::1")));
assertEquals(Result.INVALID, evaluate("test and test.hint == 0", createGet("id:ns:test::1")));
}
/** Convenience factory for a DocumentGet operation on the given document id. */
private DocumentGet createGet(String docId) {
return new DocumentGet(new DocumentId(docId));
}
@Test
public void testInvalidLogic() throws ParseException {
// Field references against an update (which carries no field values) evaluate to
// INVALID; this test pins how INVALID propagates through and/or with TRUE/FALSE.
DocumentPut put = new DocumentPut(manager.getDocumentType("test"), new DocumentId("id:ns:test::"));
DocumentUpdate upd = new DocumentUpdate(manager.getDocumentType("test"), new DocumentId("id:ns:test::"));
assertEquals(Result.FALSE, evaluate("test.content", put));
assertEquals(Result.INVALID, evaluate("test.content", upd));
assertEquals(Result.FALSE, evaluate("test.content = 1", put));
assertEquals(Result.INVALID, evaluate("test.content = 1", upd));
// INVALID and TRUE -> INVALID; INVALID or TRUE -> TRUE.
assertEquals(Result.FALSE, evaluate("test.content = 1 and true", put));
assertEquals(Result.INVALID, evaluate("test.content = 1 and true", upd));
assertEquals(Result.TRUE, evaluate("test.content = 1 or true", put));
assertEquals(Result.TRUE, evaluate("test.content = 1 or true", upd));
// INVALID and FALSE -> FALSE; INVALID or FALSE -> INVALID.
assertEquals(Result.FALSE, evaluate("test.content = 1 and false", put));
assertEquals(Result.FALSE, evaluate("test.content = 1 and false", upd));
assertEquals(Result.FALSE, evaluate("test.content = 1 or false", put));
assertEquals(Result.INVALID, evaluate("test.content = 1 or false", upd));
// Same combinations with the constant on the left-hand side.
assertEquals(Result.FALSE, evaluate("true and test.content = 1", put));
assertEquals(Result.INVALID, evaluate("true and test.content = 1", upd));
assertEquals(Result.TRUE, evaluate("true or test.content = 1", put));
assertEquals(Result.TRUE, evaluate("true or test.content = 1", upd));
assertEquals(Result.FALSE, evaluate("false and test.content = 1", put));
assertEquals(Result.FALSE, evaluate("false and test.content = 1", upd));
assertEquals(Result.FALSE, evaluate("false or test.content = 1", put));
assertEquals(Result.INVALID, evaluate("false or test.content = 1", upd));
}
/**
 * Builds the list of fixture documents used by the field-access tests.
 * Index 1 gets fully-populated struct/array/map/weighted-set fields, index 2 gets
 * empty struct/array values, and indexes 0 and 1 get int arrays; other documents
 * only carry the simple header fields set by createDocument().
 */
List<DocumentPut> createDocs() {
List<DocumentPut> documents = new ArrayList<>();
documents.add(createDocument("id:myspace:test::anything", 24, 2.0f, "foo", "bar"));
documents.add(createDocument("id:anotherspace:test::foo", 13, 4.1f, "bar", "foo"));
documents.add(createDocument("id:myspace:test:n=1234:mail1", 15, 1.0f, "some", "some"));
documents.add(createDocument("id:myspace:test:n=5678:bar", 14, 2.4f, "Yet", "More"));
documents.add(createDocument("id:myspace:test:n=2345:mail2", 15, 1.0f, "bar", "baz"));
documents.add(createDocument("id:myspace:test:g=mygroup:qux", 15, 1.0f, "quux", "corge"));
documents.add(createDocument("id:myspace:test::missingint", null, 2.0f, null, "bar"));
// Document 1: mystruct = {key: 14, value: "structval"}.
Struct sval = new Struct(documents.get(1).getDocument().getField("mystruct").getDataType());
sval.setFieldValue("key", new IntegerFieldValue(14));
sval.setFieldValue("value", new StringFieldValue("structval"));
documents.get(1).getDocument().setFieldValue("mystruct", sval);
// Document 1: structarray = [{15, "structval1"}, {16, "structval2"}].
Array<Struct> aval = new Array<>(documents.get(1).getDocument().getField("structarray").getDataType());
{
Struct sval1 = new Struct(aval.getDataType().getNestedType());
sval1.setFieldValue("key", new IntegerFieldValue(15));
sval1.setFieldValue("value", new StringFieldValue("structval1"));
Struct sval2 = new Struct(aval.getDataType().getNestedType());
sval2.setFieldValue("key", new IntegerFieldValue(16));
sval2.setFieldValue("value", new StringFieldValue("structval2"));
aval.add(sval1);
aval.add(sval2);
}
documents.get(1).getDocument().setFieldValue("structarray", aval);
// Document 1: mymap = {3: "a", 5: "b", 7: "c"}.
MapFieldValue<IntegerFieldValue, StringFieldValue> mval =
new MapFieldValue<>((MapDataType)documents.get(1).getDocument().getField("mymap").getDataType());
mval.put(new IntegerFieldValue(3), new StringFieldValue("a"));
mval.put(new IntegerFieldValue(5), new StringFieldValue("b"));
mval.put(new IntegerFieldValue(7), new StringFieldValue("c"));
documents.get(1).getDocument().setFieldValue("mymap", mval);
// Document 1: structarrmap = {"foo": aval, "bar": [{17, "structval3"}, {18, "structval4"}]}.
MapFieldValue<StringFieldValue, Array> amval =
new MapFieldValue<>((MapDataType)documents.get(1).getDocument().getField("structarrmap").getDataType());
amval.put(new StringFieldValue("foo"), aval);
Array<Struct> abval = new Array<>(documents.get(1).getDocument().getField("structarray").getDataType());
{
Struct sval1 = new Struct(aval.getDataType().getNestedType());
sval1.setFieldValue("key", new IntegerFieldValue(17));
sval1.setFieldValue("value", new StringFieldValue("structval3"));
Struct sval2 = new Struct(aval.getDataType().getNestedType());
sval2.setFieldValue("key", new IntegerFieldValue(18));
sval2.setFieldValue("value", new StringFieldValue("structval4"));
abval.add(sval1);
abval.add(sval2);
}
amval.put(new StringFieldValue("bar"), abval);
documents.get(1).getDocument().setFieldValue("structarrmap", amval);
// Document 1: stringweightedset = {foo, val1, val2, val3, val4} (default weights).
WeightedSet<StringFieldValue> wsval = new WeightedSet<>(documents.get(1).getDocument().getField("stringweightedset").getDataType());
wsval.add(new StringFieldValue("foo"));
wsval.add(new StringFieldValue("val1"));
wsval.add(new StringFieldValue("val2"));
wsval.add(new StringFieldValue("val3"));
wsval.add(new StringFieldValue("val4"));
documents.get(1).getDocument().setFieldValue("stringweightedset", wsval);
// Document 2: empty struct and empty struct array (present but with no entries).
Struct sval3 = new Struct(documents.get(2).getDocument().getField("mystruct").getDataType());
documents.get(2).getDocument().setFieldValue("mystruct", sval3);
Array aval2 = new Array(documents.get(2).getDocument().getField("structarray").getDataType());
documents.get(2).getDocument().setFieldValue("structarray", aval2);
// Documents 0 and 1: int arrays used by the intarray comparison tests.
Array<IntegerFieldValue> intvals1 = new Array<>(documents.get(0).getDocument().getField("intarray").getDataType());
intvals1.add(new IntegerFieldValue(12));
intvals1.add(new IntegerFieldValue(40));
intvals1.add(new IntegerFieldValue(60));
intvals1.add(new IntegerFieldValue(84));
documents.get(0).getDocument().setFieldValue("intarray", intvals1);
Array<IntegerFieldValue> intvals2 = new Array<>(documents.get(1).getDocument().getField("intarray").getDataType());
intvals2.add(new IntegerFieldValue(3));
intvals2.add(new IntegerFieldValue(56));
intvals2.add(new IntegerFieldValue(23));
intvals2.add(new IntegerFieldValue(9));
documents.get(1).getDocument().setFieldValue("intarray", intvals2);
return documents;
}
// Exhaustive coverage of the document-selection language, evaluated against the
// fixture documents built by createDocs(): comparison operators, glob ('=') and
// regex ('=~') matching, null semantics, field access, structs, arrays, maps,
// weighted sets, and cross-field iterator variables ($x/$y).
@Test
public void testOperators() throws ParseException {
List<DocumentPut> documents = createDocs();
// The empty expression matches every document.
assertEquals(Result.TRUE, evaluate("", documents.get(0)));
// Numeric ordering comparisons on constants.
assertEquals(Result.FALSE, evaluate("30 < 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 < 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 < 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 < 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 <= 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10 <= 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 <= 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("10 >= 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 >= 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 >= 30", documents.get(0)));
assertEquals(Result.FALSE, evaluate("10 > 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 > 10", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 == 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 == 30", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 != 10", documents.get(0)));
assertEquals(Result.FALSE, evaluate("30 != 30", documents.get(0)));
// String equality; single and double quotes are interchangeable.
assertEquals(Result.TRUE, evaluate("\"foo\" != \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\" != \"foo\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("'foo' == \"bar\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("'foo' == \"foo\"", documents.get(0)));
// '=' on strings is glob matching ('*' wildcard); an empty pattern only matches an empty string.
assertEquals(Result.FALSE, evaluate("\"bar\" = \"a\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" = \"*a*\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" = \"\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"\" = \"\"", documents.get(0)));
// '=~' on strings is (unanchored) regex matching; the empty regex matches anything.
assertEquals(Result.FALSE, evaluate("\"bar\" =~ \"^a$\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" =~ \"a\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\" =~ \"\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"\" =~ \"\"", documents.get(0)));
// '=' on numbers degrades to plain equality.
assertEquals(Result.FALSE, evaluate("30 = 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 = 30", documents.get(0)));
// Mixed int/float comparisons work; ordering a number against a string is INVALID,
// but '!=' across types is TRUE.
assertEquals(Result.FALSE, evaluate("30 < 10.2", documents.get(0)));
assertEquals(Result.TRUE, evaluate("10.2 < 30", documents.get(0)));
assertEquals(Result.INVALID, evaluate("30 < \"foo\"", documents.get(0)));
assertEquals(Result.INVALID, evaluate("30 > \"foo\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("30 != \"foo\"", documents.get(0)));
assertEquals(Result.INVALID, evaluate("14.2 <= \"foo\"", documents.get(0)));
// Null semantics: null equals only null.
assertEquals(Result.TRUE, evaluate("null == null", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null = null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null != null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"bar\" == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null == \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("14.3 == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null = 0", documents.get(0)));
// Comparisons against document field values.
assertEquals(Result.TRUE, evaluate("test.hint = 24", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint = 24", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hint = 13", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hint = 13", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.hfloat = 2.0", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hfloat = 1.0", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hfloat = 4.1", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hfloat > 4.09 and test.hfloat < 4.11", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.content = \"bar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = \"bar\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.content = \"foo\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.content = \"foo\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.hstring == test.content", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hstring == test.content", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.hint + 1 > 13", documents.get(1)));
// doc1234 has no 'hint'/'hstring' set: ordering on a missing field is INVALID,
// equality against null behaves as null comparison.
DocumentPut doc1234 = documents.get(6);
assertEquals(Result.TRUE, evaluate("test.hint != 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint == 1234", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint < 1234 and false", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234 and true", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint < 1234 or true", doc1234));
assertEquals(Result.INVALID, evaluate("test.hint < 1234 or false", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint and test.hint < 1234", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint != null and test.hint < 1234", doc1234));
// Bare identifiers test document type / field presence.
assertEquals(Result.TRUE, evaluate("test", documents.get(0)));
assertEquals(Result.FALSE, evaluate("nonexisting", documents.get(0)));
assertEquals(Result.FALSE, evaluate("nonexisting.reallynonexisting", documents.get(0)));
assertEquals(Result.INVALID, evaluate("nonexisting.reallynonexisting > 13", documents.get(0)));
assertEquals(Result.FALSE, evaluate("true.foo", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hint", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.hstring", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint", doc1234));
assertEquals(Result.FALSE, evaluate("test.hstring", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hstring", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint != null", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null != test.hint", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.hint == null", documents.get(0)));
assertEquals(Result.FALSE, evaluate("null == test.hint", documents.get(0)));
assertEquals(Result.TRUE, evaluate("null == test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint == null", doc1234));
assertEquals(Result.FALSE, evaluate("test.hint != null", doc1234));
assertEquals(Result.FALSE, evaluate("null != test.hint", doc1234));
assertEquals(Result.TRUE, evaluate("test.hint or true", doc1234));
assertEquals(Result.TRUE, evaluate("not test.hint and true", doc1234));
assertEquals(Result.FALSE, evaluate("not test.hint and false", doc1234));
// Document id and its components; keywords are case-insensitive.
assertEquals(Result.TRUE, evaluate("id == \"id:myspace:test::anything\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate(" iD== \"id:myspace:test::anything\" ", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id == \"id:myspa:test::nything\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("Id.scHeme == \"xyz\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.scheme == \"id\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.type == \"test\"", documents.get(4)));
assertEquals(Result.FALSE, evaluate("id.type == \"wrong\"", documents.get(4)));
assertEquals(Result.TRUE, evaluate("Id.namespaCe == \"myspace\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id.NaMespace == \"pace\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific == \"anything\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.user=1234", documents.get(2)));
assertEquals(Result.TRUE, evaluate("id.user=2345", documents.get(4)));
assertEquals(Result.TRUE, evaluate("id.group=\"mygroup\"", documents.get(5)));
// Accessing user/group on ids without them is a runtime error, not FALSE.
assertError("id.user == 1234", documents.get(0), "User identifier is null.");
assertError("id.group == 1234", documents.get(3), "Group identifier is null.");
assertError("id.group == \"yahoo\"", documents.get(3), "Group identifier is null.");
// Boolean connectives and precedence (and binds tighter than or).
assertEquals(Result.FALSE, evaluate("true and false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true or false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("false and true or true and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("false or true and true or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("not false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("not true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true and not false or false", documents.get(0)));
assertEquals(Result.TRUE, evaluate("((243 < 300) and (\"FOO\".lowercase() == \"foo\"))", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 and true", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.content = 1 or true", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 and false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.content = 1 or false", documents.get(0)));
assertEquals(Result.FALSE, evaluate("true and test.content = 1", documents.get(0)));
assertEquals(Result.TRUE, evaluate("true or test.content = 1", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false and test.content = 1", documents.get(0)));
assertEquals(Result.FALSE, evaluate("false or test.content = 1", documents.get(0)));
// Value functions: lowercase(), hash(), abs(), and the modulo operator.
assertEquals(Result.FALSE, evaluate("test.hstring.lowercase() == \"Yet\"", documents.get(3)));
assertEquals(Result.TRUE, evaluate("test.hstring.lowercase() == \"yet\"", documents.get(3)));
assertEquals(Result.FALSE, evaluate("test.hfloat.lowercase() == \"yet\"", documents.get(3)));
assertEquals(Result.TRUE, evaluate("\"bar\".hash() == -270124981", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"bar\".hash().abs() == 270124981", documents.get(0)));
assertError("null.hash() == 22460089", documents.get(0), "Can not invoke 'hash()' on 'null' because that term evaluated to null");
assertEquals(Result.FALSE, evaluate("(0.234).hash() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("(0.234).lowercase() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\".hash() == 123", documents.get(0)));
assertEquals(Result.FALSE, evaluate("(234).hash() == 123", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific.hash() = 596580044", documents.get(0)));
assertEquals(Result.TRUE, evaluate("id.specific.hash() % 10 = 4", documents.get(0)));
assertEquals(Result.FALSE, evaluate("id.specific.hash() % 10 = 2", documents.get(0)));
assertEquals(Result.TRUE, evaluate("\"foo\" + \"bar\" = \"foobar\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("\"foo\" + 4 = 25", documents.get(0)));
assertEquals(Result.FALSE, evaluate("34.0 % 4 = 4", documents.get(0)));
assertEquals(Result.TRUE, evaluate("-6 % 10 = -6", documents.get(0)));
// now() returns the current time in seconds and supports subtraction.
assertEquals(Result.FALSE, evaluate("0 > now()", documents.get(0)));
assertEquals(Result.TRUE, evaluate("0 < now()", documents.get(0)));
assertEquals(Result.TRUE, evaluate("0 < now() - 10", documents.get(0)));
assertEquals(Result.TRUE, evaluate("now() - 20 < now() - 10", documents.get(0)));
long secondsNow = System.currentTimeMillis() / 1000;
assertEquals(Result.TRUE, evaluate("now() - " + secondsNow + " < 2", documents.get(0)));
// Struct and struct-array fields: presence, member access, indexed access.
assertEquals(Result.FALSE, evaluate("test.mystruct", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.mystruct == test.mystruct", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct == test.mystruct", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct != test.mystruct", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.mystruct != test.mystruct", documents.get(1)));
assertEquals(Result.INVALID, evaluate("test.mystruct < test.mystruct", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray == test.structarray", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray == test.structarray", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.key == 15", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray[4].key == 15", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray.key == 15", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].key == 16", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].key = 16", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value == \"structval1\"", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray[4].value == \"structval1\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.structarray.value == \"structval1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[0].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.key = 15", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.structarray.key = 15", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray.key = 15", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value = \"structval2\"", documents.get(2)));
assertEquals(Result.TRUE, evaluate("test.structarray.value = \"*ctval*\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[1].value = \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray[1].value = \"batman\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray.value =~ \"structval[1-9]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray.value =~ \"structval[a-z]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mystruct.value = \"struc?val\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct.value = \"struc?val\"", documents.get(1)));
assertEquals(Result.INVALID, evaluate("test.mystruct.value =~ \"struct.*\"", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mystruct.value =~ \"struct.*\"", documents.get(1)));
// Comparisons against arrays match if any element matches.
assertEquals(Result.FALSE, evaluate("test.intarray < 5", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray < 5", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray > 80", documents.get(0)));
assertEquals(Result.FALSE, evaluate("test.intarray > 80", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray >= 84", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray <= 3", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.intarray == 84", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.intarray != 84", documents.get(0)));
// Iterator variables: the same $x must bind to the same index across terms.
assertEquals(Result.TRUE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$x].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$x].value == \"structval2\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarray[$x].key == 15 AND test.structarray[$y].value == \"structval2\"", documents.get(1)));
// Map fields: key lookup with {key}, plus .value and whole-map matching.
assertEquals(Result.FALSE, evaluate("test.mymap", documents.get(0)));
assertEquals(Result.TRUE, evaluate("test.mymap", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3}", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{9}", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} == \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} == \"b\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{9} == \"b\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap == \"a\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} = \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} = \"b\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap{3} =~ \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap{3} =~ \"b\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap.value = \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap.value = \"d\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap.value =~ \"a\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap.value =~ \"d\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap == 3", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap == 4", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.mymap = 3", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.mymap = 4", documents.get(1)));
// Nested map-of-array access with iterator variables.
assertEquals(Result.TRUE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.structarrmap.value[$y].key == 15 AND test.structarrmap.value[$y].value == \"structval1\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap.value[$y].key == 15 AND test.structarrmap.value[$y].value == \"structval2\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$y}[$x].value == \"structval2\"", documents.get(1)));
// Weighted sets: membership with {key}, weight comparison, glob and regex on members.
assertEquals(Result.TRUE, evaluate("test.stringweightedset", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset{val1}", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset{val1} == 1", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset{val1} == 2", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset == \"val1\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset = \"val*\"", documents.get(1)));
assertEquals(Result.TRUE, evaluate("test.stringweightedset =~ \"val[0-9]\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset == \"val5\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset = \"val5\"", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.stringweightedset =~ \"val5\"", documents.get(1)));
// Iterator variable shared between a map key and a weighted-set member.
assertEquals(Result.TRUE, evaluate("test.structarrmap{$x}.key == 15 AND test.stringweightedset{$x}", documents.get(1)));
assertEquals(Result.FALSE, evaluate("test.structarrmap{$x}.key == 17 AND test.stringweightedset{$x}", documents.get(1)));
}
// Regression test for ticket 1769674: an unterminated string literal must be
// reported as a lexical error rather than crashing the tokenizer.
@Test
public void testTicket1769674() {
assertParseError("music.uri=\"junk",
"Lexical error at line -1, column 17. Encountered: <EOF> after : \"\\\"junk\"");
}
// Verifies that NowCheckVisitor correctly classifies expressions: those using
// now() in a convertible form, those not using now() at all, and those whose
// now() usage cannot be converted to a query (with the expected error message).
@Test
public void testThatVisitingReportsCorrectResult() throws ParseException {
// Convertible now() forms: 'field > now()' optionally with '- seconds', possibly OR'ed/AND'ed.
assertVisitWithValidNowWorks("music.expire > now()");
assertVisitWithValidNowWorks("music.expire > now() and video.expire > now()");
assertVisitWithValidNowWorks("music.expire > now() or video");
assertVisitWithValidNowWorks("music.expire > now() or video.date < 300");
assertVisitWithValidNowWorks("video.date < 300 or music.expire > now()");
assertVisitWithValidNowWorks("video.date < 300 and music.expire > now()");
assertVisitWithValidNowWorks("music.insertdate > now() - 300 and video.expire > now() - 3600");
// Expressions without now() need no conversion.
assertVisitWithoutNowWorks("test.structarrmap{$x}[$y].key == 15 AND test.structarrmap{$x}[$y].value == \"structval1\"");
assertVisitWithoutNowWorks("music.artist.lowercase() == \"chang\"");
// Unsupported now() shapes must fail conversion with a descriptive error.
assertVisitWithInvalidNowFails("music.expire > now() + 300", "Arithmetic operator '+' is not supported");
assertVisitWithInvalidNowFails("music.expire < now()", "Comparison operator '<' is not supported");
assertVisitWithInvalidNowFails("music.expire >= now()", "Comparison operator '>=' is not supported");
assertVisitWithInvalidNowFails("now() > now()", "Left hand side of comparison must be a document field");
assertVisitWithInvalidNowFails("music.name.hash() > now()", "Only attribute items are supported");
}
// Verifies that now()-based selections are converted into per-doctype queries
// of the form 'field:>now(seconds)', and that multi-doctype/invalid forms fail.
@Test
public void testThatSelectionIsConvertedToQueries() throws ParseException {
assertThatQueriesAreCreated("music.expire > now()", Arrays.asList("music"), Arrays.asList("expire:>now(0)"));
assertThatQueriesAreCreated("music.expire > now() - 300", Arrays.asList("music"), Arrays.asList("expire:>now(300)"));
assertThatQueriesAreCreated("music.expire > now() - 300 and video.expire > now() - 3600", Arrays.asList("music", "video"), Arrays.asList("expire:>now(300)", "expire:>now(3600)"));
// An OR branch without now() is dropped from the query map.
assertThatQueriesAreCreated("music.expire > now() - 300 or video", Arrays.asList("music"), Arrays.asList("expire:>now(300)"));
// Multiple now() fields for the same document type cannot be expressed as one query.
assertVisitWithInvalidNowFails("music.field1 > now() - 300 and music.field2 > now() - 300", "Specifying multiple document types is not allowed");
assertVisitWithInvalidNowFails("music.field1 > now() - 300 and video.field1 > now() and music.field2 > now() - 300", "Specifying multiple document types is not allowed");
assertVisitWithInvalidNowFails("now() > music.field", "Left hand side of comparison must be a document field");
}
/**
 * Asserts that converting {@code selection} yields exactly one query per expected
 * document type, pairing {@code expectedDoctypes[i]} with {@code expectedQueries[i]}.
 */
public void assertThatQueriesAreCreated(String selection, List<String> expectedDoctypes, List<String> expectedQueries) throws ParseException {
    DocumentSelector selector = new DocumentSelector(selection);
    NowCheckVisitor nowChecker = new NowCheckVisitor();
    selector.visit(nowChecker);
    assertTrue(nowChecker.requiresConversion());
    SelectionExpressionConverter converter = new SelectionExpressionConverter();
    selector.visit(converter);
    Map<String, String> queriesByDoctype = converter.getQueryMap();
    assertEquals(expectedQueries.size(), queriesByDoctype.size());
    for (int idx = 0; idx < expectedQueries.size(); ++idx) {
        String doctype = expectedDoctypes.get(idx);
        assertTrue(queriesByDoctype.containsKey(doctype));
        assertEquals(expectedQueries.get(idx), queriesByDoctype.get(doctype));
    }
}
/** Asserts that {@code expression} parses and does not require now() conversion. */
public void assertVisitWithoutNowWorks(String expression) throws ParseException {
    NowCheckVisitor nowChecker = new NowCheckVisitor();
    new DocumentSelector(expression).visit(nowChecker);
    assertFalse(nowChecker.requiresConversion());
}
/**
 * Asserts that {@code expression} parses, requires now() conversion, and that the
 * SelectionExpressionConverter accepts it without throwing.
 */
public void assertVisitWithValidNowWorks(String expression) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expression);
    NowCheckVisitor visitor = new NowCheckVisitor();
    selector.visit(visitor);
    assertTrue(visitor.requiresConversion());
    SelectionExpressionConverter converter = new SelectionExpressionConverter();
    try {
        selector.visit(converter);
    } catch (Exception e) {
        // Was 'assertFalse(msg, true)' — an always-failing assertion in disguise;
        // fail() states the intent directly.
        fail("Converter throws exception : " + e.getMessage());
    }
}
/**
 * Asserts that {@code expression} parses and requires now() conversion, but that
 * converting it throws an exception whose message equals {@code expectedError}.
 */
public void assertVisitWithInvalidNowFails(String expression, String expectedError) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expression);
    NowCheckVisitor visitor = new NowCheckVisitor();
    selector.visit(visitor);
    assertTrue(visitor.requiresConversion());
    SelectionExpressionConverter converter = new SelectionExpressionConverter();
    try {
        selector.visit(converter);
        // Was 'assertFalse(msg, true)' — an always-failing assertion in disguise.
        // fail() throws AssertionError, which the catch below (Exception) does not swallow.
        fail("Should not be able to convert " + expression + " query");
    } catch (Exception e) {
        assertEquals(expectedError, e.getMessage());
    }
}
/**
 * Builds a DocumentPut for the "test" doctype with the given id and field values.
 * 'hint' and 'hstring' are only set when non-null, so callers can create documents
 * with those fields absent.
 */
private static DocumentPut createDocument(String id, Integer hInt, float hFloat, String hString, String content) {
    Document document = new Document(manager.getDocumentType("test"), new DocumentId(id));
    if (hInt != null) {
        document.setFieldValue("hint", new IntegerFieldValue(hInt));
    }
    document.setFieldValue("hfloat", new FloatFieldValue(hFloat));
    if (hString != null) {
        document.setFieldValue("hstring", new StringFieldValue(hString));
    }
    document.setFieldValue("content", new StringFieldValue(content));
    return new DocumentPut(document);
}
// Asserts that the expression parses and round-trips (toString) to itself.
private static void assertParse(String expression) throws ParseException {
assertParse(expression, expression);
}
/**
 * Parses {@code expressionString}; when {@code expectedString} is non-null, also
 * asserts that the parsed selector prints back as exactly that string.
 */
private static void assertParse(String expectedString, String expressionString) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expressionString);
    if (expectedString == null) {
        return;
    }
    assertEquals(expectedString, selector.toString());
}
/**
 * Asserts that parsing {@code expressionString} fails with a message that starts
 * with {@code expectedError}. Lexical errors are unwrapped from the ParseException
 * so the raw tokenizer message can be matched.
 */
private static void assertParseError(String expressionString, String expectedError) {
    try {
        new DocumentSelector(expressionString);
        fail("The expression '" + expressionString + "' should throw an exception.");
    }
    catch (ParseException e) {
        Throwable t = e;
        if (t.getCause() instanceof TokenMgrException) {
            t = t.getCause();
        }
        String message = Exceptions.toMessageString(t);
        // Guard the length first (as assertError does) so a too-short actual message
        // produces a clean assertion failure instead of StringIndexOutOfBoundsException.
        assertTrue(message.length() >= expectedError.length());
        assertEquals(expectedError, message.substring(0, expectedError.length()));
    }
}
/** Parses {@code expressionString} into a selector and evaluates it against {@code op}. */
private static Result evaluate(String expressionString, DocumentOperation op) throws ParseException {
    DocumentSelector selector = new DocumentSelector(expressionString);
    return selector.accepts(op);
}
/**
 * Asserts that evaluating {@code expressionString} against {@code op} throws a
 * RuntimeException whose message starts with {@code expectedError}.
 */
private static void assertError(String expressionString, DocumentOperation op, String expectedError) {
    try {
        evaluate(expressionString, op);
        fail("The evaluation of '" + expressionString + "' should throw an exception.");
    } catch (ParseException e) {
        // Fixed garbled message ("should assertEquals ok" was a search-and-replace artifact).
        fail("The expression '" + expressionString + "' should parse without errors.");
    } catch (RuntimeException e) {
        System.err.println("Error was : " + e);
        // Length guard keeps substring() from throwing on a too-short actual message.
        assertTrue(e.getMessage().length() >= expectedError.length());
        assertEquals(expectedError, e.getMessage().substring(0, expectedError.length()));
    }
}
} |
What does 'abstract' mean in this context? Should visitRelation be renamed to visitFromClause? | public LogicalPlan visitAliasedRelation(AliasedRelationContext ctx) {
return withTableAlias((LogicalPlan) visitRelation(ctx.relation()), ctx.tableAlias());
} | return withTableAlias((LogicalPlan) visitRelation(ctx.relation()), ctx.tableAlias()); | public LogicalPlan visitAliasedRelation(AliasedRelationContext ctx) {
return withTableAlias(visitRelation(ctx.relation()), ctx.tableAlias());
} | class LogicalPlanBuilder extends DorisParserBaseVisitor<Object> {
/**
 * Visits the given parse tree and casts the result to the caller's expected type.
 * The cast is inherently unchecked (erased at runtime); callers are responsible
 * for knowing the actual result type produced by the visited rule.
 */
@SuppressWarnings("unchecked") // cast is by contract with the caller; smallest-scope suppression
protected <T> T typedVisit(ParseTree ctx) {
    return (T) ctx.accept(this);
}
/**
 * Overrides the default child-aggregation behavior: a node with exactly one child
 * simply delegates to that child, since there is no generic way to combine the
 * results of multiple children. Any other child count yields null.
 */
@Override
public Object visitChildren(RuleNode node) {
    return node.getChildCount() == 1
            ? node.getChild(0).accept(this)
            : null;
}
// Parses one top-level statement, tagging the resulting plan with its origin
// (position) in the SQL text via ParserUtils.withOrigin.
@Override
public LogicalPlan visitSingleStatement(SingleStatementContext ctx) {
return ParserUtils.withOrigin(ctx, () -> (LogicalPlan) visit(ctx.statement()));
}
/**
 * Visit multi-statements: each child statement is parsed into its own LogicalPlan,
 * returned in source order.
 */
@Override
public List<LogicalPlan> visitMultiStatements(MultiStatementsContext ctx) {
return visit(ctx.statement(), LogicalPlan.class);
}
/* ********************************************************************************************
* Plan parsing
* ******************************************************************************************** */
/**
 * Builds an EXPLAIN command around the inner query's plan. The explain level
 * defaults to NORMAL when no level keyword is given.
 */
@Override
public Command visitExplain(ExplainContext ctx) {
    LogicalPlan queryPlan = plan(ctx.query());
    ExplainLevel level = ctx.level == null
            ? ExplainLevel.NORMAL
            : ExplainLevel.valueOf(ctx.level.getText().toUpperCase(Locale.ROOT));
    return new ExplainCommand(level, queryPlan);
}
/** Parses a query: the query term wrapped with its ORDER BY / LIMIT organization. */
@Override
public LogicalPlan visitQuery(QueryContext ctx) {
    return ParserUtils.withOrigin(ctx, () ->
            withQueryOrganization(plan(ctx.queryTerm()), ctx.queryOrganization()));
}
/**
 * Builds a plan for a regular SELECT: the FROM relation (absent clauses arrive as
 * empty Optionals) combined with the select list, WHERE filter and aggregation.
 */
@Override
public LogicalPlan visitRegularQuerySpecification(RegularQuerySpecificationContext ctx) {
    return ParserUtils.withOrigin(ctx, () -> {
        LogicalPlan fromRelation = withRelation(Optional.ofNullable(ctx.fromClause()));
        return withSelectQuerySpecification(
                ctx,
                fromRelation,
                ctx.selectClause(),
                Optional.ofNullable(ctx.whereClause()),
                Optional.ofNullable(ctx.aggClause()));
    });
}
/**
 * Create an aliased table reference. This is typically used in FROM clauses.
 */
@Developing
private LogicalPlan withTableAlias(LogicalPlan plan, TableAliasContext ctx) {
// NOTE(review): throws NPE if the alias context has no strictIdentifier —
// callers (e.g. visitTableName) check for that before calling; confirm all call sites do.
String alias = ctx.strictIdentifier().getText();
// Column aliases (e.g. "t (a, b)") are not supported yet.
// NOTE(review): error message "Do not implemented" is broken English — consider fixing.
if (null != ctx.identifierList()) {
throw new ParseException("Do not implemented", ctx);
}
return new LogicalSubQueryAlias<>(alias, plan);
}
/**
 * Resolves a (possibly qualified) table name to an UnboundRelation, wrapping it
 * in a subquery alias when the reference carries an alias.
 */
@Override
public LogicalPlan visitTableName(TableNameContext ctx) {
    List<String> qualifiedName = visitMultipartIdentifier(ctx.multipartIdentifier());
    UnboundRelation relation = new UnboundRelation(qualifiedName);
    return ctx.tableAlias().strictIdentifier() == null
            ? relation
            : withTableAlias(relation, ctx.tableAlias());
}
// A parenthesized subquery with an alias, e.g. "(SELECT ...) t".
@Override
public LogicalPlan visitAliasedQuery(AliasedQueryContext ctx) {
return withTableAlias(visitQuery(ctx.query()), ctx.tableAlias());
}
/**
 * Create a star (i.e. all) expression; this selects all elements (in the specified object).
 * Both un-targeted (global) '*' and targeted 'tbl.*' forms are supported; the target
 * qualifier is empty for the global form.
 */
@Override // the stray duplicate @Override that preceded the javadoc was removed — @Override is not repeatable
public Expression visitStar(StarContext ctx) {
    return ParserUtils.withOrigin(ctx, () -> {
        final QualifiedNameContext qualifiedNameContext = ctx.qualifiedName();
        List<String> target;
        if (qualifiedNameContext != null) {
            target = qualifiedNameContext.identifier()
                    .stream()
                    .map(RuleContext::getText)
                    .collect(ImmutableList.toImmutableList());
        } else {
            target = Collections.emptyList();
        }
        return new UnboundStar(target);
    });
}
/**
 * Create an aliased expression when an alias is specified ("expr AS name"),
 * otherwise return the expression unchanged.
 */
@Override
public Expression visitNamedExpression(NamedExpressionContext ctx) {
    return ParserUtils.withOrigin(ctx, () -> {
        Expression child = getExpression(ctx.expression());
        return ctx.name == null ? child : new Alias(child, ctx.name.getText());
    });
}
/**
 * Create a binary comparison expression. Supported operators:
 * '=' / '==' (equal), '<=>' (null-safe equal), '<>' / '!=' (not equal, built as
 * NOT(EqualTo)), '<', '<=', '>' and '>='.
 */
@Override
public Expression visitComparison(ComparisonContext ctx) {
    return ParserUtils.withOrigin(ctx, () -> {
        Expression lhs = getExpression(ctx.left);
        Expression rhs = getExpression(ctx.right);
        TerminalNode op = (TerminalNode) ctx.comparisonOperator().getChild(0);
        switch (op.getSymbol().getType()) {
            case DorisParser.EQ:
                return new EqualTo(lhs, rhs);
            case DorisParser.NSEQ:
                return new NullSafeEqual(lhs, rhs);
            case DorisParser.NEQ:
                return new Not(new EqualTo(lhs, rhs));
            case DorisParser.LT:
                return new LessThan(lhs, rhs);
            case DorisParser.LTE:
                return new LessThanEqual(lhs, rhs);
            case DorisParser.GT:
                return new GreaterThan(lhs, rhs);
            case DorisParser.GTE:
                return new GreaterThanEqual(lhs, rhs);
            default:
                throw new IllegalStateException("Unsupported comparison expression: "
                        + op.getSymbol().getText());
        }
    });
}
/**
 * Create a not expression.
 * format: NOT Expression
 * for example:
 * not 1
 * not 1=1
 */
@Override
public Expression visitLogicalNot(LogicalNotContext ctx) {
return ParserUtils.withOrigin(ctx, () -> new Not(getExpression(ctx.booleanExpression())));
}
/** Create an AND/OR conjunction over the two child boolean expressions. */
@Override
public Expression visitLogicalBinary(LogicalBinaryContext ctx) {
    return ParserUtils.withOrigin(ctx, () -> {
        Expression lhs = getExpression(ctx.left);
        Expression rhs = getExpression(ctx.right);
        int operatorType = ctx.operator.getType();
        if (operatorType == DorisParser.AND) {
            return new And(lhs, rhs);
        }
        if (operatorType == DorisParser.OR) {
            return new Or(lhs, rhs);
        }
        throw new IllegalStateException("Unsupported logical binary type: " + ctx.operator.getText());
    });
}
/**
 * Create a predicated expression: a value expression with an optional predicate
 * attached (e.g. "a + 1 IS NULL"). Without a predicate the value is returned as-is.
 */
@Override
public Expression visitPredicated(PredicatedContext ctx) {
    return ParserUtils.withOrigin(ctx, () -> {
        Expression value = getExpression(ctx.valueExpression());
        if (ctx.predicate() == null) {
            return value;
        }
        return withPredicate(value, ctx.predicate());
    });
}
@Override
public Expression visitArithmeticUnary(ArithmeticUnaryContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
Expression e = getExpression(ctx);
switch (ctx.operator.getType()) {
case DorisParser.PLUS:
return e;
case DorisParser.MINUS:
default:
throw new IllegalStateException("Unsupported arithmetic unary type: " + ctx.operator.getText());
}
});
}
    /**
     * Create a binary arithmetic expression ('*', '/', '%', '+', '-').
     * Additions/subtractions where one operand is an INTERVAL literal are
     * rewritten into a TimestampArithmetic (datetime +/- interval); all other
     * combinations map directly to the plain arithmetic expression nodes.
     */
    @Override
    public Expression visitArithmeticBinary(ArithmeticBinaryContext ctx) {
        return ParserUtils.withOrigin(ctx, () -> {
            Expression left = getExpression(ctx.left);
            Expression right = getExpression(ctx.right);
            int type = ctx.operator.getType();
            // "INTERVAL n unit + datetime": only '+' is meaningful when the
            // interval appears on the left-hand side.
            if (left instanceof IntervalLiteral) {
                if (type != DorisParser.PLUS) {
                    throw new IllegalArgumentException("Only supported: " + Operator.ADD);
                }
                IntervalLiteral interval = (IntervalLiteral) left;
                // last arg 'true' marks the interval as the first operand
                return new TimestampArithmetic(Operator.ADD, right, interval.value(), interval.timeUnit(), true);
            }
            // "datetime +/- INTERVAL n unit": both '+' and '-' are allowed.
            if (right instanceof IntervalLiteral) {
                Operator op;
                if (type == DorisParser.PLUS) {
                    op = Operator.ADD;
                } else if (type == DorisParser.MINUS) {
                    op = Operator.SUBTRACT;
                } else {
                    throw new IllegalArgumentException("Only supported: " + Operator.ADD + " and " + Operator.SUBTRACT);
                }
                IntervalLiteral interval = (IntervalLiteral) right;
                return new TimestampArithmetic(op, left, interval.value(), interval.timeUnit(), false);
            }
            // Ordinary arithmetic: map the operator token to its node type.
            return ParserUtils.withOrigin(ctx, () -> {
                switch (type) {
                    case DorisParser.ASTERISK:
                        return new Multiply(left, right);
                    case DorisParser.SLASH:
                        return new Divide(left, right);
                    case DorisParser.PERCENT:
                        return new Mod(left, right);
                    case DorisParser.PLUS:
                        return new Add(left, right);
                    case DorisParser.MINUS:
                        return new Subtract(left, right);
                    default:
                        throw new IllegalStateException(
                                "Unsupported arithmetic binary type: " + ctx.operator.getText());
                }
            });
        });
    }
/**
* Create a value based [[CaseWhen]] expression. This has the following SQL form:
* {{{
* CASE [expression]
* WHEN [value] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*/
@Override
public Expression visitSimpleCase(DorisParser.SimpleCaseContext context) {
Expression e = getExpression(context.value);
List<WhenClause> whenClauses = context.whenClause().stream()
.map(w -> new WhenClause(new EqualTo(e, getExpression(w.condition)), getExpression(w.result)))
.collect(Collectors.toList());
if (context.elseExpression == null) {
return new CaseWhen(whenClauses);
}
return new CaseWhen(whenClauses, getExpression(context.elseExpression));
}
/**
* Create a condition based [[CaseWhen]] expression. This has the following SQL syntax:
* {{{
* CASE
* WHEN [predicate] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*
* @param context the parse tree
*/
@Override
public Expression visitSearchedCase(DorisParser.SearchedCaseContext context) {
List<WhenClause> whenClauses = context.whenClause().stream()
.map(w -> new WhenClause(getExpression(w.condition), getExpression(w.result)))
.collect(Collectors.toList());
if (context.elseExpression == null) {
return new CaseWhen(whenClauses);
}
return new CaseWhen(whenClauses, getExpression(context.elseExpression));
}
@Override
public Expression visitCast(DorisParser.CastContext ctx) {
return ParserUtils.withOrigin(ctx, () ->
new Cast(getExpression(ctx.expression()), ctx.identifier().getText()));
}
@Override
public UnboundFunction visitExtract(DorisParser.ExtractContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
String functionName = ctx.field.getText();
return new UnboundFunction(functionName, false, Arrays.asList(getExpression(ctx.source)));
});
}
@Override
public UnboundFunction visitFunctionCall(DorisParser.FunctionCallContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
String functionName = ctx.identifier().getText();
boolean isDistinct = ctx.DISTINCT() != null;
List<Expression> params = visit(ctx.expression(), Expression.class);
return new UnboundFunction(functionName, isDistinct, params);
});
}
@Override
public Expression visitInterval(IntervalContext ctx) {
return new IntervalLiteral(getExpression(ctx.value), visitUnitIdentifier(ctx.unit));
}
@Override
public String visitUnitIdentifier(UnitIdentifierContext ctx) {
return ctx.getText();
}
@Override
public Expression visitTypeConstructor(TypeConstructorContext ctx) {
String value = ctx.STRING().getText();
value = value.substring(1, value.length() - 1);
String type = ctx.identifier().getText().toUpperCase();
switch (type) {
case "DATE":
return new DateLiteral(value);
case "DATETIME":
return new DateTimeLiteral(value);
default:
throw new IllegalStateException("Unsupported data type : " + type);
}
}
@Override
public Expression visitDereference(DereferenceContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
Expression e = getExpression(ctx.base);
if (e instanceof UnboundSlot) {
UnboundSlot unboundAttribute = (UnboundSlot) e;
List<String> nameParts = Lists.newArrayList(unboundAttribute.getNameParts());
nameParts.add(ctx.fieldName.getText());
return new UnboundSlot(nameParts);
} else {
throw new IllegalStateException("Unsupported dereference expression: " + ctx.getText());
}
});
}
@Override
public UnboundSlot visitColumnReference(ColumnReferenceContext ctx) {
return UnboundSlot.quoted(ctx.getText());
}
/**
* Create a NULL literal expression.
*/
@Override
public Expression visitNullLiteral(NullLiteralContext ctx) {
return new NullLiteral();
}
@Override
public Literal visitBooleanLiteral(BooleanLiteralContext ctx) {
Boolean b = Boolean.valueOf(ctx.getText());
return new BooleanLiteral(b);
}
@Override
public Literal visitIntegerLiteral(IntegerLiteralContext ctx) {
Integer l = Integer.valueOf(ctx.getText());
return new IntegerLiteral(l);
}
@Override
public Literal visitStringLiteral(StringLiteralContext ctx) {
String s = ctx.STRING().stream()
.map(ParseTree::getText)
.map(str -> str.substring(1, str.length() - 1))
.reduce((s1, s2) -> s1 + s2)
.orElse("");
return new StringLiteral(s);
}
@Override
public Expression visitParenthesizedExpression(ParenthesizedExpressionContext ctx) {
return getExpression(ctx.expression());
}
@Override
public List<Expression> visitNamedExpressionSeq(NamedExpressionSeqContext namedCtx) {
return visit(namedCtx.namedExpression(), Expression.class);
}
/**
* Create OrderKey list.
*
* @param ctx QueryOrganizationContext
* @return List of OrderKey
*/
@Override
public List<OrderKey> visitQueryOrganization(QueryOrganizationContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
if (ctx.sortClause().ORDER() != null) {
return visit(ctx.sortClause().sortItem(), OrderKey.class);
} else {
return ImmutableList.of();
}
});
}
@Override
public LogicalPlan visitFromClause(FromClauseContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
LogicalPlan left = null;
for (RelationContext relation : ctx.relation()) {
LogicalPlan right = plan(relation.relationPrimary());
left = left == null
? right
: new LogicalJoin(JoinType.INNER_JOIN, Optional.empty(), left, right);
left = withJoinRelations(left, relation);
}
return left;
});
}
/* ********************************************************************************************
* Table Identifier parsing
* ******************************************************************************************** */
@Override
public List<String> visitMultipartIdentifier(MultipartIdentifierContext ctx) {
return ctx.parts.stream()
.map(RuleContext::getText)
.collect(ImmutableList.toImmutableList());
}
/**
* Create a Sequence of Strings for a parenthesis enclosed alias list.
*/
@Override
public List<String> visitIdentifierList(IdentifierListContext ctx) {
return visitIdentifierSeq(ctx.identifierSeq());
}
/**
* Create a Sequence of Strings for an identifier list.
*/
@Override
public List<String> visitIdentifierSeq(IdentifierSeqContext ctx) {
return ctx.ident.stream()
.map(RuleContext::getText)
.collect(ImmutableList.toImmutableList());
}
/**
* get OrderKey.
*
* @param ctx SortItemContext
* @return SortItems
*/
@Override
public OrderKey visitSortItem(SortItemContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
boolean isAsc = ctx.DESC() == null;
boolean isNullFirst = true;
Expression expression = typedVisit(ctx.expression());
return new OrderKey(expression, isAsc, isNullFirst);
});
}
private <T> List<T> visit(List<? extends ParserRuleContext> contexts, Class<T> clazz) {
return contexts.stream()
.map(this::visit)
.map(clazz::cast)
.collect(ImmutableList.toImmutableList());
}
private LogicalPlan plan(ParserRuleContext tree) {
return (LogicalPlan) tree.accept(this);
}
/* ********************************************************************************************
* Expression parsing
* ******************************************************************************************** */
/**
* Create an expression from the given context. This method just passes the context on to the
* visitor and only takes care of typing (We assume that the visitor returns an Expression here).
*/
private Expression getExpression(ParserRuleContext ctx) {
return typedVisit(ctx);
}
private LogicalPlan withQueryOrganization(LogicalPlan children, QueryOrganizationContext ctx) {
List<OrderKey> orderKeys = visitQueryOrganization(ctx);
return orderKeys.isEmpty() ? children : new LogicalSort(orderKeys, children);
}
/**
* Add a regular (SELECT) query specification to a logical plan. The query specification
* is the core of the logical plan, this is where sourcing (FROM clause), projection (SELECT),
* aggregation (GROUP BY ... HAVING ...) and filtering (WHERE) takes place.
*
* <p>Note that query hints are ignored (both by the parser and the builder).
*/
private LogicalPlan withSelectQuerySpecification(
ParserRuleContext ctx,
LogicalPlan inputRelation,
SelectClauseContext selectClause,
Optional<WhereClauseContext> whereClause,
Optional<AggClauseContext> aggClause) {
return ParserUtils.withOrigin(ctx, () -> {
LogicalPlan filter = withFilter(inputRelation, whereClause);
LogicalPlan aggregate = withAggregate(filter, selectClause, aggClause);
LogicalPlan having = aggregate;
LogicalPlan projection = withProjection(having, selectClause, aggClause);
return projection;
});
}
private LogicalPlan withRelation(Optional<FromClauseContext> ctx) {
if (ctx.isPresent()) {
return visitFromClause(ctx.get());
} else {
throw new IllegalStateException("Unsupported one row relation");
}
}
/**
* Join one more [[LogicalPlan]]s to the current logical plan.
*/
private LogicalPlan withJoinRelations(LogicalPlan input, RelationContext ctx) {
LogicalPlan last = input;
for (JoinRelationContext join : ctx.joinRelation()) {
JoinType joinType;
if (join.joinType().LEFT() != null) {
joinType = JoinType.LEFT_OUTER_JOIN;
} else if (join.joinType().RIGHT() != null) {
joinType = JoinType.RIGHT_OUTER_JOIN;
} else if (join.joinType().FULL() != null) {
joinType = JoinType.FULL_OUTER_JOIN;
} else if (join.joinType().SEMI() != null) {
joinType = JoinType.LEFT_SEMI_JOIN;
} else if (join.joinType().ANTI() != null) {
joinType = JoinType.LEFT_ANTI_JOIN;
} else if (join.joinType().CROSS() != null) {
joinType = JoinType.CROSS_JOIN;
} else {
joinType = JoinType.INNER_JOIN;
}
JoinCriteriaContext joinCriteria = join.joinCriteria();
Expression condition;
if (joinCriteria == null) {
condition = null;
} else {
condition = getExpression(joinCriteria.booleanExpression());
}
last = new LogicalJoin(joinType, Optional.ofNullable(condition), last, plan(join.relationPrimary()));
}
return last;
}
private LogicalPlan withProjection(LogicalPlan input, SelectClauseContext selectCtx,
Optional<AggClauseContext> aggCtx) {
return ParserUtils.withOrigin(selectCtx, () -> {
if (aggCtx.isPresent()) {
return input;
} else {
List<NamedExpression> projects = getNamedExpressions(selectCtx.namedExpressionSeq());
return new LogicalProject(projects, input);
}
});
}
private LogicalPlan withFilter(LogicalPlan input, Optional<WhereClauseContext> whereCtx) {
return input.optionalMap(whereCtx, () ->
new LogicalFilter(getExpression((whereCtx.get().booleanExpression())), input)
);
}
private LogicalPlan withAggregate(LogicalPlan input, SelectClauseContext selectCtx,
Optional<AggClauseContext> aggCtx) {
return input.optionalMap(aggCtx, () -> {
List<Expression> groupByExpressions = visit(aggCtx.get().groupByItem().expression(), Expression.class);
List<NamedExpression> namedExpressions = getNamedExpressions(selectCtx.namedExpressionSeq());
return new LogicalAggregate(groupByExpressions, namedExpressions, input);
});
}
/**
* match predicate type and generate different predicates.
*
* @param ctx PredicateContext
* @param valueExpression valueExpression
* @return Expression
*/
private Expression withPredicate(Expression valueExpression, PredicateContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
Expression outExpression;
switch (ctx.kind.getType()) {
case DorisParser.BETWEEN:
outExpression = new Between(
valueExpression,
getExpression(ctx.lower),
getExpression(ctx.upper)
);
break;
case DorisParser.LIKE:
outExpression = new Like(
valueExpression,
getExpression(ctx.pattern)
);
break;
case DorisParser.REGEXP:
outExpression = new Regexp(
valueExpression,
getExpression(ctx.pattern)
);
break;
case DorisParser.IN:
if (ctx.query() == null) {
outExpression = null;
throw new IllegalStateException("Unsupported predicate type: " + ctx.kind.getText());
} else {
outExpression = new InSubquery(
valueExpression,
new ListQuery(typedVisit(ctx.query()))
);
}
break;
default:
throw new IllegalStateException("Unsupported predicate type: " + ctx.kind.getText());
}
return ctx.NOT() != null ? new Not(outExpression) : outExpression;
});
}
private List<NamedExpression> getNamedExpressions(NamedExpressionSeqContext namedCtx) {
return ParserUtils.withOrigin(namedCtx, () -> {
List<Expression> expressions = visit(namedCtx.namedExpression(), Expression.class);
List<NamedExpression> namedExpressions = expressions.stream().map(expression -> {
if (expression instanceof NamedExpression) {
return (NamedExpression) expression;
} else {
return new UnboundAlias(expression);
}
}).collect(ImmutableList.toImmutableList());
return namedExpressions;
});
}
@Override
public Expression visitSubqueryExpression(SubqueryExpressionContext subqueryExprCtx) {
return ParserUtils.withOrigin(subqueryExprCtx, () -> new ScalarSubquery(typedVisit(subqueryExprCtx.query())));
}
@Override
public Expression visitExist(ExistContext context) {
return ParserUtils.withOrigin(context, () -> new Exists(typedVisit(context.query())));
}
}

class LogicalPlanBuilder extends DorisParserBaseVisitor<Object> {
    // Visit a parse node and cast the result to the caller-expected type.
    // NOTE(review): the cast is unchecked — callers must know which node
    // kind (Expression, LogicalPlan, ...) the visited context produces.
    protected <T> T typedVisit(ParseTree ctx) {
        return (T) ctx.accept(this);
    }
    /**
     * Override the default behavior for all visit methods. This will only return a non-null result
     * when the context has only one child. This is done because there is no generic method to
     * combine the results of the context children. In all other cases null is returned.
     */
    @Override
    public Object visitChildren(RuleNode node) {
        if (node.getChildCount() == 1) {
            return node.getChild(0).accept(this);
        } else {
            return null;
        }
    }
@Override
public LogicalPlan visitSingleStatement(SingleStatementContext ctx) {
return ParserUtils.withOrigin(ctx, () -> (LogicalPlan) visit(ctx.statement()));
}
/**
* Visit multi-statements.
*/
@Override
public List<LogicalPlan> visitMultiStatements(MultiStatementsContext ctx) {
return visit(ctx.statement(), LogicalPlan.class);
}
/* ********************************************************************************************
* Plan parsing
* ******************************************************************************************** */
@Override
public Command visitExplain(ExplainContext ctx) {
LogicalPlan logicalPlan = plan(ctx.query());
ExplainLevel explainLevel = ExplainLevel.NORMAL;
if (ctx.level != null) {
explainLevel = ExplainLevel.valueOf(ctx.level.getText().toUpperCase(Locale.ROOT));
}
return new ExplainCommand(explainLevel, logicalPlan);
}
@Override
public LogicalPlan visitQuery(QueryContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
LogicalPlan query = plan(ctx.queryTerm());
return withQueryOrganization(query, ctx.queryOrganization());
});
}
@Override
public LogicalPlan visitRegularQuerySpecification(RegularQuerySpecificationContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
LogicalPlan relation = withRelation(Optional.ofNullable(ctx.fromClause()));
return withSelectQuerySpecification(
ctx, relation,
ctx.selectClause(),
Optional.ofNullable(ctx.whereClause()),
Optional.ofNullable(ctx.aggClause())
);
});
}
/**
* Create an aliased table reference. This is typically used in FROM clauses.
*/
@Developing
private LogicalPlan withTableAlias(LogicalPlan plan, TableAliasContext ctx) {
String alias = ctx.strictIdentifier().getText();
if (null != ctx.identifierList()) {
throw new ParseException("Do not implemented", ctx);
}
return new LogicalSubQueryAlias<>(alias, plan);
}
@Override
public LogicalPlan visitTableName(TableNameContext ctx) {
List<String> tableId = visitMultipartIdentifier(ctx.multipartIdentifier());
if (null == ctx.tableAlias().strictIdentifier()) {
return new UnboundRelation(tableId);
}
return withTableAlias(new UnboundRelation(tableId), ctx.tableAlias());
}
@Override
public LogicalPlan visitAliasedQuery(AliasedQueryContext ctx) {
return withTableAlias(visitQuery(ctx.query()), ctx.tableAlias());
}
@Override
/**
* Create a star (i.e. all) expression; this selects all elements (in the specified object).
* Both un-targeted (global) and targeted aliases are supported.
*/
@Override
public Expression visitStar(StarContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
final QualifiedNameContext qualifiedNameContext = ctx.qualifiedName();
List<String> target;
if (qualifiedNameContext != null) {
target = qualifiedNameContext.identifier()
.stream()
.map(RuleContext::getText)
.collect(ImmutableList.toImmutableList());
} else {
target = Collections.emptyList();
}
return new UnboundStar(target);
});
}
/**
* Create an aliased expression if an alias is specified. Both single and multi-aliases are
* supported.
*/
@Override
public Expression visitNamedExpression(NamedExpressionContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
Expression expression = getExpression(ctx.expression());
if (ctx.name != null) {
return new Alias(expression, ctx.name.getText());
} else {
return expression;
}
});
}
/**
* Create a comparison expression. This compares two expressions. The following comparison
* operators are supported:
* - Equal: '=' or '=='
* - Null-safe Equal: '<=>'
* - Not Equal: '<>' or '!='
* - Less than: '<'
* - Less then or Equal: '<='
* - Greater than: '>'
* - Greater then or Equal: '>='
*/
@Override
public Expression visitComparison(ComparisonContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
Expression left = getExpression(ctx.left);
Expression right = getExpression(ctx.right);
TerminalNode operator = (TerminalNode) ctx.comparisonOperator().getChild(0);
switch (operator.getSymbol().getType()) {
case DorisParser.EQ:
return new EqualTo(left, right);
case DorisParser.NEQ:
return new Not(new EqualTo(left, right));
case DorisParser.LT:
return new LessThan(left, right);
case DorisParser.GT:
return new GreaterThan(left, right);
case DorisParser.LTE:
return new LessThanEqual(left, right);
case DorisParser.GTE:
return new GreaterThanEqual(left, right);
case DorisParser.NSEQ:
return new NullSafeEqual(left, right);
default:
throw new IllegalStateException("Unsupported comparison expression: "
+ operator.getSymbol().getText());
}
});
}
/**
* Create a not expression.
* format: NOT Expression
* for example:
* not 1
* not 1=1
*/
@Override
public Expression visitLogicalNot(LogicalNotContext ctx) {
return ParserUtils.withOrigin(ctx, () -> new Not(getExpression(ctx.booleanExpression())));
}
@Override
public Expression visitLogicalBinary(LogicalBinaryContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
Expression left = getExpression(ctx.left);
Expression right = getExpression(ctx.right);
switch (ctx.operator.getType()) {
case DorisParser.AND:
return new And(left, right);
case DorisParser.OR:
return new Or(left, right);
default:
throw new IllegalStateException("Unsupported logical binary type: " + ctx.operator.getText());
}
});
}
/**
* Create a predicated expression. A predicated expression is a normal expression with a
* predicate attached to it, for example:
* {{{
* a + 1 IS NULL
* }}}
*/
@Override
public Expression visitPredicated(PredicatedContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
Expression e = getExpression(ctx.valueExpression());
return ctx.predicate() == null ? e : withPredicate(e, ctx.predicate());
});
}
@Override
public Expression visitArithmeticUnary(ArithmeticUnaryContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
Expression e = getExpression(ctx);
switch (ctx.operator.getType()) {
case DorisParser.PLUS:
return e;
case DorisParser.MINUS:
default:
throw new IllegalStateException("Unsupported arithmetic unary type: " + ctx.operator.getText());
}
});
}
@Override
public Expression visitArithmeticBinary(ArithmeticBinaryContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
Expression left = getExpression(ctx.left);
Expression right = getExpression(ctx.right);
int type = ctx.operator.getType();
if (left instanceof IntervalLiteral) {
if (type != DorisParser.PLUS) {
throw new IllegalArgumentException("Only supported: " + Operator.ADD);
}
IntervalLiteral interval = (IntervalLiteral) left;
return new TimestampArithmetic(Operator.ADD, right, interval.value(), interval.timeUnit(), true);
}
if (right instanceof IntervalLiteral) {
Operator op;
if (type == DorisParser.PLUS) {
op = Operator.ADD;
} else if (type == DorisParser.MINUS) {
op = Operator.SUBTRACT;
} else {
throw new IllegalArgumentException("Only supported: " + Operator.ADD + " and " + Operator.SUBTRACT);
}
IntervalLiteral interval = (IntervalLiteral) right;
return new TimestampArithmetic(op, left, interval.value(), interval.timeUnit(), false);
}
return ParserUtils.withOrigin(ctx, () -> {
switch (type) {
case DorisParser.ASTERISK:
return new Multiply(left, right);
case DorisParser.SLASH:
return new Divide(left, right);
case DorisParser.PERCENT:
return new Mod(left, right);
case DorisParser.PLUS:
return new Add(left, right);
case DorisParser.MINUS:
return new Subtract(left, right);
default:
throw new IllegalStateException(
"Unsupported arithmetic binary type: " + ctx.operator.getText());
}
});
});
}
/**
* Create a value based [[CaseWhen]] expression. This has the following SQL form:
* {{{
* CASE [expression]
* WHEN [value] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*/
@Override
public Expression visitSimpleCase(DorisParser.SimpleCaseContext context) {
Expression e = getExpression(context.value);
List<WhenClause> whenClauses = context.whenClause().stream()
.map(w -> new WhenClause(new EqualTo(e, getExpression(w.condition)), getExpression(w.result)))
.collect(Collectors.toList());
if (context.elseExpression == null) {
return new CaseWhen(whenClauses);
}
return new CaseWhen(whenClauses, getExpression(context.elseExpression));
}
/**
* Create a condition based [[CaseWhen]] expression. This has the following SQL syntax:
* {{{
* CASE
* WHEN [predicate] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*
* @param context the parse tree
*/
@Override
public Expression visitSearchedCase(DorisParser.SearchedCaseContext context) {
List<WhenClause> whenClauses = context.whenClause().stream()
.map(w -> new WhenClause(getExpression(w.condition), getExpression(w.result)))
.collect(Collectors.toList());
if (context.elseExpression == null) {
return new CaseWhen(whenClauses);
}
return new CaseWhen(whenClauses, getExpression(context.elseExpression));
}
@Override
public Expression visitCast(DorisParser.CastContext ctx) {
return ParserUtils.withOrigin(ctx, () ->
new Cast(getExpression(ctx.expression()), ctx.identifier().getText()));
}
@Override
public UnboundFunction visitExtract(DorisParser.ExtractContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
String functionName = ctx.field.getText();
return new UnboundFunction(functionName, false, false, Arrays.asList(getExpression(ctx.source)));
});
}
@Override
public UnboundFunction visitFunctionCall(DorisParser.FunctionCallContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
String functionName = ctx.identifier().getText();
boolean isDistinct = ctx.DISTINCT() != null;
List<Expression> params = visit(ctx.expression(), Expression.class);
for (Expression expression : params) {
if (expression instanceof UnboundStar && functionName.equalsIgnoreCase("count") && !isDistinct) {
return new UnboundFunction(functionName, false, true, new ArrayList<>());
}
}
return new UnboundFunction(functionName, isDistinct, false, params);
});
}
@Override
public Expression visitInterval(IntervalContext ctx) {
return new IntervalLiteral(getExpression(ctx.value), visitUnitIdentifier(ctx.unit));
}
@Override
public String visitUnitIdentifier(UnitIdentifierContext ctx) {
return ctx.getText();
}
@Override
public Expression visitTypeConstructor(TypeConstructorContext ctx) {
String value = ctx.STRING().getText();
value = value.substring(1, value.length() - 1);
String type = ctx.identifier().getText().toUpperCase();
switch (type) {
case "DATE":
return new DateLiteral(value);
case "DATETIME":
return new DateTimeLiteral(value);
default:
throw new IllegalStateException("Unsupported data type : " + type);
}
}
@Override
public Expression visitDereference(DereferenceContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
Expression e = getExpression(ctx.base);
if (e instanceof UnboundSlot) {
UnboundSlot unboundAttribute = (UnboundSlot) e;
List<String> nameParts = Lists.newArrayList(unboundAttribute.getNameParts());
nameParts.add(ctx.fieldName.getText());
return new UnboundSlot(nameParts);
} else {
throw new IllegalStateException("Unsupported dereference expression: " + ctx.getText());
}
});
}
@Override
public UnboundSlot visitColumnReference(ColumnReferenceContext ctx) {
return UnboundSlot.quoted(ctx.getText());
}
/**
* Create a NULL literal expression.
*/
@Override
public Expression visitNullLiteral(NullLiteralContext ctx) {
return new NullLiteral();
}
@Override
public Literal visitBooleanLiteral(BooleanLiteralContext ctx) {
Boolean b = Boolean.valueOf(ctx.getText());
return new BooleanLiteral(b);
}
@Override
public Literal visitIntegerLiteral(IntegerLiteralContext ctx) {
Integer l = Integer.valueOf(ctx.getText());
return new IntegerLiteral(l);
}
@Override
public Literal visitStringLiteral(StringLiteralContext ctx) {
String s = ctx.STRING().stream()
.map(ParseTree::getText)
.map(str -> str.substring(1, str.length() - 1))
.reduce((s1, s2) -> s1 + s2)
.orElse("");
return new StringLiteral(s);
}
@Override
public Expression visitParenthesizedExpression(ParenthesizedExpressionContext ctx) {
return getExpression(ctx.expression());
}
@Override
public List<Expression> visitNamedExpressionSeq(NamedExpressionSeqContext namedCtx) {
return visit(namedCtx.namedExpression(), Expression.class);
}
@Override
public LogicalPlan visitRelation(RelationContext ctx) {
LogicalPlan right = plan(ctx.relationPrimary());
if (ctx.LATERAL() != null) {
if (!(right instanceof LogicalSubQueryAlias)) {
throw new IllegalStateException("lateral join right table should be sub-query");
}
}
return right;
}
@Override
public LogicalPlan visitFromClause(FromClauseContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
LogicalPlan left = null;
for (RelationContext relation : ctx.relation()) {
LogicalPlan right = visitRelation(relation);
left = (left == null) ? right :
new LogicalJoin<>(
JoinType.CROSS_JOIN,
Optional.empty(),
left,
right);
left = withJoinRelations(left, relation);
}
return left;
});
}
/* ********************************************************************************************
* Table Identifier parsing
* ******************************************************************************************** */
@Override
public List<String> visitMultipartIdentifier(MultipartIdentifierContext ctx) {
return ctx.parts.stream()
.map(RuleContext::getText)
.collect(ImmutableList.toImmutableList());
}
/**
* Create a Sequence of Strings for a parenthesis enclosed alias list.
*/
@Override
public List<String> visitIdentifierList(IdentifierListContext ctx) {
return visitIdentifierSeq(ctx.identifierSeq());
}
/**
* Create a Sequence of Strings for an identifier list.
*/
@Override
public List<String> visitIdentifierSeq(IdentifierSeqContext ctx) {
return ctx.ident.stream()
.map(RuleContext::getText)
.collect(ImmutableList.toImmutableList());
}
/**
* get OrderKey.
*
* @param ctx SortItemContext
* @return SortItems
*/
@Override
public OrderKey visitSortItem(SortItemContext ctx) {
return ParserUtils.withOrigin(ctx, () -> {
boolean isAsc = ctx.DESC() == null;
boolean isNullFirst = true;
Expression expression = typedVisit(ctx.expression());
return new OrderKey(expression, isAsc, isNullFirst);
});
}
private <T> List<T> visit(List<? extends ParserRuleContext> contexts, Class<T> clazz) {
return contexts.stream()
.map(this::visit)
.map(clazz::cast)
.collect(ImmutableList.toImmutableList());
}
private LogicalPlan plan(ParserRuleContext tree) {
return (LogicalPlan) tree.accept(this);
}
/* ********************************************************************************************
* Expression parsing
* ******************************************************************************************** */
/**
* Create an expression from the given context. This method just passes the context on to the
* visitor and only takes care of typing (We assume that the visitor returns an Expression here).
*/
private Expression getExpression(ParserRuleContext ctx) {
return typedVisit(ctx);
}
    /**
     * Apply the trailing query organization (ORDER BY ... LIMIT ...) to a plan:
     * sort is applied first, then limit/offset on top of it.
     */
    private LogicalPlan withQueryOrganization(LogicalPlan inputPlan, QueryOrganizationContext ctx) {
        Optional<SortClauseContext> sortClauseContext = Optional.ofNullable(ctx.sortClause());
        Optional<LimitClauseContext> limitClauseContext = Optional.ofNullable(ctx.limitClause());
        LogicalPlan sort = withSort(inputPlan, sortClauseContext);
        return withLimit(sort, limitClauseContext);
    }
    // Wrap the input in a LogicalSort when a sort clause is present;
    // otherwise the input is returned unchanged (via optionalMap).
    private LogicalPlan withSort(LogicalPlan input, Optional<SortClauseContext> sortCtx) {
        return input.optionalMap(sortCtx, () -> {
            List<OrderKey> orderKeys = visit(sortCtx.get().sortItem(), OrderKey.class);
            return new LogicalSort(orderKeys, input);
        });
    }
    // Wrap the input in a LogicalLimit when a limit clause is present.
    // OFFSET is only accepted when the child is already a LogicalSort
    // (i.e. an ORDER BY clause was given); otherwise it is rejected.
    private LogicalPlan withLimit(LogicalPlan input, Optional<LimitClauseContext> limitCtx) {
        return input.optionalMap(limitCtx, () -> {
            long limit = Long.parseLong(limitCtx.get().limit.getText());
            long offset = 0;
            Token offsetToken = limitCtx.get().offset;
            if (offsetToken != null) {
                if (input instanceof LogicalSort) {
                    offset = Long.parseLong(offsetToken.getText());
                } else {
                    throw new IllegalStateException("OFFSET requires an ORDER BY clause");
                }
            }
            return new LogicalLimit<>(limit, offset, input);
        });
    }
/**
* Add a regular (SELECT) query specification to a logical plan. The query specification
* is the core of the logical plan, this is where sourcing (FROM clause), projection (SELECT),
* aggregation (GROUP BY ... HAVING ...) and filtering (WHERE) takes place.
*
* <p>Note that query hints are ignored (both by the parser and the builder).
*/
private LogicalPlan withSelectQuerySpecification(
ParserRuleContext ctx,
LogicalPlan inputRelation,
SelectClauseContext selectClause,
Optional<WhereClauseContext> whereClause,
Optional<AggClauseContext> aggClause) {
return ParserUtils.withOrigin(ctx, () -> {
LogicalPlan filter = withFilter(inputRelation, whereClause);
LogicalPlan aggregate = withAggregate(filter, selectClause, aggClause);
LogicalPlan having = aggregate;
LogicalPlan projection = withProjection(having, selectClause, aggClause);
return projection;
});
}
private LogicalPlan withRelation(Optional<FromClauseContext> ctx) {
if (ctx.isPresent()) {
return visitFromClause(ctx.get());
} else {
throw new IllegalStateException("Unsupported one row relation");
}
}
/**
 * Join one or more relations ({@code LogicalPlan}s) onto the current logical plan,
 * folding every joinRelation of the FROM clause into a left-deep tree of joins.
 */
private LogicalPlan withJoinRelations(LogicalPlan input, RelationContext ctx) {
LogicalPlan last = input;
for (JoinRelationContext join : ctx.joinRelation()) {
JoinType joinType;
// Order of these checks matters: CROSS and FULL stand alone; SEMI/ANTI combine with
// a LEFT/RIGHT keyword and must be tested before the plain LEFT/RIGHT outer cases;
// anything without a recognized keyword is an inner join.
if (join.joinType().CROSS() != null) {
joinType = JoinType.CROSS_JOIN;
} else if (join.joinType().FULL() != null) {
joinType = JoinType.FULL_OUTER_JOIN;
} else if (join.joinType().SEMI() != null) {
if (join.joinType().LEFT() != null) {
joinType = JoinType.LEFT_SEMI_JOIN;
} else {
joinType = JoinType.RIGHT_SEMI_JOIN;
}
} else if (join.joinType().ANTI() != null) {
if (join.joinType().LEFT() != null) {
joinType = JoinType.LEFT_ANTI_JOIN;
} else {
joinType = JoinType.RIGHT_ANTI_JOIN;
}
} else if (join.joinType().LEFT() != null) {
joinType = JoinType.LEFT_OUTER_JOIN;
} else if (join.joinType().RIGHT() != null) {
joinType = JoinType.RIGHT_OUTER_JOIN;
} else {
joinType = JoinType.INNER_JOIN;
}
// A join without explicit criteria gets an empty condition (Optional.empty below).
JoinCriteriaContext joinCriteria = join.joinCriteria();
Expression condition;
if (joinCriteria == null) {
condition = null;
} else {
condition = getExpression(joinCriteria.booleanExpression());
}
// The previous plan becomes the left child, keeping the tree left-deep.
last = new LogicalJoin<>(joinType, Optional.ofNullable(condition), last, plan(join.relationPrimary()));
}
return last;
}
/**
 * Applies the SELECT-list projection. When an aggregation clause is present the
 * aggregate already produced the output columns, so the input is returned unchanged.
 */
private LogicalPlan withProjection(LogicalPlan input, SelectClauseContext selectCtx,
        Optional<AggClauseContext> aggCtx) {
    return ParserUtils.withOrigin(selectCtx, () -> {
        if (!aggCtx.isPresent()) {
            List<NamedExpression> projects = getNamedExpressions(selectCtx.namedExpressionSeq());
            return new LogicalProject<>(projects, input);
        }
        return input;
    });
}
/** Applies the optional WHERE clause as a {@code LogicalFilter} on top of the given plan. */
private LogicalPlan withFilter(LogicalPlan input, Optional<WhereClauseContext> whereCtx) {
    return input.optionalMap(whereCtx, () -> {
        Expression predicate = getExpression(whereCtx.get().booleanExpression());
        return new LogicalFilter<>(predicate, input);
    });
}
/**
 * Applies the optional GROUP BY clause: the grouping keys come from the aggregation
 * clause and the output expressions from the SELECT list.
 */
private LogicalPlan withAggregate(LogicalPlan input, SelectClauseContext selectCtx,
        Optional<AggClauseContext> aggCtx) {
    return input.optionalMap(aggCtx, () -> {
        List<Expression> groupByKeys = visit(aggCtx.get().groupByItem().expression(), Expression.class);
        List<NamedExpression> outputs = getNamedExpressions(selectCtx.namedExpressionSeq());
        return new LogicalAggregate<>(groupByKeys, outputs, input);
    });
}
/**
 * Matches the predicate kind and builds the corresponding predicate expression
 * (BETWEEN, LIKE, REGEXP, IN-list or IN-subquery), wrapping it in {@code Not}
 * when a leading NOT is present.
 *
 * @param valueExpression the left-hand value the predicate tests
 * @param ctx PredicateContext
 * @return the built predicate Expression
 */
private Expression withPredicate(Expression valueExpression, PredicateContext ctx) {
    return ParserUtils.withOrigin(ctx, () -> {
        Expression predicate;
        int kind = ctx.kind.getType();
        if (kind == DorisParser.BETWEEN) {
            predicate = new Between(valueExpression, getExpression(ctx.lower), getExpression(ctx.upper));
        } else if (kind == DorisParser.LIKE) {
            predicate = new Like(valueExpression, getExpression(ctx.pattern));
        } else if (kind == DorisParser.REGEXP) {
            predicate = new Regexp(valueExpression, getExpression(ctx.pattern));
        } else if (kind == DorisParser.IN) {
            if (ctx.query() == null) {
                // IN (expr, expr, ...) — a literal value list.
                predicate = new InPredicate(valueExpression, withInList(ctx));
            } else {
                // IN (SELECT ...) — a subquery list.
                predicate = new InSubquery(valueExpression, new ListQuery(typedVisit(ctx.query())));
            }
        } else {
            throw new IllegalStateException("Unsupported predicate type: " + ctx.kind.getText());
        }
        return ctx.NOT() != null ? new Not(predicate) : predicate;
    });
}
/**
 * Converts the parsed SELECT-list expressions into named expressions, wrapping any
 * plain expression in an {@code UnboundAlias} so every output item has a name slot.
 */
private List<NamedExpression> getNamedExpressions(NamedExpressionSeqContext namedCtx) {
    return ParserUtils.withOrigin(namedCtx, () -> {
        ImmutableList.Builder<NamedExpression> named = ImmutableList.builder();
        for (Expression expression : visit(namedCtx.namedExpression(), Expression.class)) {
            if (expression instanceof NamedExpression) {
                named.add((NamedExpression) expression);
            } else {
                named.add(new UnboundAlias(expression));
            }
        }
        return named.build();
    });
}
/** Builds a scalar subquery expression from a parenthesized query used as a value. */
@Override
public Expression visitSubqueryExpression(SubqueryExpressionContext subqueryExprCtx) {
return ParserUtils.withOrigin(subqueryExprCtx, () -> new ScalarSubquery(typedVisit(subqueryExprCtx.query())));
}
/** Builds an EXISTS (query) predicate expression from the parsed subquery. */
@Override
public Expression visitExist(ExistContext context) {
return ParserUtils.withOrigin(context, () -> new Exists(typedVisit(context.query())));
}
/** Materializes the literal expression list of an {@code IN (expr, ...)} predicate. */
public List<Expression> withInList(PredicateContext ctx) {
    return ctx.expression().stream()
            .map(this::getExpression)
            .collect(ImmutableList.toImmutableList());
}
} |
Perhaps you should add the names from NodeResources here as alternatives so that we can make things more consistent over time? | private Node applyField(Node node, String name, Inspector value) {
switch (name) {
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), clock.instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(asLong(value));
case "currentDockerImage" :
if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
throw new IllegalArgumentException("Docker image can only be set for docker containers");
return node.with(node.status().withDockerImage(DockerImage.fromString(asString(value))));
case "vespaVersion" :
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentOsVersion" :
return node.with(node.status().withOsVersion(Version.fromString(asString(value))));
case "currentFirmwareCheck":
return node.with(node.status().withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value))));
case "failCount" :
return node.with(node.status().setFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
case HARDWARE_FAILURE_DESCRIPTION:
return node.with(node.status().withHardwareFailureDescription(removeQuotedNulls(asOptionalString(value))));
case "parentHostname" :
return node.withParentHostname(asString(value));
case "ipAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(asStringSet(value))), nodes);
case "additionalIpAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), nodes);
case WANT_TO_RETIRE :
return node.withWantToRetire(asBoolean(value), Agent.operator, clock.instant());
case WANT_TO_DEPROVISION :
return node.with(node.status().withWantToDeprovision(asBoolean(value)));
case "hardwareDivergence" :
return node.with(node.status().withHardwareDivergence(removeQuotedNulls(asOptionalString(value))));
case "reports" :
return nodeWithPatchedReports(node, value);
case "openStackId" :
return node.withOpenStackId(asString(value));
case "minDiskAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())));
case "minMainMemoryAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble())));
case "minCpuCores":
return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble())));
case "fastDisk":
return node.with(node.flavor().with(node.flavor().resources().withDiskSpeed(value.asBool() ? fast : slow)));
case "bandwidth":
return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble() / 1000)));
case "modelName":
if (value.type() == Type.NIX) {
return node.withoutModelName();
}
return node.withModelName(asString(value));
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | case "minDiskAvailableGb": | private Node applyField(Node node, String name, Inspector value) {
switch (name) {
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), clock.instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(asLong(value));
case "currentDockerImage" :
if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
throw new IllegalArgumentException("Docker image can only be set for docker containers");
return node.with(node.status().withDockerImage(DockerImage.fromString(asString(value))));
case "vespaVersion" :
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentOsVersion" :
return node.with(node.status().withOsVersion(Version.fromString(asString(value))));
case "currentFirmwareCheck":
return node.with(node.status().withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value))));
case "failCount" :
return node.with(node.status().setFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
case HARDWARE_FAILURE_DESCRIPTION:
return node.with(node.status().withHardwareFailureDescription(removeQuotedNulls(asOptionalString(value))));
case "parentHostname" :
return node.withParentHostname(asString(value));
case "ipAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(asStringSet(value))), nodes);
case "additionalIpAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), nodes);
case WANT_TO_RETIRE :
return node.withWantToRetire(asBoolean(value), Agent.operator, clock.instant());
case WANT_TO_DEPROVISION :
return node.with(node.status().withWantToDeprovision(asBoolean(value)));
case "hardwareDivergence" :
return node.with(node.status().withHardwareDivergence(removeQuotedNulls(asOptionalString(value))));
case "reports" :
return nodeWithPatchedReports(node, value);
case "openStackId" :
return node.withOpenStackId(asString(value));
case "diskGb":
case "minDiskAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())));
case "memoryGb":
case "minMainMemoryAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble())));
case "vcpu":
case "minCpuCores":
return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble())));
case "fastDisk":
return node.with(node.flavor().with(node.flavor().resources().withDiskSpeed(value.asBool() ? fast : slow)));
case "bandwidthGbps":
case "bandwidth":
return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble() / 1000)));
case "modelName":
if (value.type() == Type.NIX) {
return node.withoutModelName();
}
return node.withModelName(asString(value));
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | class NodePatcher {
private static final String HARDWARE_FAILURE_DESCRIPTION = "hardwareFailureDescription";
private static final String WANT_TO_RETIRE = "wantToRetire";
private static final String WANT_TO_DEPROVISION = "wantToDeprovision";
private final NodeFlavors nodeFlavors;
private final Inspector inspector;
private final LockedNodeList nodes;
private final Clock clock;
private Node node;
private List<Node> children;
private boolean childrenModified = false;
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, LockedNodeList nodes, Clock clock) {
this.nodeFlavors = nodeFlavors;
this.node = node;
this.children = node.type().isDockerHost() ? nodes.childrenOf(node).asList() : List.of();
this.nodes = nodes;
this.clock = clock;
try {
this.inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
} catch (IOException e) {
throw new UncheckedIOException("Error reading request body", e);
}
}
/**
* Apply the json to the node and return all nodes affected by the patch.
* More than 1 node may be affected if e.g. the node is a Docker host, which may have
* children that must be updated in a consistent manner.
*/
public List<Node> apply() {
inspector.traverse((String name, Inspector value) -> {
try {
node = applyField(node, name, value);
} catch (IllegalArgumentException e) {
// Rewrap to name the offending field; the cause keeps the original reason.
throw new IllegalArgumentException("Could not set field '" + name + "'", e);
}
try {
children = applyFieldRecursive(children, name, value);
childrenModified = true;
} catch (IllegalArgumentException e) {
// Deliberately ignored: applyFieldRecursive throws for any field that is not
// recursive, in which case the children are simply left untouched.
}
} );
// Return the patched children (if any were modified) plus the patched node itself.
List<Node> nodes = childrenModified ? new ArrayList<>(children) : new ArrayList<>();
nodes.add(node);
return nodes;
}
private List<Node> applyFieldRecursive(List<Node> childNodes, String name, Inspector value) {
switch (name) {
case HARDWARE_FAILURE_DESCRIPTION:
case WANT_TO_RETIRE:
case WANT_TO_DEPROVISION:
return childNodes.stream()
.map(child -> applyField(child, name, value))
.collect(Collectors.toList());
default :
throw new IllegalArgumentException("Field " + name + " is not recursive");
}
}
private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
if (reportsInspector.type() == Type.NIX) return node.with(new Reports());
var reportsBuilder = new Reports.Builder(node.reports());
reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
if (reportInspector.type() == Type.NIX) {
reportsBuilder.clearReport(reportId);
} else {
reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
}
});
return node.with(reportsBuilder.build());
}
/**
 * Reads a JSON array of strings into a set. A TreeSet is used so the
 * resulting order is sorted and deterministic.
 *
 * @throws IllegalArgumentException if the field is not an array of strings
 */
private Set<String> asStringSet(Inspector field) {
if ( ! field.type().equals(Type.ARRAY))
throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
TreeSet<String> strings = new TreeSet<>();
for (int i = 0; i < field.entries(); i++) {
Inspector entry = field.entry(i);
if ( ! entry.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
strings.add(entry.asString());
}
return strings;
}
/**
 * Sets the current restart generation on this node's allocation.
 *
 * @throws IllegalArgumentException if the node has no allocation to patch
 */
private Node patchCurrentRestartGeneration(Long value) {
    Optional<Allocation> allocation = node.allocation();
    if (!allocation.isPresent()) {
        throw new IllegalArgumentException("Node is not allocated");
    }
    // Bind the allocation once instead of calling get() twice.
    Allocation current = allocation.get();
    return node.with(current.withRestart(current.restartGeneration().withCurrent(value)));
}
private Long asLong(Inspector field) {
if ( ! field.type().equals(Type.LONG))
throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
return field.asLong();
}
private String asString(Inspector field) {
if ( ! field.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
return field.asString();
}
private Optional<String> asOptionalString(Inspector field) {
return field.type().equals(Type.NIX) ? Optional.empty() : Optional.of(asString(field));
}
private Optional<String> removeQuotedNulls(Optional<String> value) {
return value.filter(v -> !v.equals("null"));
}
private boolean asBoolean(Inspector field) {
if ( ! field.type().equals(Type.BOOL))
throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
return field.asBool();
}
} | class NodePatcher {
private static final String HARDWARE_FAILURE_DESCRIPTION = "hardwareFailureDescription";
private static final String WANT_TO_RETIRE = "wantToRetire";
private static final String WANT_TO_DEPROVISION = "wantToDeprovision";
private final NodeFlavors nodeFlavors;
private final Inspector inspector;
private final LockedNodeList nodes;
private final Clock clock;
private Node node;
private List<Node> children;
private boolean childrenModified = false;
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, LockedNodeList nodes, Clock clock) {
this.nodeFlavors = nodeFlavors;
this.node = node;
this.children = node.type().isDockerHost() ? nodes.childrenOf(node).asList() : List.of();
this.nodes = nodes;
this.clock = clock;
try {
this.inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
} catch (IOException e) {
throw new UncheckedIOException("Error reading request body", e);
}
}
/**
* Apply the json to the node and return all nodes affected by the patch.
* More than 1 node may be affected if e.g. the node is a Docker host, which may have
* children that must be updated in a consistent manner.
*/
public List<Node> apply() {
inspector.traverse((String name, Inspector value) -> {
try {
node = applyField(node, name, value);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Could not set field '" + name + "'", e);
}
try {
children = applyFieldRecursive(children, name, value);
childrenModified = true;
} catch (IllegalArgumentException e) {
}
} );
List<Node> nodes = childrenModified ? new ArrayList<>(children) : new ArrayList<>();
nodes.add(node);
return nodes;
}
private List<Node> applyFieldRecursive(List<Node> childNodes, String name, Inspector value) {
switch (name) {
case HARDWARE_FAILURE_DESCRIPTION:
case WANT_TO_RETIRE:
case WANT_TO_DEPROVISION:
return childNodes.stream()
.map(child -> applyField(child, name, value))
.collect(Collectors.toList());
default :
throw new IllegalArgumentException("Field " + name + " is not recursive");
}
}
private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
if (reportsInspector.type() == Type.NIX) return node.with(new Reports());
var reportsBuilder = new Reports.Builder(node.reports());
reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
if (reportInspector.type() == Type.NIX) {
reportsBuilder.clearReport(reportId);
} else {
reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
}
});
return node.with(reportsBuilder.build());
}
private Set<String> asStringSet(Inspector field) {
if ( ! field.type().equals(Type.ARRAY))
throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
TreeSet<String> strings = new TreeSet<>();
for (int i = 0; i < field.entries(); i++) {
Inspector entry = field.entry(i);
if ( ! entry.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
strings.add(entry.asString());
}
return strings;
}
private Node patchCurrentRestartGeneration(Long value) {
Optional<Allocation> allocation = node.allocation();
if (allocation.isPresent())
return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
else
throw new IllegalArgumentException("Node is not allocated");
}
private Long asLong(Inspector field) {
if ( ! field.type().equals(Type.LONG))
throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
return field.asLong();
}
private String asString(Inspector field) {
if ( ! field.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
return field.asString();
}
private Optional<String> asOptionalString(Inspector field) {
return field.type().equals(Type.NIX) ? Optional.empty() : Optional.of(asString(field));
}
private Optional<String> removeQuotedNulls(Optional<String> value) {
return value.filter(v -> !v.equals("null"));
}
private boolean asBoolean(Inspector field) {
if ( ! field.type().equals(Type.BOOL))
throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
return field.asBool();
}
} |
Added the NodeResources names here, but would create too much of a mess to add it under node creation as well. For `/nodes/v3` we should perhaps have a `resources` object? | private Node applyField(Node node, String name, Inspector value) {
switch (name) {
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), clock.instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(asLong(value));
case "currentDockerImage" :
if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
throw new IllegalArgumentException("Docker image can only be set for docker containers");
return node.with(node.status().withDockerImage(DockerImage.fromString(asString(value))));
case "vespaVersion" :
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentOsVersion" :
return node.with(node.status().withOsVersion(Version.fromString(asString(value))));
case "currentFirmwareCheck":
return node.with(node.status().withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value))));
case "failCount" :
return node.with(node.status().setFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
case HARDWARE_FAILURE_DESCRIPTION:
return node.with(node.status().withHardwareFailureDescription(removeQuotedNulls(asOptionalString(value))));
case "parentHostname" :
return node.withParentHostname(asString(value));
case "ipAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(asStringSet(value))), nodes);
case "additionalIpAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), nodes);
case WANT_TO_RETIRE :
return node.withWantToRetire(asBoolean(value), Agent.operator, clock.instant());
case WANT_TO_DEPROVISION :
return node.with(node.status().withWantToDeprovision(asBoolean(value)));
case "hardwareDivergence" :
return node.with(node.status().withHardwareDivergence(removeQuotedNulls(asOptionalString(value))));
case "reports" :
return nodeWithPatchedReports(node, value);
case "openStackId" :
return node.withOpenStackId(asString(value));
case "minDiskAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())));
case "minMainMemoryAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble())));
case "minCpuCores":
return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble())));
case "fastDisk":
return node.with(node.flavor().with(node.flavor().resources().withDiskSpeed(value.asBool() ? fast : slow)));
case "bandwidth":
return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble() / 1000)));
case "modelName":
if (value.type() == Type.NIX) {
return node.withoutModelName();
}
return node.withModelName(asString(value));
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | case "minDiskAvailableGb": | private Node applyField(Node node, String name, Inspector value) {
switch (name) {
case "currentRebootGeneration" :
return node.withCurrentRebootGeneration(asLong(value), clock.instant());
case "currentRestartGeneration" :
return patchCurrentRestartGeneration(asLong(value));
case "currentDockerImage" :
if (node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
throw new IllegalArgumentException("Docker image can only be set for docker containers");
return node.with(node.status().withDockerImage(DockerImage.fromString(asString(value))));
case "vespaVersion" :
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentOsVersion" :
return node.with(node.status().withOsVersion(Version.fromString(asString(value))));
case "currentFirmwareCheck":
return node.with(node.status().withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value))));
case "failCount" :
return node.with(node.status().setFailCount(asLong(value).intValue()));
case "flavor" :
return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
case HARDWARE_FAILURE_DESCRIPTION:
return node.with(node.status().withHardwareFailureDescription(removeQuotedNulls(asOptionalString(value))));
case "parentHostname" :
return node.withParentHostname(asString(value));
case "ipAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(asStringSet(value))), nodes);
case "additionalIpAddresses" :
return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), nodes);
case WANT_TO_RETIRE :
return node.withWantToRetire(asBoolean(value), Agent.operator, clock.instant());
case WANT_TO_DEPROVISION :
return node.with(node.status().withWantToDeprovision(asBoolean(value)));
case "hardwareDivergence" :
return node.with(node.status().withHardwareDivergence(removeQuotedNulls(asOptionalString(value))));
case "reports" :
return nodeWithPatchedReports(node, value);
case "openStackId" :
return node.withOpenStackId(asString(value));
case "diskGb":
case "minDiskAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())));
case "memoryGb":
case "minMainMemoryAvailableGb":
return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble())));
case "vcpu":
case "minCpuCores":
return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble())));
case "fastDisk":
return node.with(node.flavor().with(node.flavor().resources().withDiskSpeed(value.asBool() ? fast : slow)));
case "bandwidthGbps":
case "bandwidth":
return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble() / 1000)));
case "modelName":
if (value.type() == Type.NIX) {
return node.withoutModelName();
}
return node.withModelName(asString(value));
default :
throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
}
} | class NodePatcher {
private static final String HARDWARE_FAILURE_DESCRIPTION = "hardwareFailureDescription";
private static final String WANT_TO_RETIRE = "wantToRetire";
private static final String WANT_TO_DEPROVISION = "wantToDeprovision";
private final NodeFlavors nodeFlavors;
private final Inspector inspector;
private final LockedNodeList nodes;
private final Clock clock;
private Node node;
private List<Node> children;
private boolean childrenModified = false;
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, LockedNodeList nodes, Clock clock) {
this.nodeFlavors = nodeFlavors;
this.node = node;
this.children = node.type().isDockerHost() ? nodes.childrenOf(node).asList() : List.of();
this.nodes = nodes;
this.clock = clock;
try {
this.inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
} catch (IOException e) {
throw new UncheckedIOException("Error reading request body", e);
}
}
/**
* Apply the json to the node and return all nodes affected by the patch.
* More than 1 node may be affected if e.g. the node is a Docker host, which may have
* children that must be updated in a consistent manner.
*/
public List<Node> apply() {
inspector.traverse((String name, Inspector value) -> {
try {
node = applyField(node, name, value);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Could not set field '" + name + "'", e);
}
try {
children = applyFieldRecursive(children, name, value);
childrenModified = true;
} catch (IllegalArgumentException e) {
}
} );
List<Node> nodes = childrenModified ? new ArrayList<>(children) : new ArrayList<>();
nodes.add(node);
return nodes;
}
private List<Node> applyFieldRecursive(List<Node> childNodes, String name, Inspector value) {
switch (name) {
case HARDWARE_FAILURE_DESCRIPTION:
case WANT_TO_RETIRE:
case WANT_TO_DEPROVISION:
return childNodes.stream()
.map(child -> applyField(child, name, value))
.collect(Collectors.toList());
default :
throw new IllegalArgumentException("Field " + name + " is not recursive");
}
}
private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
if (reportsInspector.type() == Type.NIX) return node.with(new Reports());
var reportsBuilder = new Reports.Builder(node.reports());
reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
if (reportInspector.type() == Type.NIX) {
reportsBuilder.clearReport(reportId);
} else {
reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
}
});
return node.with(reportsBuilder.build());
}
private Set<String> asStringSet(Inspector field) {
if ( ! field.type().equals(Type.ARRAY))
throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
TreeSet<String> strings = new TreeSet<>();
for (int i = 0; i < field.entries(); i++) {
Inspector entry = field.entry(i);
if ( ! entry.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
strings.add(entry.asString());
}
return strings;
}
private Node patchCurrentRestartGeneration(Long value) {
Optional<Allocation> allocation = node.allocation();
if (allocation.isPresent())
return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
else
throw new IllegalArgumentException("Node is not allocated");
}
private Long asLong(Inspector field) {
if ( ! field.type().equals(Type.LONG))
throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
return field.asLong();
}
private String asString(Inspector field) {
if ( ! field.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
return field.asString();
}
private Optional<String> asOptionalString(Inspector field) {
return field.type().equals(Type.NIX) ? Optional.empty() : Optional.of(asString(field));
}
private Optional<String> removeQuotedNulls(Optional<String> value) {
return value.filter(v -> !v.equals("null"));
}
private boolean asBoolean(Inspector field) {
if ( ! field.type().equals(Type.BOOL))
throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
return field.asBool();
}
} | class NodePatcher {
private static final String HARDWARE_FAILURE_DESCRIPTION = "hardwareFailureDescription";
private static final String WANT_TO_RETIRE = "wantToRetire";
private static final String WANT_TO_DEPROVISION = "wantToDeprovision";
private final NodeFlavors nodeFlavors;
private final Inspector inspector;
private final LockedNodeList nodes;
private final Clock clock;
private Node node;
private List<Node> children;
private boolean childrenModified = false;
public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, LockedNodeList nodes, Clock clock) {
this.nodeFlavors = nodeFlavors;
this.node = node;
this.children = node.type().isDockerHost() ? nodes.childrenOf(node).asList() : List.of();
this.nodes = nodes;
this.clock = clock;
try {
this.inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
} catch (IOException e) {
throw new UncheckedIOException("Error reading request body", e);
}
}
/**
* Apply the json to the node and return all nodes affected by the patch.
* More than 1 node may be affected if e.g. the node is a Docker host, which may have
* children that must be updated in a consistent manner.
*/
public List<Node> apply() {
inspector.traverse((String name, Inspector value) -> {
try {
node = applyField(node, name, value);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Could not set field '" + name + "'", e);
}
try {
children = applyFieldRecursive(children, name, value);
childrenModified = true;
} catch (IllegalArgumentException e) {
}
} );
List<Node> nodes = childrenModified ? new ArrayList<>(children) : new ArrayList<>();
nodes.add(node);
return nodes;
}
private List<Node> applyFieldRecursive(List<Node> childNodes, String name, Inspector value) {
switch (name) {
case HARDWARE_FAILURE_DESCRIPTION:
case WANT_TO_RETIRE:
case WANT_TO_DEPROVISION:
return childNodes.stream()
.map(child -> applyField(child, name, value))
.collect(Collectors.toList());
default :
throw new IllegalArgumentException("Field " + name + " is not recursive");
}
}
private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
if (reportsInspector.type() == Type.NIX) return node.with(new Reports());
var reportsBuilder = new Reports.Builder(node.reports());
reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
if (reportInspector.type() == Type.NIX) {
reportsBuilder.clearReport(reportId);
} else {
reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
}
});
return node.with(reportsBuilder.build());
}
private Set<String> asStringSet(Inspector field) {
if ( ! field.type().equals(Type.ARRAY))
throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
TreeSet<String> strings = new TreeSet<>();
for (int i = 0; i < field.entries(); i++) {
Inspector entry = field.entry(i);
if ( ! entry.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
strings.add(entry.asString());
}
return strings;
}
private Node patchCurrentRestartGeneration(Long value) {
Optional<Allocation> allocation = node.allocation();
if (allocation.isPresent())
return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
else
throw new IllegalArgumentException("Node is not allocated");
}
private Long asLong(Inspector field) {
if ( ! field.type().equals(Type.LONG))
throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
return field.asLong();
}
private String asString(Inspector field) {
if ( ! field.type().equals(Type.STRING))
throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
return field.asString();
}
private Optional<String> asOptionalString(Inspector field) {
return field.type().equals(Type.NIX) ? Optional.empty() : Optional.of(asString(field));
}
private Optional<String> removeQuotedNulls(Optional<String> value) {
return value.filter(v -> !v.equals("null"));
}
private boolean asBoolean(Inspector field) {
if ( ! field.type().equals(Type.BOOL))
throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
return field.asBool();
}
} |
invalid hostname? | public void rejects_invalid_hostname() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, "invalidhostname", List.of(nodeIp));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
} | public void rejects_invalid_hostname() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, "invalidhostname", List.of(nodeIp));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
} | class InstanceValidatorTest {
private final ApplicationId applicationId = ApplicationId.from("tenant", "application", "instance");
private final String domain = "domain";
private final String service = "service";
@Test
public void application_does_not_exist() {
SuperModelProvider superModelProvider = mockSuperModelProvider();
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_does_not_have_domain_set() {
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.emptyList()));
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_has_wrong_domain() {
ServiceInfo serviceInfo = new ServiceInfo("serviceName", "type", Collections.emptyList(),
Collections.singletonMap(SERVICE_PROPERTIES_DOMAIN_KEY, "not-domain"), "confId", "hostName");
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.singletonList(serviceInfo)));
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_has_same_domain_and_service() {
Map<String, String> properties = new HashMap<>();
properties.put(SERVICE_PROPERTIES_DOMAIN_KEY, domain);
properties.put(SERVICE_PROPERTIES_SERVICE_KEY, service);
ServiceInfo serviceInfo = new ServiceInfo("serviceName", "type", Collections.emptyList(),
properties, "confId", "hostName");
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.singletonList(serviceInfo)));
IdentityDocumentSigner signer = mock(IdentityDocumentSigner.class);
when(signer.hasValidSignature(any(), any())).thenReturn(true);
InstanceValidator instanceValidator = new InstanceValidator(mock(KeyProvider.class), superModelProvider, null, signer);
assertTrue(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void rejects_invalid_provider_unique_id_in_csr() {
SuperModelProvider superModelProvider = mockSuperModelProvider();
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
InstanceConfirmation instanceConfirmation = createRegisterInstanceConfirmation(applicationId, domain, service);
VespaUniqueInstanceId tamperedId = new VespaUniqueInstanceId(0, "default", "instance", "app", "tenant", "us-north-1", "dev", IdentityType.NODE);
instanceConfirmation.set("sanDNS", tamperedId.asDottedString() + ".instanceid.athenz.dev-us-north-1.vespa.yahoo.cloud");
assertFalse(instanceValidator.isValidInstance(instanceConfirmation));
}
@Test
public void accepts_valid_refresh_requests() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, node.hostname(), List.of(nodeIp));
assertTrue(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
public void rejects_refresh_on_ip_mismatch() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, node.hostname(), List.of(nodeIp, "::ff"));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
@Test
public void rejects_hostname_for_tenant_certificates() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.TENANT, node.hostname(), List.of(nodeIp));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
public void rejects_refresh_when_node_is_not_allocated() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
when(nodeRepository.getNodes()).thenReturn(nodeList);
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, nodeList.get(0).hostname(), List.of("::11"));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
private InstanceConfirmation createRegisterInstanceConfirmation(ApplicationId applicationId, String domain, String service) {
VespaUniqueInstanceId vespaUniqueInstanceId = new VespaUniqueInstanceId(0, "default", applicationId.instance().value(), applicationId.application().value(), applicationId.tenant().value(), "us-north-1", "dev", IdentityType.NODE);
SignedIdentityDocument signedIdentityDocument = new SignedIdentityDocument(null,
0,
vespaUniqueInstanceId,
new AthenzService(domain, service),
0,
"localhost",
"localhost",
Instant.now(),
Collections.emptySet(),
IdentityType.NODE);
return createInstanceConfirmation(vespaUniqueInstanceId, domain, service, signedIdentityDocument);
}
private InstanceConfirmation createRefreshInstanceConfirmation(ApplicationId applicationId, String domain, String service, IdentityType identityType, String hostname, List<String> ips) {
VespaUniqueInstanceId vespaUniqueInstanceId = new VespaUniqueInstanceId(0, "default", applicationId.instance().value(), applicationId.application().value(), applicationId.tenant().value(), "us-north-1", "dev", identityType);
InstanceConfirmation instanceConfirmation = createInstanceConfirmation(vespaUniqueInstanceId, domain, service, null);
instanceConfirmation.set("sanIP", String.join(",", ips));
instanceConfirmation.set(InstanceConfirmation.HOSTNAME_ATTRIBUTE, hostname);
return instanceConfirmation;
}
private InstanceConfirmation createInstanceConfirmation(VespaUniqueInstanceId vespaUniqueInstanceId, String domain, String service, SignedIdentityDocument identityDocument) {
InstanceConfirmation instanceConfirmation = new InstanceConfirmation(
"vespa.vespa.cd.provider_dev_us-north-1",
domain,
service,
Optional.ofNullable(identityDocument)
.map(EntityBindingsMapper::toSignedIdentityDocumentEntity)
.orElse(null));
instanceConfirmation.set("sanDNS", vespaUniqueInstanceId.asDottedString() + ".instanceid.athenz.dev-us-north-1.vespa.yahoo.cloud");
return instanceConfirmation;
}
private SuperModelProvider mockSuperModelProvider(ApplicationInfo... appInfos) {
SuperModel superModel = new SuperModel(Stream.of(appInfos)
.collect(Collectors.toMap(
ApplicationInfo::getApplicationId,
Function.identity()
)
));
SuperModelProvider superModelProvider = mock(SuperModelProvider.class);
when(superModelProvider.getSuperModel()).thenReturn(superModel);
return superModelProvider;
}
private ApplicationInfo mockApplicationInfo(ApplicationId appId, int numHosts, List<ServiceInfo> serviceInfo) {
List<HostInfo> hosts = IntStream.range(0, numHosts)
.mapToObj(i -> new HostInfo("host-" + i + "." + appId.toShortString() + ".yahoo.com", serviceInfo))
.collect(Collectors.toList());
Model model = mock(Model.class);
when(model.getHosts()).thenReturn(hosts);
return new ApplicationInfo(appId, 0, model);
}
private List<Node> createNodes(int num) {
MockNodeFlavors flavors = new MockNodeFlavors();
List<Node> nodeList = new ArrayList<>();
for (int i = 0; i < num; i++) {
Node node = Node.create("foo" + i, new IP.Config(Set.of("::1" + i, "::2" + i, "::3" + i), Set.of()), "foo" + i, Optional.empty(), Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant);
nodeList.add(node);
}
return nodeList;
}
private List<Node> allocateNode(List<Node> nodeList, Node node, ApplicationId applicationId) {
nodeList.removeIf(n -> n.id().equals(node.id()));
nodeList.add(node.allocate(applicationId, ClusterMembership.from("container/default/0/0", Version.fromString("6.123.4")), Instant.now()));
return nodeList;
}
} | class InstanceValidatorTest {
private final ApplicationId applicationId = ApplicationId.from("tenant", "application", "instance");
private final String domain = "domain";
private final String service = "service";
@Test
public void application_does_not_exist() {
SuperModelProvider superModelProvider = mockSuperModelProvider();
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_does_not_have_domain_set() {
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.emptyList()));
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_has_wrong_domain() {
ServiceInfo serviceInfo = new ServiceInfo("serviceName", "type", Collections.emptyList(),
Collections.singletonMap(SERVICE_PROPERTIES_DOMAIN_KEY, "not-domain"), "confId", "hostName");
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.singletonList(serviceInfo)));
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_has_same_domain_and_service() {
Map<String, String> properties = new HashMap<>();
properties.put(SERVICE_PROPERTIES_DOMAIN_KEY, domain);
properties.put(SERVICE_PROPERTIES_SERVICE_KEY, service);
ServiceInfo serviceInfo = new ServiceInfo("serviceName", "type", Collections.emptyList(),
properties, "confId", "hostName");
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.singletonList(serviceInfo)));
IdentityDocumentSigner signer = mock(IdentityDocumentSigner.class);
when(signer.hasValidSignature(any(), any())).thenReturn(true);
InstanceValidator instanceValidator = new InstanceValidator(mock(KeyProvider.class), superModelProvider, null, signer);
assertTrue(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void rejects_invalid_provider_unique_id_in_csr() {
SuperModelProvider superModelProvider = mockSuperModelProvider();
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
InstanceConfirmation instanceConfirmation = createRegisterInstanceConfirmation(applicationId, domain, service);
VespaUniqueInstanceId tamperedId = new VespaUniqueInstanceId(0, "default", "instance", "app", "tenant", "us-north-1", "dev", IdentityType.NODE);
instanceConfirmation.set("sanDNS", tamperedId.asDottedString() + ".instanceid.athenz.dev-us-north-1.vespa.yahoo.cloud");
assertFalse(instanceValidator.isValidInstance(instanceConfirmation));
}
@Test
public void accepts_valid_refresh_requests() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, node.hostname(), List.of(nodeIp));
assertTrue(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
public void rejects_refresh_on_ip_mismatch() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, node.hostname(), List.of(nodeIp, "::ff"));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
@Test
public void rejects_hostname_for_tenant_certificates() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.TENANT, node.hostname(), List.of(nodeIp));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
public void rejects_refresh_when_node_is_not_allocated() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
when(nodeRepository.getNodes()).thenReturn(nodeList);
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, nodeList.get(0).hostname(), List.of("::11"));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
private InstanceConfirmation createRegisterInstanceConfirmation(ApplicationId applicationId, String domain, String service) {
VespaUniqueInstanceId vespaUniqueInstanceId = new VespaUniqueInstanceId(0, "default", applicationId.instance().value(), applicationId.application().value(), applicationId.tenant().value(), "us-north-1", "dev", IdentityType.NODE);
SignedIdentityDocument signedIdentityDocument = new SignedIdentityDocument(null,
0,
vespaUniqueInstanceId,
new AthenzService(domain, service),
0,
"localhost",
"localhost",
Instant.now(),
Collections.emptySet(),
IdentityType.NODE);
return createInstanceConfirmation(vespaUniqueInstanceId, domain, service, signedIdentityDocument);
}
private InstanceConfirmation createRefreshInstanceConfirmation(ApplicationId applicationId, String domain, String service, IdentityType identityType, String hostname, List<String> ips) {
VespaUniqueInstanceId vespaUniqueInstanceId = new VespaUniqueInstanceId(0, "default", applicationId.instance().value(), applicationId.application().value(), applicationId.tenant().value(), "us-north-1", "dev", identityType);
InstanceConfirmation instanceConfirmation = createInstanceConfirmation(vespaUniqueInstanceId, domain, service, null);
instanceConfirmation.set("sanIP", String.join(",", ips));
instanceConfirmation.set(InstanceConfirmation.HOSTNAME_ATTRIBUTE, hostname);
return instanceConfirmation;
}
private InstanceConfirmation createInstanceConfirmation(VespaUniqueInstanceId vespaUniqueInstanceId, String domain, String service, SignedIdentityDocument identityDocument) {
InstanceConfirmation instanceConfirmation = new InstanceConfirmation(
"vespa.vespa.cd.provider_dev_us-north-1",
domain,
service,
Optional.ofNullable(identityDocument)
.map(EntityBindingsMapper::toSignedIdentityDocumentEntity)
.orElse(null));
instanceConfirmation.set("sanDNS", vespaUniqueInstanceId.asDottedString() + ".instanceid.athenz.dev-us-north-1.vespa.yahoo.cloud");
return instanceConfirmation;
}
private SuperModelProvider mockSuperModelProvider(ApplicationInfo... appInfos) {
SuperModel superModel = new SuperModel(Stream.of(appInfos)
.collect(Collectors.toMap(
ApplicationInfo::getApplicationId,
Function.identity()
)
));
SuperModelProvider superModelProvider = mock(SuperModelProvider.class);
when(superModelProvider.getSuperModel()).thenReturn(superModel);
return superModelProvider;
}
private ApplicationInfo mockApplicationInfo(ApplicationId appId, int numHosts, List<ServiceInfo> serviceInfo) {
List<HostInfo> hosts = IntStream.range(0, numHosts)
.mapToObj(i -> new HostInfo("host-" + i + "." + appId.toShortString() + ".yahoo.com", serviceInfo))
.collect(Collectors.toList());
Model model = mock(Model.class);
when(model.getHosts()).thenReturn(hosts);
return new ApplicationInfo(appId, 0, model);
}
private List<Node> createNodes(int num) {
MockNodeFlavors flavors = new MockNodeFlavors();
List<Node> nodeList = new ArrayList<>();
for (int i = 0; i < num; i++) {
Node node = Node.create("foo" + i, new IP.Config(Set.of("::1" + i, "::2" + i, "::3" + i), Set.of()), "foo" + i, Optional.empty(), Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant);
nodeList.add(node);
}
return nodeList;
}
private List<Node> allocateNode(List<Node> nodeList, Node node, ApplicationId applicationId) {
nodeList.removeIf(n -> n.id().equals(node.id()));
nodeList.add(node.allocate(applicationId, ClusterMembership.from("container/default/0/0", Version.fromString("6.123.4")), Instant.now()));
return nodeList;
}
} | |
hostname? | public void rejects_hostname_for_tenant_certificates() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.TENANT, node.hostname(), List.of(nodeIp));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
} | public void rejects_hostname_for_tenant_certificates() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.TENANT, node.hostname(), List.of(nodeIp));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
} | class InstanceValidatorTest {
private final ApplicationId applicationId = ApplicationId.from("tenant", "application", "instance");
private final String domain = "domain";
private final String service = "service";
@Test
public void application_does_not_exist() {
SuperModelProvider superModelProvider = mockSuperModelProvider();
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_does_not_have_domain_set() {
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.emptyList()));
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_has_wrong_domain() {
ServiceInfo serviceInfo = new ServiceInfo("serviceName", "type", Collections.emptyList(),
Collections.singletonMap(SERVICE_PROPERTIES_DOMAIN_KEY, "not-domain"), "confId", "hostName");
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.singletonList(serviceInfo)));
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_has_same_domain_and_service() {
Map<String, String> properties = new HashMap<>();
properties.put(SERVICE_PROPERTIES_DOMAIN_KEY, domain);
properties.put(SERVICE_PROPERTIES_SERVICE_KEY, service);
ServiceInfo serviceInfo = new ServiceInfo("serviceName", "type", Collections.emptyList(),
properties, "confId", "hostName");
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.singletonList(serviceInfo)));
IdentityDocumentSigner signer = mock(IdentityDocumentSigner.class);
when(signer.hasValidSignature(any(), any())).thenReturn(true);
InstanceValidator instanceValidator = new InstanceValidator(mock(KeyProvider.class), superModelProvider, null, signer);
assertTrue(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void rejects_invalid_provider_unique_id_in_csr() {
SuperModelProvider superModelProvider = mockSuperModelProvider();
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
InstanceConfirmation instanceConfirmation = createRegisterInstanceConfirmation(applicationId, domain, service);
VespaUniqueInstanceId tamperedId = new VespaUniqueInstanceId(0, "default", "instance", "app", "tenant", "us-north-1", "dev", IdentityType.NODE);
instanceConfirmation.set("sanDNS", tamperedId.asDottedString() + ".instanceid.athenz.dev-us-north-1.vespa.yahoo.cloud");
assertFalse(instanceValidator.isValidInstance(instanceConfirmation));
}
@Test
public void accepts_valid_refresh_requests() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, node.hostname(), List.of(nodeIp));
assertTrue(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
public void rejects_refresh_on_ip_mismatch() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, node.hostname(), List.of(nodeIp, "::ff"));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
public void rejects_invalid_hostname() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, "invalidhostname", List.of(nodeIp));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
@Test
public void rejects_refresh_when_node_is_not_allocated() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
when(nodeRepository.getNodes()).thenReturn(nodeList);
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, nodeList.get(0).hostname(), List.of("::11"));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
private InstanceConfirmation createRegisterInstanceConfirmation(ApplicationId applicationId, String domain, String service) {
VespaUniqueInstanceId vespaUniqueInstanceId = new VespaUniqueInstanceId(0, "default", applicationId.instance().value(), applicationId.application().value(), applicationId.tenant().value(), "us-north-1", "dev", IdentityType.NODE);
SignedIdentityDocument signedIdentityDocument = new SignedIdentityDocument(null,
0,
vespaUniqueInstanceId,
new AthenzService(domain, service),
0,
"localhost",
"localhost",
Instant.now(),
Collections.emptySet(),
IdentityType.NODE);
return createInstanceConfirmation(vespaUniqueInstanceId, domain, service, signedIdentityDocument);
}
private InstanceConfirmation createRefreshInstanceConfirmation(ApplicationId applicationId, String domain, String service, IdentityType identityType, String hostname, List<String> ips) {
VespaUniqueInstanceId vespaUniqueInstanceId = new VespaUniqueInstanceId(0, "default", applicationId.instance().value(), applicationId.application().value(), applicationId.tenant().value(), "us-north-1", "dev", identityType);
InstanceConfirmation instanceConfirmation = createInstanceConfirmation(vespaUniqueInstanceId, domain, service, null);
instanceConfirmation.set("sanIP", String.join(",", ips));
instanceConfirmation.set(InstanceConfirmation.HOSTNAME_ATTRIBUTE, hostname);
return instanceConfirmation;
}
private InstanceConfirmation createInstanceConfirmation(VespaUniqueInstanceId vespaUniqueInstanceId, String domain, String service, SignedIdentityDocument identityDocument) {
InstanceConfirmation instanceConfirmation = new InstanceConfirmation(
"vespa.vespa.cd.provider_dev_us-north-1",
domain,
service,
Optional.ofNullable(identityDocument)
.map(EntityBindingsMapper::toSignedIdentityDocumentEntity)
.orElse(null));
instanceConfirmation.set("sanDNS", vespaUniqueInstanceId.asDottedString() + ".instanceid.athenz.dev-us-north-1.vespa.yahoo.cloud");
return instanceConfirmation;
}
private SuperModelProvider mockSuperModelProvider(ApplicationInfo... appInfos) {
SuperModel superModel = new SuperModel(Stream.of(appInfos)
.collect(Collectors.toMap(
ApplicationInfo::getApplicationId,
Function.identity()
)
));
SuperModelProvider superModelProvider = mock(SuperModelProvider.class);
when(superModelProvider.getSuperModel()).thenReturn(superModel);
return superModelProvider;
}
private ApplicationInfo mockApplicationInfo(ApplicationId appId, int numHosts, List<ServiceInfo> serviceInfo) {
List<HostInfo> hosts = IntStream.range(0, numHosts)
.mapToObj(i -> new HostInfo("host-" + i + "." + appId.toShortString() + ".yahoo.com", serviceInfo))
.collect(Collectors.toList());
Model model = mock(Model.class);
when(model.getHosts()).thenReturn(hosts);
return new ApplicationInfo(appId, 0, model);
}
private List<Node> createNodes(int num) {
MockNodeFlavors flavors = new MockNodeFlavors();
List<Node> nodeList = new ArrayList<>();
for (int i = 0; i < num; i++) {
Node node = Node.create("foo" + i, new IP.Config(Set.of("::1" + i, "::2" + i, "::3" + i), Set.of()), "foo" + i, Optional.empty(), Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant);
nodeList.add(node);
}
return nodeList;
}
private List<Node> allocateNode(List<Node> nodeList, Node node, ApplicationId applicationId) {
nodeList.removeIf(n -> n.id().equals(node.id()));
nodeList.add(node.allocate(applicationId, ClusterMembership.from("container/default/0/0", Version.fromString("6.123.4")), Instant.now()));
return nodeList;
}
} | class InstanceValidatorTest {
private final ApplicationId applicationId = ApplicationId.from("tenant", "application", "instance");
private final String domain = "domain";
private final String service = "service";
@Test
public void application_does_not_exist() {
SuperModelProvider superModelProvider = mockSuperModelProvider();
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_does_not_have_domain_set() {
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.emptyList()));
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_has_wrong_domain() {
ServiceInfo serviceInfo = new ServiceInfo("serviceName", "type", Collections.emptyList(),
Collections.singletonMap(SERVICE_PROPERTIES_DOMAIN_KEY, "not-domain"), "confId", "hostName");
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.singletonList(serviceInfo)));
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
assertFalse(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void application_has_same_domain_and_service() {
Map<String, String> properties = new HashMap<>();
properties.put(SERVICE_PROPERTIES_DOMAIN_KEY, domain);
properties.put(SERVICE_PROPERTIES_SERVICE_KEY, service);
ServiceInfo serviceInfo = new ServiceInfo("serviceName", "type", Collections.emptyList(),
properties, "confId", "hostName");
SuperModelProvider superModelProvider = mockSuperModelProvider(
mockApplicationInfo(applicationId, 5, Collections.singletonList(serviceInfo)));
IdentityDocumentSigner signer = mock(IdentityDocumentSigner.class);
when(signer.hasValidSignature(any(), any())).thenReturn(true);
InstanceValidator instanceValidator = new InstanceValidator(mock(KeyProvider.class), superModelProvider, null, signer);
assertTrue(instanceValidator.isValidInstance(createRegisterInstanceConfirmation(applicationId, domain, service)));
}
@Test
public void rejects_invalid_provider_unique_id_in_csr() {
SuperModelProvider superModelProvider = mockSuperModelProvider();
InstanceValidator instanceValidator = new InstanceValidator(null, superModelProvider, null, null);
InstanceConfirmation instanceConfirmation = createRegisterInstanceConfirmation(applicationId, domain, service);
VespaUniqueInstanceId tamperedId = new VespaUniqueInstanceId(0, "default", "instance", "app", "tenant", "us-north-1", "dev", IdentityType.NODE);
instanceConfirmation.set("sanDNS", tamperedId.asDottedString() + ".instanceid.athenz.dev-us-north-1.vespa.yahoo.cloud");
assertFalse(instanceValidator.isValidInstance(instanceConfirmation));
}
@Test
public void accepts_valid_refresh_requests() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, node.hostname(), List.of(nodeIp));
assertTrue(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
public void rejects_refresh_on_ip_mismatch() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, node.hostname(), List.of(nodeIp, "::ff"));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
public void rejects_invalid_hostname() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
Node node = nodeList.get(0);
nodeList = allocateNode(nodeList, node, applicationId);
when(nodeRepository.getNodes()).thenReturn(nodeList);
String nodeIp = node.ipAddresses().stream().findAny().orElseThrow(() -> new RuntimeException("No ipaddress for mocked node"));
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, "invalidhostname", List.of(nodeIp));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
@Test
@Test
public void rejects_refresh_when_node_is_not_allocated() {
NodeRepository nodeRepository = mock(NodeRepository.class);
InstanceValidator instanceValidator = new InstanceValidator(null, null, nodeRepository);
List<Node> nodeList = createNodes(10);
when(nodeRepository.getNodes()).thenReturn(nodeList);
InstanceConfirmation instanceConfirmation = createRefreshInstanceConfirmation(applicationId, domain, service, IdentityType.NODE, nodeList.get(0).hostname(), List.of("::11"));
assertFalse(instanceValidator.isValidRefresh(instanceConfirmation));
}
private InstanceConfirmation createRegisterInstanceConfirmation(ApplicationId applicationId, String domain, String service) {
VespaUniqueInstanceId vespaUniqueInstanceId = new VespaUniqueInstanceId(0, "default", applicationId.instance().value(), applicationId.application().value(), applicationId.tenant().value(), "us-north-1", "dev", IdentityType.NODE);
SignedIdentityDocument signedIdentityDocument = new SignedIdentityDocument(null,
0,
vespaUniqueInstanceId,
new AthenzService(domain, service),
0,
"localhost",
"localhost",
Instant.now(),
Collections.emptySet(),
IdentityType.NODE);
return createInstanceConfirmation(vespaUniqueInstanceId, domain, service, signedIdentityDocument);
}
private InstanceConfirmation createRefreshInstanceConfirmation(ApplicationId applicationId, String domain, String service, IdentityType identityType, String hostname, List<String> ips) {
VespaUniqueInstanceId vespaUniqueInstanceId = new VespaUniqueInstanceId(0, "default", applicationId.instance().value(), applicationId.application().value(), applicationId.tenant().value(), "us-north-1", "dev", identityType);
InstanceConfirmation instanceConfirmation = createInstanceConfirmation(vespaUniqueInstanceId, domain, service, null);
instanceConfirmation.set("sanIP", String.join(",", ips));
instanceConfirmation.set(InstanceConfirmation.HOSTNAME_ATTRIBUTE, hostname);
return instanceConfirmation;
}
private InstanceConfirmation createInstanceConfirmation(VespaUniqueInstanceId vespaUniqueInstanceId, String domain, String service, SignedIdentityDocument identityDocument) {
InstanceConfirmation instanceConfirmation = new InstanceConfirmation(
"vespa.vespa.cd.provider_dev_us-north-1",
domain,
service,
Optional.ofNullable(identityDocument)
.map(EntityBindingsMapper::toSignedIdentityDocumentEntity)
.orElse(null));
instanceConfirmation.set("sanDNS", vespaUniqueInstanceId.asDottedString() + ".instanceid.athenz.dev-us-north-1.vespa.yahoo.cloud");
return instanceConfirmation;
}
private SuperModelProvider mockSuperModelProvider(ApplicationInfo... appInfos) {
SuperModel superModel = new SuperModel(Stream.of(appInfos)
.collect(Collectors.toMap(
ApplicationInfo::getApplicationId,
Function.identity()
)
));
SuperModelProvider superModelProvider = mock(SuperModelProvider.class);
when(superModelProvider.getSuperModel()).thenReturn(superModel);
return superModelProvider;
}
private ApplicationInfo mockApplicationInfo(ApplicationId appId, int numHosts, List<ServiceInfo> serviceInfo) {
List<HostInfo> hosts = IntStream.range(0, numHosts)
.mapToObj(i -> new HostInfo("host-" + i + "." + appId.toShortString() + ".yahoo.com", serviceInfo))
.collect(Collectors.toList());
Model model = mock(Model.class);
when(model.getHosts()).thenReturn(hosts);
return new ApplicationInfo(appId, 0, model);
}
private List<Node> createNodes(int num) {
MockNodeFlavors flavors = new MockNodeFlavors();
List<Node> nodeList = new ArrayList<>();
for (int i = 0; i < num; i++) {
Node node = Node.create("foo" + i, new IP.Config(Set.of("::1" + i, "::2" + i, "::3" + i), Set.of()), "foo" + i, Optional.empty(), Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant);
nodeList.add(node);
}
return nodeList;
}
private List<Node> allocateNode(List<Node> nodeList, Node node, ApplicationId applicationId) {
nodeList.removeIf(n -> n.id().equals(node.id()));
nodeList.add(node.allocate(applicationId, ClusterMembership.from("container/default/0/0", Version.fromString("6.123.4")), Instant.now()));
return nodeList;
}
} | |
Semantics of this filter statement is unclear to me. Maybe extract the predicate to a separate method where the name is more explanatory? | private AllocatedLoadBalancers(ApplicationId application, ZoneId zone, List<LoadBalancer> loadBalancers) {
this.application = application;
this.zone = zone;
this.list = loadBalancers.stream()
.filter(lb -> lb.state() == LoadBalancer.State.active ||
lb.state() == LoadBalancer.State.reserved)
.collect(Collectors.toUnmodifiableList());
} | lb.state() == LoadBalancer.State.reserved) | private AllocatedLoadBalancers(ApplicationId application, ZoneId zone, List<LoadBalancer> loadBalancers) {
this.application = application;
this.zone = zone;
this.list = loadBalancers.stream()
.filter(AllocatedLoadBalancers::shouldUpdatePolicy)
.collect(Collectors.toUnmodifiableList());
} | class AllocatedLoadBalancers {
private final ApplicationId application;
private final ZoneId zone;
private final List<LoadBalancer> list;
} | class AllocatedLoadBalancers {
private final ApplicationId application;
private final ZoneId zone;
private final List<LoadBalancer> list;
private static boolean shouldUpdatePolicy(LoadBalancer loadBalancer) {
switch (loadBalancer.state()) {
case active:
case reserved:
return true;
}
return false;
}
} |
Done. | private AllocatedLoadBalancers(ApplicationId application, ZoneId zone, List<LoadBalancer> loadBalancers) {
this.application = application;
this.zone = zone;
this.list = loadBalancers.stream()
.filter(lb -> lb.state() == LoadBalancer.State.active ||
lb.state() == LoadBalancer.State.reserved)
.collect(Collectors.toUnmodifiableList());
} | lb.state() == LoadBalancer.State.reserved) | private AllocatedLoadBalancers(ApplicationId application, ZoneId zone, List<LoadBalancer> loadBalancers) {
this.application = application;
this.zone = zone;
this.list = loadBalancers.stream()
.filter(AllocatedLoadBalancers::shouldUpdatePolicy)
.collect(Collectors.toUnmodifiableList());
} | class AllocatedLoadBalancers {
private final ApplicationId application;
private final ZoneId zone;
private final List<LoadBalancer> list;
} | class AllocatedLoadBalancers {
private final ApplicationId application;
private final ZoneId zone;
private final List<LoadBalancer> list;
private static boolean shouldUpdatePolicy(LoadBalancer loadBalancer) {
switch (loadBalancer.state()) {
case active:
case reserved:
return true;
}
return false;
}
} |
Separate constructor would be nice. | public void addUsers(Role role, Collection<UserId> users) {
List<User> userObjs = users.stream()
.map(id -> new User(id.value(), id.value(), null, null))
.collect(Collectors.toList());
memberships.get(role).addAll(userObjs);
} | .map(id -> new User(id.value(), id.value(), null, null)) | public void addUsers(Role role, Collection<UserId> users) {
List<User> userObjs = users.stream()
.map(id -> new User(id.value(), id.value(), null, null))
.collect(Collectors.toList());
memberships.get(role).addAll(userObjs);
} | class MockUserManagement implements UserManagement {
private final Map<Role, Set<User>> memberships = new HashMap<>();
@Override
public void createRole(Role role) {
if (memberships.containsKey(role))
throw new IllegalArgumentException(role + " already exists.");
memberships.put(role, new HashSet<>());
}
@Override
public void deleteRole(Role role) {
memberships.remove(role);
}
@Override
@Override
public void removeUsers(Role role, Collection<UserId> users) {
memberships.get(role).removeAll(users);
}
@Override
public List<User> listUsers(Role role) {
return List.copyOf(memberships.get(role));
}
} | class MockUserManagement implements UserManagement {
private final Map<Role, Set<User>> memberships = new HashMap<>();
@Override
public void createRole(Role role) {
if (memberships.containsKey(role))
throw new IllegalArgumentException(role + " already exists.");
memberships.put(role, new HashSet<>());
}
@Override
public void deleteRole(Role role) {
memberships.remove(role);
}
@Override
@Override
public void removeUsers(Role role, Collection<UserId> users) {
memberships.get(role).removeAll(users);
}
@Override
public List<User> listUsers(Role role) {
return List.copyOf(memberships.get(role));
}
} |
We should fail on the next line if the exception was not thrown. | public void tensor_modify_update_on_non_tensor_field_throws() {
try {
JsonReader reader = createReader(inputJson("{ 'update': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': {",
" 'modify': {} }}}"));
reader.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::doc1");
}
catch (IllegalArgumentException e) {
assertEquals("Error in 'something': A modify update can only be applied to tensor fields. Field 'something' is of type 'string'",
Exceptions.toMessageString(e));
}
} | reader.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::doc1"); | public void tensor_modify_update_on_non_tensor_field_throws() {
try {
JsonReader reader = createReader(inputJson("{ 'update': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': {",
" 'modify': {} }}}"));
reader.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::doc1");
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("Error in 'something': A modify update can only be applied to tensor fields. Field 'something' is of type 'string'",
Exceptions.toMessageString(e));
}
} | class JsonReaderTestCase {
private DocumentTypeManager types;
private JsonFactory parserFactory;
@Rule
public ExpectedException exception = ExpectedException.none();
@Before
public void setUp() throws Exception {
parserFactory = new JsonFactory();
types = new DocumentTypeManager();
{
DocumentType x = new DocumentType("smoke");
x.addField(new Field("something", DataType.STRING));
x.addField(new Field("nalle", DataType.STRING));
x.addField(new Field("int1", DataType.INT));
x.addField(new Field("flag", DataType.BOOL));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("mirrors");
StructDataType woo = new StructDataType("woo");
woo.addField(new Field("sandra", DataType.STRING));
woo.addField(new Field("cloud", DataType.STRING));
x.addField(new Field("skuggsjaa", woo));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testarray");
DataType d = new ArrayDataType(DataType.STRING);
x.addField(new Field("actualarray", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testset");
DataType d = new WeightedSetDataType(DataType.STRING, true, true);
x.addField(new Field("actualset", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testmap");
DataType d = new MapDataType(DataType.STRING, DataType.STRING);
x.addField(new Field("actualmap", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testraw");
DataType d = DataType.RAW;
x.addField(new Field("actualraw", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testMapStringToArrayOfInt");
DataType value = new ArrayDataType(DataType.INT);
DataType d = new MapDataType(DataType.STRING, value);
x.addField(new Field("actualMapStringToArrayOfInt", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testsinglepos");
DataType d = PositionDataType.INSTANCE;
x.addField(new Field("singlepos", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testtensor");
x.addField(new Field("sparse_tensor",
new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").build())));
x.addField(new Field("dense_tensor",
new TensorDataType(new TensorType.Builder().indexed("x", 2).indexed("y", 3).build())));
x.addField(new Field("dense_unbound_tensor",
new TensorDataType(new TensorType.Builder().indexed("x").indexed("y").build())));
x.addField(new Field("mixed_tensor",
new TensorDataType(new TensorType.Builder().mapped("x").indexed("y", 3).build())));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testpredicate");
x.addField(new Field("boolean", DataType.PREDICATE));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testint");
x.addField(new Field("integerfield", DataType.INT));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testnull");
x.addField(new Field("intfield", DataType.INT));
x.addField(new Field("stringfield", DataType.STRING));
x.addField(new Field("arrayfield", new ArrayDataType(DataType.STRING)));
x.addField(new Field("weightedsetfield", new WeightedSetDataType(DataType.STRING, true, true)));
x.addField(new Field("mapfield", new MapDataType(DataType.STRING, DataType.STRING)));
x.addField(new Field("tensorfield", new TensorDataType(new TensorType.Builder().indexed("x").build())));
types.registerDocumentType(x);
}
}
@After
public void tearDown() throws Exception {
types = null;
parserFactory = null;
exception = ExpectedException.none();
}
private JsonReader createReader(String jsonInput) {
InputStream input = new ByteArrayInputStream(Utf8.toBytes(jsonInput));
return new JsonReader(types, input, parserFactory);
}
@Test
public void readSingleDocumentPut() {
JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" }",
"}"));
DocumentPut put = (DocumentPut) r.readSingleDocument(DocumentParser.SupportedOperation.PUT,
"id:unittest:smoke::doc1");
smokeTestDoc(put.getDocument());
}
@Test
public final void readSingleDocumentUpdate() {
JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',",
" 'fields': {",
" 'something': {",
" 'assign': 'orOther' }}}"));
DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::whee");
FieldUpdate f = doc.getFieldUpdate("something");
assertEquals(1, f.size());
assertTrue(f.getValueUpdate(0) instanceof AssignValueUpdate);
}
@Test
public void readClearField() {
JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',",
" 'fields': {",
" 'int1': {",
" 'assign': null }}}"));
DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::whee");
FieldUpdate f = doc.getFieldUpdate("int1");
assertEquals(1, f.size());
assertTrue(f.getValueUpdate(0) instanceof ClearValueUpdate);
assertNull(f.getValueUpdate(0).getValue());
}
@Test
public void smokeTest() throws IOException {
JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" }",
"}"));
DocumentParseInfo parseInfo = r.parseDocument().get();
DocumentType docType = r.readDocumentType(parseInfo.documentId);
DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId));
new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put);
smokeTestDoc(put.getDocument());
}
@Test
public void docIdLookaheadTest() throws IOException {
JsonReader r = createReader(inputJson(
"{ 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" },",
" 'put': 'id:unittest:smoke::doc1'",
" }",
"}"));
DocumentParseInfo parseInfo = r.parseDocument().get();
DocumentType docType = r.readDocumentType(parseInfo.documentId);
DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId));
new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put);
smokeTestDoc(put.getDocument());
} | class JsonReaderTestCase {
private DocumentTypeManager types;
private JsonFactory parserFactory;
@Rule
public ExpectedException exception = ExpectedException.none();
@Before
public void setUp() throws Exception {
parserFactory = new JsonFactory();
types = new DocumentTypeManager();
{
DocumentType x = new DocumentType("smoke");
x.addField(new Field("something", DataType.STRING));
x.addField(new Field("nalle", DataType.STRING));
x.addField(new Field("int1", DataType.INT));
x.addField(new Field("flag", DataType.BOOL));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("mirrors");
StructDataType woo = new StructDataType("woo");
woo.addField(new Field("sandra", DataType.STRING));
woo.addField(new Field("cloud", DataType.STRING));
x.addField(new Field("skuggsjaa", woo));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testarray");
DataType d = new ArrayDataType(DataType.STRING);
x.addField(new Field("actualarray", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testset");
DataType d = new WeightedSetDataType(DataType.STRING, true, true);
x.addField(new Field("actualset", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testmap");
DataType d = new MapDataType(DataType.STRING, DataType.STRING);
x.addField(new Field("actualmap", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testraw");
DataType d = DataType.RAW;
x.addField(new Field("actualraw", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testMapStringToArrayOfInt");
DataType value = new ArrayDataType(DataType.INT);
DataType d = new MapDataType(DataType.STRING, value);
x.addField(new Field("actualMapStringToArrayOfInt", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testsinglepos");
DataType d = PositionDataType.INSTANCE;
x.addField(new Field("singlepos", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testtensor");
x.addField(new Field("sparse_tensor",
new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").build())));
x.addField(new Field("dense_tensor",
new TensorDataType(new TensorType.Builder().indexed("x", 2).indexed("y", 3).build())));
x.addField(new Field("dense_unbound_tensor",
new TensorDataType(new TensorType.Builder().indexed("x").indexed("y").build())));
x.addField(new Field("mixed_tensor",
new TensorDataType(new TensorType.Builder().mapped("x").indexed("y", 3).build())));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testpredicate");
x.addField(new Field("boolean", DataType.PREDICATE));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testint");
x.addField(new Field("integerfield", DataType.INT));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testnull");
x.addField(new Field("intfield", DataType.INT));
x.addField(new Field("stringfield", DataType.STRING));
x.addField(new Field("arrayfield", new ArrayDataType(DataType.STRING)));
x.addField(new Field("weightedsetfield", new WeightedSetDataType(DataType.STRING, true, true)));
x.addField(new Field("mapfield", new MapDataType(DataType.STRING, DataType.STRING)));
x.addField(new Field("tensorfield", new TensorDataType(new TensorType.Builder().indexed("x").build())));
types.registerDocumentType(x);
}
}
@After
public void tearDown() throws Exception {
types = null;
parserFactory = null;
exception = ExpectedException.none();
}
private JsonReader createReader(String jsonInput) {
InputStream input = new ByteArrayInputStream(Utf8.toBytes(jsonInput));
return new JsonReader(types, input, parserFactory);
}
@Test
public void readSingleDocumentPut() {
JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" }",
"}"));
DocumentPut put = (DocumentPut) r.readSingleDocument(DocumentParser.SupportedOperation.PUT,
"id:unittest:smoke::doc1");
smokeTestDoc(put.getDocument());
}
@Test
public final void readSingleDocumentUpdate() {
JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',",
" 'fields': {",
" 'something': {",
" 'assign': 'orOther' }}}"));
DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::whee");
FieldUpdate f = doc.getFieldUpdate("something");
assertEquals(1, f.size());
assertTrue(f.getValueUpdate(0) instanceof AssignValueUpdate);
}
@Test
public void readClearField() {
JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',",
" 'fields': {",
" 'int1': {",
" 'assign': null }}}"));
DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::whee");
FieldUpdate f = doc.getFieldUpdate("int1");
assertEquals(1, f.size());
assertTrue(f.getValueUpdate(0) instanceof ClearValueUpdate);
assertNull(f.getValueUpdate(0).getValue());
}
@Test
public void smokeTest() throws IOException {
JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" }",
"}"));
DocumentParseInfo parseInfo = r.parseDocument().get();
DocumentType docType = r.readDocumentType(parseInfo.documentId);
DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId));
new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put);
smokeTestDoc(put.getDocument());
}
@Test
public void docIdLookaheadTest() throws IOException {
JsonReader r = createReader(inputJson(
"{ 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" },",
" 'put': 'id:unittest:smoke::doc1'",
" }",
"}"));
DocumentParseInfo parseInfo = r.parseDocument().get();
DocumentType docType = r.readDocumentType(parseInfo.documentId);
DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId));
new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put);
smokeTestDoc(put.getDocument());
} |
Good catch - thanks | public void tensor_modify_update_on_non_tensor_field_throws() {
try {
JsonReader reader = createReader(inputJson("{ 'update': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': {",
" 'modify': {} }}}"));
reader.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::doc1");
}
catch (IllegalArgumentException e) {
assertEquals("Error in 'something': A modify update can only be applied to tensor fields. Field 'something' is of type 'string'",
Exceptions.toMessageString(e));
}
} | reader.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::doc1"); | public void tensor_modify_update_on_non_tensor_field_throws() {
try {
JsonReader reader = createReader(inputJson("{ 'update': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': {",
" 'modify': {} }}}"));
reader.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::doc1");
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("Error in 'something': A modify update can only be applied to tensor fields. Field 'something' is of type 'string'",
Exceptions.toMessageString(e));
}
} | class JsonReaderTestCase {
private DocumentTypeManager types;
private JsonFactory parserFactory;
@Rule
public ExpectedException exception = ExpectedException.none();
@Before
public void setUp() throws Exception {
parserFactory = new JsonFactory();
types = new DocumentTypeManager();
{
DocumentType x = new DocumentType("smoke");
x.addField(new Field("something", DataType.STRING));
x.addField(new Field("nalle", DataType.STRING));
x.addField(new Field("int1", DataType.INT));
x.addField(new Field("flag", DataType.BOOL));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("mirrors");
StructDataType woo = new StructDataType("woo");
woo.addField(new Field("sandra", DataType.STRING));
woo.addField(new Field("cloud", DataType.STRING));
x.addField(new Field("skuggsjaa", woo));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testarray");
DataType d = new ArrayDataType(DataType.STRING);
x.addField(new Field("actualarray", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testset");
DataType d = new WeightedSetDataType(DataType.STRING, true, true);
x.addField(new Field("actualset", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testmap");
DataType d = new MapDataType(DataType.STRING, DataType.STRING);
x.addField(new Field("actualmap", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testraw");
DataType d = DataType.RAW;
x.addField(new Field("actualraw", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testMapStringToArrayOfInt");
DataType value = new ArrayDataType(DataType.INT);
DataType d = new MapDataType(DataType.STRING, value);
x.addField(new Field("actualMapStringToArrayOfInt", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testsinglepos");
DataType d = PositionDataType.INSTANCE;
x.addField(new Field("singlepos", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testtensor");
x.addField(new Field("sparse_tensor",
new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").build())));
x.addField(new Field("dense_tensor",
new TensorDataType(new TensorType.Builder().indexed("x", 2).indexed("y", 3).build())));
x.addField(new Field("dense_unbound_tensor",
new TensorDataType(new TensorType.Builder().indexed("x").indexed("y").build())));
x.addField(new Field("mixed_tensor",
new TensorDataType(new TensorType.Builder().mapped("x").indexed("y", 3).build())));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testpredicate");
x.addField(new Field("boolean", DataType.PREDICATE));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testint");
x.addField(new Field("integerfield", DataType.INT));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testnull");
x.addField(new Field("intfield", DataType.INT));
x.addField(new Field("stringfield", DataType.STRING));
x.addField(new Field("arrayfield", new ArrayDataType(DataType.STRING)));
x.addField(new Field("weightedsetfield", new WeightedSetDataType(DataType.STRING, true, true)));
x.addField(new Field("mapfield", new MapDataType(DataType.STRING, DataType.STRING)));
x.addField(new Field("tensorfield", new TensorDataType(new TensorType.Builder().indexed("x").build())));
types.registerDocumentType(x);
}
}
@After
public void tearDown() throws Exception {
types = null;
parserFactory = null;
exception = ExpectedException.none();
}
private JsonReader createReader(String jsonInput) {
InputStream input = new ByteArrayInputStream(Utf8.toBytes(jsonInput));
return new JsonReader(types, input, parserFactory);
}
@Test
public void readSingleDocumentPut() {
JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" }",
"}"));
DocumentPut put = (DocumentPut) r.readSingleDocument(DocumentParser.SupportedOperation.PUT,
"id:unittest:smoke::doc1");
smokeTestDoc(put.getDocument());
}
@Test
public final void readSingleDocumentUpdate() {
JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',",
" 'fields': {",
" 'something': {",
" 'assign': 'orOther' }}}"));
DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::whee");
FieldUpdate f = doc.getFieldUpdate("something");
assertEquals(1, f.size());
assertTrue(f.getValueUpdate(0) instanceof AssignValueUpdate);
}
@Test
public void readClearField() {
JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',",
" 'fields': {",
" 'int1': {",
" 'assign': null }}}"));
DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::whee");
FieldUpdate f = doc.getFieldUpdate("int1");
assertEquals(1, f.size());
assertTrue(f.getValueUpdate(0) instanceof ClearValueUpdate);
assertNull(f.getValueUpdate(0).getValue());
}
@Test
public void smokeTest() throws IOException {
JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" }",
"}"));
DocumentParseInfo parseInfo = r.parseDocument().get();
DocumentType docType = r.readDocumentType(parseInfo.documentId);
DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId));
new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put);
smokeTestDoc(put.getDocument());
}
@Test
public void docIdLookaheadTest() throws IOException {
JsonReader r = createReader(inputJson(
"{ 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" },",
" 'put': 'id:unittest:smoke::doc1'",
" }",
"}"));
DocumentParseInfo parseInfo = r.parseDocument().get();
DocumentType docType = r.readDocumentType(parseInfo.documentId);
DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId));
new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put);
smokeTestDoc(put.getDocument());
} | class JsonReaderTestCase {
private DocumentTypeManager types;
private JsonFactory parserFactory;
@Rule
public ExpectedException exception = ExpectedException.none();
@Before
public void setUp() throws Exception {
parserFactory = new JsonFactory();
types = new DocumentTypeManager();
{
DocumentType x = new DocumentType("smoke");
x.addField(new Field("something", DataType.STRING));
x.addField(new Field("nalle", DataType.STRING));
x.addField(new Field("int1", DataType.INT));
x.addField(new Field("flag", DataType.BOOL));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("mirrors");
StructDataType woo = new StructDataType("woo");
woo.addField(new Field("sandra", DataType.STRING));
woo.addField(new Field("cloud", DataType.STRING));
x.addField(new Field("skuggsjaa", woo));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testarray");
DataType d = new ArrayDataType(DataType.STRING);
x.addField(new Field("actualarray", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testset");
DataType d = new WeightedSetDataType(DataType.STRING, true, true);
x.addField(new Field("actualset", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testmap");
DataType d = new MapDataType(DataType.STRING, DataType.STRING);
x.addField(new Field("actualmap", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testraw");
DataType d = DataType.RAW;
x.addField(new Field("actualraw", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testMapStringToArrayOfInt");
DataType value = new ArrayDataType(DataType.INT);
DataType d = new MapDataType(DataType.STRING, value);
x.addField(new Field("actualMapStringToArrayOfInt", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testsinglepos");
DataType d = PositionDataType.INSTANCE;
x.addField(new Field("singlepos", d));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testtensor");
x.addField(new Field("sparse_tensor",
new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").build())));
x.addField(new Field("dense_tensor",
new TensorDataType(new TensorType.Builder().indexed("x", 2).indexed("y", 3).build())));
x.addField(new Field("dense_unbound_tensor",
new TensorDataType(new TensorType.Builder().indexed("x").indexed("y").build())));
x.addField(new Field("mixed_tensor",
new TensorDataType(new TensorType.Builder().mapped("x").indexed("y", 3).build())));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testpredicate");
x.addField(new Field("boolean", DataType.PREDICATE));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testint");
x.addField(new Field("integerfield", DataType.INT));
types.registerDocumentType(x);
}
{
DocumentType x = new DocumentType("testnull");
x.addField(new Field("intfield", DataType.INT));
x.addField(new Field("stringfield", DataType.STRING));
x.addField(new Field("arrayfield", new ArrayDataType(DataType.STRING)));
x.addField(new Field("weightedsetfield", new WeightedSetDataType(DataType.STRING, true, true)));
x.addField(new Field("mapfield", new MapDataType(DataType.STRING, DataType.STRING)));
x.addField(new Field("tensorfield", new TensorDataType(new TensorType.Builder().indexed("x").build())));
types.registerDocumentType(x);
}
}
@After
public void tearDown() throws Exception {
types = null;
parserFactory = null;
exception = ExpectedException.none();
}
private JsonReader createReader(String jsonInput) {
InputStream input = new ByteArrayInputStream(Utf8.toBytes(jsonInput));
return new JsonReader(types, input, parserFactory);
}
@Test
public void readSingleDocumentPut() {
JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" }",
"}"));
DocumentPut put = (DocumentPut) r.readSingleDocument(DocumentParser.SupportedOperation.PUT,
"id:unittest:smoke::doc1");
smokeTestDoc(put.getDocument());
}
@Test
public final void readSingleDocumentUpdate() {
JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',",
" 'fields': {",
" 'something': {",
" 'assign': 'orOther' }}}"));
DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::whee");
FieldUpdate f = doc.getFieldUpdate("something");
assertEquals(1, f.size());
assertTrue(f.getValueUpdate(0) instanceof AssignValueUpdate);
}
@Test
public void readClearField() {
JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',",
" 'fields': {",
" 'int1': {",
" 'assign': null }}}"));
DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentParser.SupportedOperation.UPDATE, "id:unittest:smoke::whee");
FieldUpdate f = doc.getFieldUpdate("int1");
assertEquals(1, f.size());
assertTrue(f.getValueUpdate(0) instanceof ClearValueUpdate);
assertNull(f.getValueUpdate(0).getValue());
}
@Test
public void smokeTest() throws IOException {
JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',",
" 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" }",
"}"));
DocumentParseInfo parseInfo = r.parseDocument().get();
DocumentType docType = r.readDocumentType(parseInfo.documentId);
DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId));
new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put);
smokeTestDoc(put.getDocument());
}
@Test
public void docIdLookaheadTest() throws IOException {
JsonReader r = createReader(inputJson(
"{ 'fields': {",
" 'something': 'smoketest',",
" 'flag': true,",
" 'nalle': 'bamse'",
" },",
" 'put': 'id:unittest:smoke::doc1'",
" }",
"}"));
DocumentParseInfo parseInfo = r.parseDocument().get();
DocumentType docType = r.readDocumentType(parseInfo.documentId);
DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId));
new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put);
smokeTestDoc(put.getDocument());
} |
Default content-type is plaintext? | public void render(OutputStream outputStream) throws IOException {
outputStream.write(data);
} | outputStream.write(data); | public void render(OutputStream outputStream) throws IOException {
outputStream.write(data);
} | class TextResponse extends HttpResponse {
private final byte[] data;
public TextResponse(int code, String data) {
super(code);
this.data = data.getBytes(Charset.forName(DEFAULT_CHARACTER_ENCODING));
}
@Override
} | class TextResponse extends HttpResponse {
private final byte[] data;
public TextResponse(int code, String data) {
super(code);
this.data = data.getBytes(Charset.forName(DEFAULT_CHARACTER_ENCODING));
}
@Override
} |
Consider including the cause in exception | public String serialize() {
var writer = new StringWriter();
try {
TextFormat.write004(writer, this);
} catch (IOException e) {
log.log(Level.WARNING, "Got exception when rendering metrics:", e);
throw new PrometheusRenderingException("Could not render metrics. Check the log for details.");
}
return writer.toString();
} | throw new PrometheusRenderingException("Could not render metrics. Check the log for details."); | public String serialize() {
var writer = new StringWriter();
try {
TextFormat.write004(writer, this);
} catch (IOException e) {
log.log(Level.WARNING, "Got exception when rendering metrics:", e);
throw new PrometheusRenderingException("Could not render metrics. Check the log for details.");
}
return writer.toString();
} | class PrometheusModel implements Enumeration<Collector.MetricFamilySamples> {
private static Logger log = Logger.getLogger(PrometheusModel.class.getName());
private final Iterator<Collector.MetricFamilySamples> metricFamilySamplesIterator;
PrometheusModel(List<Collector.MetricFamilySamples> metricFamilySamples) {
this.metricFamilySamplesIterator = metricFamilySamples.iterator();
}
@Override
public boolean hasMoreElements() {
return metricFamilySamplesIterator.hasNext();
}
@Override
public Collector.MetricFamilySamples nextElement() {
return metricFamilySamplesIterator.next();
}
} | class PrometheusModel implements Enumeration<Collector.MetricFamilySamples> {
private static Logger log = Logger.getLogger(PrometheusModel.class.getName());
private final Iterator<Collector.MetricFamilySamples> metricFamilySamplesIterator;
PrometheusModel(List<Collector.MetricFamilySamples> metricFamilySamples) {
this.metricFamilySamplesIterator = metricFamilySamples.iterator();
}
@Override
public boolean hasMoreElements() {
return metricFamilySamplesIterator.hasNext();
}
@Override
public Collector.MetricFamilySamples nextElement() {
return metricFamilySamplesIterator.next();
}
} |
Yes, default mimetype is text/plain in HttpResponse. We can probably never change this. | public void render(OutputStream outputStream) throws IOException {
outputStream.write(data);
} | outputStream.write(data); | public void render(OutputStream outputStream) throws IOException {
outputStream.write(data);
} | class TextResponse extends HttpResponse {
private final byte[] data;
public TextResponse(int code, String data) {
super(code);
this.data = data.getBytes(Charset.forName(DEFAULT_CHARACTER_ENCODING));
}
@Override
} | class TextResponse extends HttpResponse {
private final byte[] data;
public TextResponse(int code, String data) {
super(code);
this.data = data.getBytes(Charset.forName(DEFAULT_CHARACTER_ENCODING));
}
@Override
} |
It's included in the log, but we don't want to show it in the response. | public String serialize() {
var writer = new StringWriter();
try {
TextFormat.write004(writer, this);
} catch (IOException e) {
log.log(Level.WARNING, "Got exception when rendering metrics:", e);
throw new PrometheusRenderingException("Could not render metrics. Check the log for details.");
}
return writer.toString();
} | throw new PrometheusRenderingException("Could not render metrics. Check the log for details."); | public String serialize() {
var writer = new StringWriter();
try {
TextFormat.write004(writer, this);
} catch (IOException e) {
log.log(Level.WARNING, "Got exception when rendering metrics:", e);
throw new PrometheusRenderingException("Could not render metrics. Check the log for details.");
}
return writer.toString();
} | class PrometheusModel implements Enumeration<Collector.MetricFamilySamples> {
private static Logger log = Logger.getLogger(PrometheusModel.class.getName());
private final Iterator<Collector.MetricFamilySamples> metricFamilySamplesIterator;
PrometheusModel(List<Collector.MetricFamilySamples> metricFamilySamples) {
this.metricFamilySamplesIterator = metricFamilySamples.iterator();
}
@Override
public boolean hasMoreElements() {
return metricFamilySamplesIterator.hasNext();
}
@Override
public Collector.MetricFamilySamples nextElement() {
return metricFamilySamplesIterator.next();
}
} | class PrometheusModel implements Enumeration<Collector.MetricFamilySamples> {
private static Logger log = Logger.getLogger(PrometheusModel.class.getName());
private final Iterator<Collector.MetricFamilySamples> metricFamilySamplesIterator;
PrometheusModel(List<Collector.MetricFamilySamples> metricFamilySamples) {
this.metricFamilySamplesIterator = metricFamilySamples.iterator();
}
@Override
public boolean hasMoreElements() {
return metricFamilySamplesIterator.hasNext();
}
@Override
public Collector.MetricFamilySamples nextElement() {
return metricFamilySamplesIterator.next();
}
} |
Will this code be exercised for currently provisioned hosts too, and that are using the old thin pool sizes? How does that work? AFAIK, only if we're currently enlarging (rounding up) the node resources will `prepare` now instead require a larger disk, which may involve... migration I guess? | private long getThinPoolSize(NodeResources.StorageType storageType) {
if (storageType == NodeResources.StorageType.local && zone().getCloud().dynamicProvisioning()) {
if (zone().system() == SystemName.Public)
return 12;
else
return 24;
}
return 4;
} | return 12; | private long getThinPoolSize(NodeResources.StorageType storageType) {
if (storageType == NodeResources.StorageType.local && zone().getCloud().dynamicProvisioning()) {
if (zone().system() == SystemName.Public)
return 12;
else
return 24;
}
return 4;
} | class NodeResourceLimits {
private final NodeRepository nodeRepository;
public NodeResourceLimits(NodeRepository nodeRepository) {
this.nodeRepository = nodeRepository;
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) {
if (requested.isUnspecified()) return;
if (requested.vcpu() < minAdvertisedVcpu())
illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu());
if (requested.memoryGb() < minAdvertisedMemoryGb(cluster.type()))
illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster.type()));
if (requested.diskGb() < minAdvertisedDiskGb(requested))
illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested));
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) {
return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository),
cluster.type());
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) {
if (realResources.isUnspecified()) return true;
if (realResources.vcpu() < minRealVcpu()) return false;
if (realResources.memoryGb() < minRealMemoryGb(clusterType)) return false;
if (realResources.diskGb() < minRealDiskGb()) return false;
return true;
}
public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType) {
if (requested.isUnspecified()) return requested;
return requested.withVcpu(Math.max(minAdvertisedVcpu(), requested.vcpu()))
.withMemoryGb(Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb()))
.withDiskGb(Math.max(minAdvertisedDiskGb(requested), requested.diskGb()));
}
private double minAdvertisedVcpu() {
if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
return 0.5;
}
private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) {
if (zone().system() == SystemName.dev) return 1;
if (clusterType == ClusterSpec.Type.admin) return 2;
return 4;
}
private double minAdvertisedDiskGb(NodeResources requested) {
return minRealDiskGb() + getThinPoolSize(requested.storageType());
}
private double minRealVcpu() { return minAdvertisedVcpu(); }
private double minRealMemoryGb(ClusterSpec.Type clusterType) {
return minAdvertisedMemoryGb(clusterType) - 1.7;
}
private double minRealDiskGb() { return 6; }
private Zone zone() { return nodeRepository.zone(); }
private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) {
if ( ! unit.isEmpty())
unit = " " + unit;
String message = String.format(Locale.ENGLISH,
"%s cluster '%s': " + type + " " + resource +
" size is %.2f%s but must be at least %.2f%s",
cluster.type().name(), cluster.id().value(), requested, unit, minAllowed, unit);
throw new IllegalArgumentException(message);
}
} | class NodeResourceLimits {
private final NodeRepository nodeRepository;
public NodeResourceLimits(NodeRepository nodeRepository) {
this.nodeRepository = nodeRepository;
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) {
if (requested.isUnspecified()) return;
if (requested.vcpu() < minAdvertisedVcpu())
illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu());
if (requested.memoryGb() < minAdvertisedMemoryGb(cluster.type()))
illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster.type()));
if (requested.diskGb() < minAdvertisedDiskGb(requested))
illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested));
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) {
return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository),
cluster.type());
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) {
if (realResources.isUnspecified()) return true;
if (realResources.vcpu() < minRealVcpu()) return false;
if (realResources.memoryGb() < minRealMemoryGb(clusterType)) return false;
if (realResources.diskGb() < minRealDiskGb()) return false;
return true;
}
public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType) {
if (requested.isUnspecified()) return requested;
return requested.withVcpu(Math.max(minAdvertisedVcpu(), requested.vcpu()))
.withMemoryGb(Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb()))
.withDiskGb(Math.max(minAdvertisedDiskGb(requested), requested.diskGb()));
}
private double minAdvertisedVcpu() {
if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
return 0.5;
}
private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) {
if (zone().system() == SystemName.dev) return 1;
if (clusterType == ClusterSpec.Type.admin) return 2;
return 4;
}
private double minAdvertisedDiskGb(NodeResources requested) {
return minRealDiskGb() + getThinPoolSize(requested.storageType());
}
private double minRealVcpu() { return minAdvertisedVcpu(); }
private double minRealMemoryGb(ClusterSpec.Type clusterType) {
return minAdvertisedMemoryGb(clusterType) - 1.7;
}
private double minRealDiskGb() { return 6; }
private Zone zone() { return nodeRepository.zone(); }
private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) {
if ( ! unit.isEmpty())
unit = " " + unit;
String message = String.format(Locale.ENGLISH,
"%s cluster '%s': " + type + " " + resource +
" size is %.2f%s but must be at least %.2f%s",
cluster.type().name(), cluster.id().value(), requested, unit, minAllowed, unit);
throw new IllegalArgumentException(message);
}
} |
- Fix indentation, `cluster` should be a level in compared to - Or, create the `Runnable` in a separate line and submit it. E.g.: ```java var clusters = getClustersOfApplication(applicationId); var clusterMetrics = new ConcurrentHashMap<ClusterInfo, MetricsAggregator>(); Runnable retrieveMetricsJob = () -> { clusters.parallelStream().forEach(cluster -> { var metrics = MetricsRetriever.requestMetricsForCluster(cluster); clusterMetrics.put(cluster, metrics); }); }; var threadPool = new ForkJoinPool(5); threadPool.submit(retrieveMetricsJob); threadPool.shutdown(); try { threadPool.awaitTermination(1, TimeUnit.MINUTES); } catch (InterruptedException e) { throw new RuntimeException(e); } return new MetricsResponse(200, applicationId, clusterMetrics); ``` | public MetricsResponse getMetrics(ApplicationId applicationId) {
var clusters = getClustersOfApplication(applicationId);
var clusterMetrics = new ConcurrentHashMap<ClusterInfo, MetricsAggregator>();
ForkJoinPool pool = new ForkJoinPool(5);
pool.submit(() ->
clusters.parallelStream().forEach(cluster -> {
var metrics = MetricsRetriever.requestMetricsForCluster(cluster);
clusterMetrics.put(cluster, metrics);
}));
pool.shutdown();
try {
pool.awaitTermination(1, TimeUnit.MINUTES);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new MetricsResponse(200, applicationId, clusterMetrics);
} | clusters.parallelStream().forEach(cluster -> { | public MetricsResponse getMetrics(ApplicationId applicationId) {
Application application = getApplication(applicationId);
return ClusterMetricsRetriever.getMetrics(application);
} | class ApplicationRepository implements com.yahoo.config.provision.Deployer {
private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName());
private final TenantRepository tenantRepository;
private final Optional<Provisioner> hostProvisioner;
private final Optional<InfraDeployer> infraDeployer;
private final ConfigConvergenceChecker convergeChecker;
private final HttpProxy httpProxy;
private final Clock clock;
private final DeployLogger logger = new SilentDeployLogger();
private final ConfigserverConfig configserverConfig;
private final FileDistributionStatus fileDistributionStatus;
private final Orchestrator orchestrator;
private final LogRetriever logRetriever;
@Inject
public ApplicationRepository(TenantRepository tenantRepository,
HostProvisionerProvider hostProvisionerProvider,
InfraDeployerProvider infraDeployerProvider,
ConfigConvergenceChecker configConvergenceChecker,
HttpProxy httpProxy,
ConfigserverConfig configserverConfig,
Orchestrator orchestrator) {
this(tenantRepository,
hostProvisionerProvider.getHostProvisioner(),
infraDeployerProvider.getInfraDeployer(),
configConvergenceChecker,
httpProxy,
configserverConfig,
orchestrator,
new LogRetriever(),
new FileDistributionStatus(),
Clock.systemUTC());
}
public ApplicationRepository(TenantRepository tenantRepository,
Provisioner hostProvisioner,
Orchestrator orchestrator,
Clock clock) {
this(tenantRepository,
hostProvisioner,
orchestrator,
new ConfigserverConfig(new ConfigserverConfig.Builder()),
new LogRetriever(),
clock);
}
public ApplicationRepository(TenantRepository tenantRepository,
Provisioner hostProvisioner,
Orchestrator orchestrator,
LogRetriever logRetriever,
Clock clock) {
this(tenantRepository,
hostProvisioner,
orchestrator,
new ConfigserverConfig(new ConfigserverConfig.Builder()),
logRetriever,
clock);
}
public ApplicationRepository(TenantRepository tenantRepository,
Provisioner hostProvisioner,
Orchestrator orchestrator,
ConfigserverConfig configserverConfig,
LogRetriever logRetriever,
Clock clock) {
this(tenantRepository,
Optional.of(hostProvisioner),
Optional.empty(),
new ConfigConvergenceChecker(),
new HttpProxy(new SimpleHttpFetcher()),
configserverConfig,
orchestrator,
logRetriever,
new FileDistributionStatus(),
clock);
}
private ApplicationRepository(TenantRepository tenantRepository,
Optional<Provisioner> hostProvisioner,
Optional<InfraDeployer> infraDeployer,
ConfigConvergenceChecker configConvergenceChecker,
HttpProxy httpProxy,
ConfigserverConfig configserverConfig,
Orchestrator orchestrator,
LogRetriever logRetriever,
FileDistributionStatus fileDistributionStatus,
Clock clock) {
this.tenantRepository = tenantRepository;
this.hostProvisioner = hostProvisioner;
this.infraDeployer = infraDeployer;
this.convergeChecker = configConvergenceChecker;
this.httpProxy = httpProxy;
this.configserverConfig = configserverConfig;
this.orchestrator = orchestrator;
this.logRetriever = logRetriever;
this.fileDistributionStatus = fileDistributionStatus;
this.clock = clock;
}
public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) {
validateThatLocalSessionIsNotActive(tenant, sessionId);
LocalSession session = getLocalSession(tenant, sessionId);
ApplicationId applicationId = prepareParams.getApplicationId();
Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId);
Slime deployLog = createDeployLog();
DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId);
ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now);
logConfigChangeActions(actions, logger);
log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. ");
return new PrepareResult(sessionId, actions, deployLog);
}
public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams,
boolean ignoreSessionStaleFailure, Instant now) {
PrepareResult result = prepare(tenant, sessionId, prepareParams, now);
activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreSessionStaleFailure);
return result;
}
public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) {
return deploy(in, prepareParams, false, clock.instant());
}
public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams,
boolean ignoreSessionStaleFailure, Instant now) {
File tempDir = Files.createTempDir();
PrepareResult prepareResult;
try {
prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreSessionStaleFailure, now);
} finally {
cleanupTempDirectory(tempDir);
}
return prepareResult;
}
public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) {
return deploy(applicationPackage, prepareParams, false, Instant.now());
}
public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams,
boolean ignoreSessionStaleFailure, Instant now) {
ApplicationId applicationId = prepareParams.getApplicationId();
long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage);
Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
return prepareAndActivate(tenant, sessionId, prepareParams, ignoreSessionStaleFailure, now);
}
/**
* Creates a new deployment from the active application, if available.
* This is used for system internal redeployments, not on application package changes.
*
* @param application the active application to be redeployed
* @return a new deployment from the local active, or empty if a local active application
* was not present for this id (meaning it either is not active or active on another
* node in the config server cluster)
*/
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) {
return deployFromLocalActive(application, false);
}
/**
* Creates a new deployment from the active application, if available.
* This is used for system internal redeployments, not on application package changes.
*
* @param application the active application to be redeployed
* @param bootstrap the deployment is done when bootstrapping
* @return a new deployment from the local active, or empty if a local active application
* was not present for this id (meaning it either is not active or active on another
* node in the config server cluster)
*/
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
boolean bootstrap) {
return deployFromLocalActive(application,
Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)),
bootstrap);
}
/**
* Creates a new deployment from the active application, if available.
* This is used for system internal redeployments, not on application package changes.
*
* @param application the active application to be redeployed
* @param timeout the timeout to use for each individual deployment operation
* @param bootstrap the deployment is done when bootstrapping
* @return a new deployment from the local active, or empty if a local active application
* was not present for this id (meaning it either is not active or active on another
* node in the config server cluster)
*/
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
Duration timeout,
boolean bootstrap) {
Optional<com.yahoo.config.provision.Deployment> infraDeployment = infraDeployer.flatMap(d -> d.getDeployment(application));
if (infraDeployment.isPresent()) return infraDeployment;
Tenant tenant = tenantRepository.getTenant(application.tenant());
if (tenant == null) return Optional.empty();
LocalSession activeSession = getActiveSession(tenant, application);
if (activeSession == null) return Optional.empty();
TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, true, timeoutBudget);
tenant.getLocalSessionRepo().addSession(newSession);
Version version = decideVersion(application, zone().environment(), newSession.getVespaVersion(), bootstrap);
return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock,
false /* don't validate as this is already deployed */, version,
bootstrap));
}
@Override
public Optional<Instant> lastDeployTime(ApplicationId application) {
Tenant tenant = tenantRepository.getTenant(application.tenant());
if (tenant == null) return Optional.empty();
LocalSession activeSession = getActiveSession(tenant, application);
if (activeSession == null) return Optional.empty();
return Optional.of(Instant.ofEpochSecond(activeSession.getCreateTime()));
}
public ApplicationId activate(Tenant tenant,
long sessionId,
TimeoutBudget timeoutBudget,
boolean ignoreSessionStaleFailure) {
LocalSession localSession = getLocalSession(tenant, sessionId);
Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft());
deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure);
deployment.activate();
return localSession.getApplicationId();
}
/** Creates a deployment from an already prepared session; no re-validation is done. */
private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) {
    boolean validate = false; // the session was validated when it was prepared
    return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, validate);
}
/**
 * Deletes an application
 *
 * @return true if the application was found and deleted, false if it was not present
 * @throws RuntimeException if the delete transaction fails. This method is exception safe.
 */
public boolean delete(ApplicationId applicationId) {
    Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
    if (tenant == null) return false;
    TenantApplications tenantApplications = tenant.getApplicationRepo();
    // Lock per application so concurrent deletes/deploys of the same application serialize.
    try (Lock lock = tenantApplications.lock(applicationId)) {
        if ( ! tenantApplications.exists(applicationId)) return false;
        // Delete the active remote session (if any), then wait for the corresponding
        // local session to disappear before cleaning up the rest.
        boolean sessionDeleted = tenantApplications.activeSessionOf(applicationId).map(sessionId -> {
            RemoteSession remoteSession = getRemoteSession(tenant, sessionId);
            remoteSession.createDeleteTransaction().commit();
            log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted");
            Duration waitTime = Duration.ofSeconds(60);
            if (localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) {
                log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted");
                return true;
            } else {
                // Proceed with the rest of the cleanup even if the session lingers; report it in the return value.
                log.log(LogLevel.ERROR, TenantRepository.logPre(applicationId) + "Session " + sessionId + " was not deleted (waited " + waitTime + ")");
                return false;
            }
        }).orElse(true);
        // Remove rotations, application entry and any provisioned hosts in one transaction,
        // so a failure leaves the application state untouched (exception safety).
        NestedTransaction transaction = new NestedTransaction();
        transaction.add(new Rotations(tenant.getCurator(), tenant.getPath()).delete(applicationId));
        transaction.add(tenantApplications.createDeleteTransaction(applicationId));
        hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
        transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId));
        transaction.commit();
        return sessionDeleted;
    }
}
/** Proxies a cluster controller status page request for the given host of the given application. */
public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) {
    Application application = getApplication(applicationId);
    return httpProxy.get(application, hostName, CLUSTERCONTROLLER_CONTAINER.serviceName,
                         "clustercontroller-status/" + pathSuffix);
}
/** Returns the current config generation of the given application. */
public Long getApplicationGeneration(ApplicationId applicationId) {
    Application application = getApplication(applicationId);
    return application.getApplicationGeneration();
}
/** Asks the host provisioner (if present) to restart services on the hosts matched by the filter. */
public void restart(ApplicationId applicationId, HostFilter hostFilter) {
    hostProvisioner.ifPresent(p -> p.restart(applicationId, hostFilter));
}
/** Returns whether the given application is currently suspended in the orchestrator. */
public boolean isSuspended(ApplicationId application) {
    return orchestrator.getAllSuspendedApplications()
                       .contains(application);
}
/** Returns file distribution status for the given application, collected within the given timeout. */
public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) {
    Application application = getApplication(applicationId);
    return fileDistributionStatus.status(application, timeout);
}
/**
 * Deletes file references on disk that are no longer referenced by any loadable
 * application and have not been modified for at least 14 days.
 *
 * @param fileReferencesPath the directory holding file references
 * @return the names of the file references that were deleted
 * @throws RuntimeException if fileReferencesPath is not a directory
 */
public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath) {
    if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory");

    // Collect the file references of all applications that can currently be loaded.
    // Applications that fail to load are skipped instead of aborting the cleanup:
    // the original code mapped Optional::get unconditionally, which throws
    // NoSuchElementException whenever getOptionalApplication returns empty.
    Set<String> fileReferencesInUse = new HashSet<>();
    listApplications().stream()
            .map(this::getOptionalApplication)
            .filter(Optional::isPresent)
            .map(Optional::get)
            .forEach(application -> fileReferencesInUse.addAll(application.getModel().fileReferences().stream()
                    .map(FileReference::value)
                    .collect(Collectors.toSet())));
    log.log(LogLevel.DEBUG, "File references in use : " + fileReferencesInUse);

    // Everything currently on disk.
    Set<String> fileReferencesOnDisk = new HashSet<>();
    File[] filesOnDisk = fileReferencesPath.listFiles();
    if (filesOnDisk != null)
        fileReferencesOnDisk.addAll(Arrays.stream(filesOnDisk).map(File::getName).collect(Collectors.toSet()));
    log.log(LogLevel.DEBUG, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk);

    // Only delete references that are unused AND old enough to not belong to an in-flight deployment.
    Instant instant = Instant.now().minus(Duration.ofDays(14));
    Set<String> fileReferencesToDelete = fileReferencesOnDisk
            .stream()
            .filter(fileReference -> ! fileReferencesInUse.contains(fileReference))
            .filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant))
            .collect(Collectors.toSet());
    if ( ! fileReferencesToDelete.isEmpty()) {
        log.log(LogLevel.INFO, "Will delete file references not in use: " + fileReferencesToDelete);
        fileReferencesToDelete.forEach(fileReference -> {
            File file = new File(fileReferencesPath, fileReference);
            if ( ! IOUtils.recursiveDeleteDir(file))
                log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath());
        });
    }
    return fileReferencesToDelete;
}
/** Returns the application file at the given path within the given session. */
public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) {
    LocalSession session = getLocalSession(tenantRepository.getTenant(tenantName), sessionId);
    return session.getApplicationFile(Path.fromString(path), mode);
}
/** Returns the latest version of the active application with the given id. */
private Application getApplication(ApplicationId applicationId) {
    Optional<Version> anyVersion = Optional.empty();
    return getApplication(applicationId, anyVersion);
}
/**
 * Returns the active application with the given id, for the given vespa version
 * (or the latest version if empty).
 *
 * @throws NotFoundException if the tenant or application is not known
 */
private Application getApplication(ApplicationId applicationId, Optional<Version> version) {
    try {
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found");
        long sessionId = getSessionIdForApplication(tenant, applicationId);
        RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId);
        return session.ensureApplicationLoaded().getForVersionOrLatest(version, clock.instant());
    } catch (NotFoundException e) {
        // Expected when the application is unknown; log without a stack trace.
        log.log(LogLevel.WARNING, "Failed getting application for '" + applicationId + "': " + e.getMessage());
        throw e;
    } catch (Exception e) {
        // Unexpected failure; log with the stack trace, then rethrow to the caller.
        log.log(LogLevel.WARNING, "Failed getting application for '" + applicationId + "'", e);
        throw e;
    }
}
/** Returns the application with the given id, or empty if it cannot be loaded for any reason. */
private Optional<Application> getOptionalApplication(ApplicationId applicationId) {
    try {
        Application application = getApplication(applicationId);
        return Optional.of(application);
    } catch (Exception ignored) {
        // Best effort: callers treat a failed load the same as a missing application.
        return Optional.empty();
    }
}
/** Returns the ids of all active applications across all tenants. */
Set<ApplicationId> listApplications() {
    Set<ApplicationId> applications = new HashSet<>();
    for (Tenant tenant : tenantRepository.getAllTenants())
        applications.addAll(tenant.getApplicationRepo().activeApplications());
    return applications;
}
/** Returns whether the given file was last modified strictly before the given instant. */
private boolean isFileLastModifiedBefore(File fileReference, Instant instant) {
    try {
        Instant lastModified = readAttributes(fileReference.toPath(), BasicFileAttributes.class)
                .lastModifiedTime()
                .toInstant();
        return lastModified.isBefore(instant);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Polls until the remote session with the given id has disappeared, or waitTime elapses.
 *
 * @return true if the session was observed deleted within waitTime, false otherwise
 */
private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) {
    RemoteSessionRepo remoteSessionRepo = tenantRepository.getTenant(applicationId.tenant()).getRemoteSessionRepo();
    Instant end = Instant.now().plus(waitTime);
    do {
        if (remoteSessionRepo.getSession(sessionId) == null) return true;
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            // The original swallowed the interrupt, which would make further sleeps
            // throw immediately and turn this wait into a busy spin. Restore the
            // interrupt status and give up waiting instead.
            Thread.currentThread().interrupt();
            return false;
        }
    } while (Instant.now().isBefore(end));
    return false;
}
/** Checks config convergence for a single service of the given application. */
public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri,
                                                     Duration timeout, Optional<Version> vespaVersion) {
    Application application = getApplication(applicationId, vespaVersion);
    return convergeChecker.checkService(application, hostAndPort, uri, timeout);
}
/** Lists the services of the given application whose config convergence should be checked. */
public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri,
                                                        Duration timeoutPerService, Optional<Version> vespaVersion) {
    Application application = getApplication(applicationId, vespaVersion);
    return convergeChecker.servicesToCheck(application, uri, timeoutPerService);
}
/** Fetches logs for the given application, optionally restricted to one host. */
public HttpResponse getLogs(ApplicationId applicationId, Optional<String> hostname, String apiParams) {
    String uri = getLogServerURI(applicationId, hostname) + apiParams;
    return logRetriever.getLogs(uri);
}
/**
 * Gets the active Session for the given application id.
 *
 * @return the active session, or null if there is no active session for the given application id.
 */
public LocalSession getActiveSession(ApplicationId applicationId) {
    Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
    return getActiveSession(tenant, applicationId);
}
/** Returns the active session id for the given application, throwing NotFoundException if the tenant is unknown. */
public long getSessionIdForApplication(ApplicationId applicationId) {
    Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
    if (tenant == null)
        throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found");
    return getSessionIdForApplication(tenant, applicationId);
}
/** Returns the active session id for the given application within the given tenant. */
private long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
    TenantApplications applications = tenant.getApplicationRepo();
    if (applications == null)
        throw new NotFoundException("Application repo for tenant '" + tenant.getName() + "' not found");
    return applications.requireActiveSessionOf(applicationId);
}
/** Throws IllegalStateException if the remote session with the given id is active. */
public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) {
    Session.Status status = getRemoteSession(tenant, sessionId).getStatus();
    if (Session.Status.ACTIVATE.equals(status))
        throw new IllegalStateException("Session is active: " + sessionId);
}
/** Throws IllegalStateException if the remote session with the given id is not prepared. */
public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) {
    Session.Status status = getRemoteSession(tenant, sessionId).getStatus();
    if ( ! Session.Status.PREPARE.equals(status))
        throw new IllegalStateException("Session not prepared: " + sessionId);
}
/**
 * Creates a new session as a copy of the currently active session of the given
 * application, and registers it in the tenant's local session repo.
 *
 * @return the id of the new session
 */
public long createSessionFromExisting(ApplicationId applicationId,
                                      DeployLogger logger,
                                      boolean internalRedeploy,
                                      TimeoutBudget timeoutBudget) {
    Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
    LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
    SessionFactory sessionFactory = tenant.getSessionFactory();
    // Copy from the session currently active for this application.
    LocalSession fromSession = getExistingSession(tenant, applicationId);
    LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget);
    localSessionRepo.addSession(session);
    return session.getSessionId();
}
/**
 * Creates a new session from a compressed application package stream.
 * The package is decompressed into a temporary directory which is always
 * cleaned up afterwards, also on failure.
 *
 * @return the id of the new session
 */
public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) {
    File tempDir = Files.createTempDir();
    long sessionId;
    try {
        sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir));
    } finally {
        cleanupTempDirectory(tempDir);
    }
    return sessionId;
}
/**
 * Creates a new session from an application package directory and registers it
 * in the tenant's local session repo.
 *
 * @return the id of the new session
 */
public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) {
    Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
    // Ensure the application exists in the tenant's application repo before creating a session for it.
    tenant.getApplicationRepo().createApplication(applicationId);
    LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
    SessionFactory sessionFactory = tenant.getSessionFactory();
    LocalSession session = sessionFactory.createSession(applicationDirectory, applicationId, timeoutBudget);
    localSessionRepo.addSession(session);
    return session.getSessionId();
}
/** Purges expired local sessions for every tenant. */
public void deleteExpiredLocalSessions() {
    for (Tenant tenant : tenantRepository.getAllTenants())
        tenant.getLocalSessionRepo().purgeOldSessions();
}
/** Deletes expired remote sessions for every tenant, returning the total number deleted. */
public int deleteExpiredRemoteSessions(Duration expiryTime) {
    int deleted = 0;
    for (Tenant tenant : tenantRepository.getAllTenants())
        deleted += tenant.getRemoteSessionRepo().deleteExpiredSessions(expiryTime);
    return deleted;
}
/** Returns the tenant repository used by this application repository. */
public TenantRepository tenantRepository() {
    return tenantRepository;
}
/**
 * Deletes tenants that have no active applications, are older than the given TTL,
 * and are not one of the reserved tenants (default, hosted-vespa).
 *
 * @return the names of the tenants that were deleted
 */
public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) {
    // Collect first, then delete: the original performed the deletion inside peek(),
    // which exists for debugging and is not guaranteed to be invoked for every
    // element in all stream pipelines.
    Set<TenantName> tenantsToDelete = tenantRepository.getAllTenantNames().stream()
            .filter(tenantName -> activeApplications(tenantName).isEmpty())
            .filter(tenantName -> !tenantName.equals(TenantName.defaultName()))
            .filter(tenantName -> !tenantName.equals(HOSTED_VESPA_TENANT))
            .filter(tenantName -> tenantRepository.getTenant(tenantName).getCreatedTime().isBefore(now.minus(ttlForUnusedTenant)))
            .collect(Collectors.toSet());
    tenantsToDelete.forEach(tenantRepository::deleteTenant);
    return tenantsToDelete;
}
/** Deletes the given tenant; fails if it still has active applications. */
public void deleteTenant(TenantName tenantName) {
    List<ApplicationId> activeApplications = activeApplications(tenantName);
    if ( ! activeApplications.isEmpty())
        throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications);
    tenantRepository.deleteTenant(tenantName);
}
/** Returns the ids of the active applications of the given tenant. */
private List<ApplicationId> activeApplications(TenantName tenantName) {
    Tenant tenant = tenantRepository.getTenant(tenantName);
    return tenant.getApplicationRepo().activeApplications();
}
/** Returns the application metadata stored in the session with the given id. */
public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) {
    LocalSession session = getLocalSession(tenant, sessionId);
    return session.getMetaData();
}
/** Returns the config server config this repository was created with. */
public ConfigserverConfig configserverConfig() {
    return configserverConfig;
}
/** Throws IllegalStateException if the local session with the given id is active. */
private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) {
    Session.Status status = getLocalSession(tenant, sessionId).getStatus();
    if (Session.Status.ACTIVATE.equals(status))
        throw new IllegalStateException("Session is active: " + sessionId);
}
/** Returns the local session with the given id, throwing NotFoundException if it does not exist. */
private LocalSession getLocalSession(Tenant tenant, long sessionId) {
    LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId);
    if (session == null)
        throw new NotFoundException("Session " + sessionId + " was not found");
    return session;
}
/** Returns the remote session with the given id, throwing NotFoundException if it does not exist. */
private RemoteSession getRemoteSession(Tenant tenant, long sessionId) {
    RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId);
    if (session == null)
        throw new NotFoundException("Session " + sessionId + " was not found");
    return session;
}
/**
 * Returns the currently active application set of the given application,
 * or empty if the application has no active session.
 */
private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) {
    Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
    TenantApplications applicationRepo = tenant.getApplicationRepo();
    try {
        long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId);
        RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId);
        if (currentActiveSession != null) {
            currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded());
        }
    } catch (IllegalArgumentException e) {
        // Intentionally ignored: requireActiveSessionOf throws this when the
        // application has no active session, in which case we return empty.
    }
    return currentActiveApplicationSet;
}
/**
 * Decompresses an application package stream of the given content type into tempDir.
 *
 * @throws IllegalArgumentException if the stream cannot be decompressed
 */
private File decompressApplication(InputStream in, String contentType, File tempDir) {
    try (CompressedApplicationInputStream application =
                 CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) {
        return decompressApplication(application, tempDir);
    } catch (IOException e) {
        throw new IllegalArgumentException("Unable to decompress data in body", e);
    }
}
/**
 * Decompresses the given application stream into tempDir.
 *
 * @throws IllegalArgumentException if the stream cannot be decompressed
 */
private File decompressApplication(CompressedApplicationInputStream in, File tempDir) {
    try {
        return in.decompress(tempDir);
    } catch (IOException e) {
        throw new IllegalArgumentException("Unable to decompress stream", e);
    }
}
/** Deletes the given temporary directory, logging a warning if the deletion fails. */
private void cleanupTempDirectory(File tempDir) {
    logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'");
    boolean deleted = IOUtils.recursiveDeleteDir(tempDir);
    if ( ! deleted)
        logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'");
}
/** Returns the local session backing the currently active session of the given application. */
private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
    long activeSessionId = tenant.getApplicationRepo().requireActiveSessionOf(applicationId);
    return getLocalSession(tenant, activeSessionId);
}
/** Returns the active local session of the given application, or null if it has none. */
private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) {
    TenantApplications applicationRepo = tenant.getApplicationRepo();
    if ( ! applicationRepo.activeApplications().contains(applicationId)) return null;
    return tenant.getLocalSessionRepo().getSession(applicationRepo.requireActiveSessionOf(applicationId));
}
/** Logs restart and re-feed actions implied by a config change, at appropriate levels. */
private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) {
    RestartActions restartActions = actions.getRestartActions();
    if ( ! restartActions.isEmpty())
        logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" +
                                  restartActions.format());

    RefeedActions refeedActions = actions.getRefeedActions();
    if ( ! refeedActions.isEmpty()) {
        // Re-feed is logged at INFO only when every entry is allowed, otherwise WARNING.
        Level level = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed)
                      ? Level.INFO
                      : Level.WARNING;
        logger.log(level, "Change(s) between active and new application that may require re-feed:\n" +
                          refeedActions.format());
    }
}
/** Returns the URI of the container serving logs for the given application (or the given host). */
private String getLogServerURI(ApplicationId applicationId, Optional<String> hostname) {
    // Direct hostname lookup is only permitted for the hosted-Vespa tenant's own applications.
    if (hostname.isPresent()) {
        if (HOSTED_VESPA_TENANT.equals(applicationId.tenant()))
            // NOTE(review): this string literal appears truncated — the "//<host>..." part of
            // the URL was likely stripped as a line comment during extraction. Restore it.
            return "http:
        else throw new IllegalArgumentException("Only hostname paramater unsupported for application " + applicationId);
    }
    Application application = getApplication(applicationId);
    Collection<HostInfo> hostInfos = application.getModel().getHosts();
    // Find the host running the logserver service.
    HostInfo logServerHostInfo = hostInfos.stream()
            .filter(host -> host.getServices().stream()
                    .anyMatch(serviceInfo -> serviceInfo.getServiceType().equalsIgnoreCase("logserver")))
            .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HostInfo for LogServer"));
    // Use the (logserver-)container on that host to serve the logs.
    ServiceInfo serviceInfo = logServerHostInfo.getServices().stream().filter(service -> List.of(LOGSERVER_CONTAINER.serviceName, CONTAINER.serviceName).contains(service.getServiceType()))
            .findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host"));
    int port = servicePort(serviceInfo);
    // NOTE(review): truncated string literal here as well — the URL tail is missing.
    return "http:
}
/** Returns the first port of the given service that is tagged "http". */
private int servicePort(ServiceInfo serviceInfo) {
    return serviceInfo.getPorts().stream()
                      .filter(portInfo -> portInfo.getTags().stream().anyMatch(tag -> tag.equalsIgnoreCase("http")))
                      .findFirst()
                      .orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port"))
                      .getPort();
}
/** Finds the hosts of an application, grouped by cluster name */
private Collection<ClusterInfo> getClustersOfApplication(ApplicationId applicationId) {
    Application application = getApplication(applicationId);
    Map<String, ClusterInfo> clusters = new HashMap<>();
    // Group every host except the logserver host by the cluster it belongs to.
    application.getModel().getHosts().stream()
            .filter(host -> host.getServices().stream().noneMatch(serviceInfo -> serviceInfo.getServiceType().equalsIgnoreCase("logserver")))
            .forEach(hostInfo -> {
                ClusterInfo clusterInfo = createClusterInfo(hostInfo);
                // NOTE(review): truncated string literal — the URL tail ("//<hostname>...")
                // appears to have been stripped as a comment during extraction. Restore it.
                URI host = URI.create("http:
                clusters.computeIfAbsent(clusterInfo.getClusterId(), c -> clusterInfo).addHost(host);
            }
    );
    return clusters.values();
}
/**
 * Returns the cluster info derived from the first service on the given host that
 * provides one.
 *
 * @throws java.util.NoSuchElementException if no service on the host yields cluster info
 */
private ClusterInfo createClusterInfo(HostInfo hostInfo) {
    // The original used the unchecked-get anti-pattern `.findFirst().get().orElseThrow()`;
    // flattening the present Optionals first gives the same result without the bare get().
    return hostInfo.getServices().stream()
                   .map(ClusterInfo::fromServiceInfo)
                   .flatMap(Optional::stream)
                   .findFirst()
                   .orElseThrow();
}
/** Returns version to use when deploying application in given environment */
static Version decideVersion(ApplicationId application, Environment environment, Version sessionVersion, boolean bootstrap) {
    // Manually deployed applications are upgraded to this config server's version,
    // but only when it is safe to do so: same major version, not the hosted-vespa
    // infrastructure tenant, not a tester instance, and not while bootstrapping.
    if ( environment.isManuallyDeployed()
         && sessionVersion.getMajor() == Vtag.currentVersion.getMajor()
         && ! HOSTED_VESPA_TENANT.equals(application.tenant())
         && ! application.instance().isTester()
         && ! bootstrap) {
        return Vtag.currentVersion;
    }
    // Otherwise keep the version the session was deployed with.
    return sessionVersion;
}
/** Creates an empty slime object root used to accumulate deploy log messages. */
public Slime createDeployLog() {
    Slime slime = new Slime();
    slime.setObject();
    return slime;
}
/** Returns the zone this config server is running in, as given by its config. */
public Zone zone() {
    SystemName system = SystemName.from(configserverConfig.system());
    Environment environment = Environment.from(configserverConfig.environment());
    RegionName region = RegionName.from(configserverConfig.region());
    return new Zone(system, environment, region);
}
}

class ApplicationRepository implements com.yahoo.config.provision.Deployer {
private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName());
private final TenantRepository tenantRepository;
private final Optional<Provisioner> hostProvisioner;
private final Optional<InfraDeployer> infraDeployer;
private final ConfigConvergenceChecker convergeChecker;
private final HttpProxy httpProxy;
private final Clock clock;
private final DeployLogger logger = new SilentDeployLogger();
private final ConfigserverConfig configserverConfig;
private final FileDistributionStatus fileDistributionStatus;
private final Orchestrator orchestrator;
private final LogRetriever logRetriever;
@Inject
public ApplicationRepository(TenantRepository tenantRepository,
HostProvisionerProvider hostProvisionerProvider,
InfraDeployerProvider infraDeployerProvider,
ConfigConvergenceChecker configConvergenceChecker,
HttpProxy httpProxy,
ConfigserverConfig configserverConfig,
Orchestrator orchestrator) {
this(tenantRepository,
hostProvisionerProvider.getHostProvisioner(),
infraDeployerProvider.getInfraDeployer(),
configConvergenceChecker,
httpProxy,
configserverConfig,
orchestrator,
new LogRetriever(),
new FileDistributionStatus(),
Clock.systemUTC());
}
public ApplicationRepository(TenantRepository tenantRepository,
Provisioner hostProvisioner,
Orchestrator orchestrator,
Clock clock) {
this(tenantRepository,
hostProvisioner,
orchestrator,
new ConfigserverConfig(new ConfigserverConfig.Builder()),
new LogRetriever(),
clock);
}
public ApplicationRepository(TenantRepository tenantRepository,
Provisioner hostProvisioner,
Orchestrator orchestrator,
LogRetriever logRetriever,
Clock clock) {
this(tenantRepository,
hostProvisioner,
orchestrator,
new ConfigserverConfig(new ConfigserverConfig.Builder()),
logRetriever,
clock);
}
public ApplicationRepository(TenantRepository tenantRepository,
Provisioner hostProvisioner,
Orchestrator orchestrator,
ConfigserverConfig configserverConfig,
LogRetriever logRetriever,
Clock clock) {
this(tenantRepository,
Optional.of(hostProvisioner),
Optional.empty(),
new ConfigConvergenceChecker(),
new HttpProxy(new SimpleHttpFetcher()),
configserverConfig,
orchestrator,
logRetriever,
new FileDistributionStatus(),
clock);
}
private ApplicationRepository(TenantRepository tenantRepository,
Optional<Provisioner> hostProvisioner,
Optional<InfraDeployer> infraDeployer,
ConfigConvergenceChecker configConvergenceChecker,
HttpProxy httpProxy,
ConfigserverConfig configserverConfig,
Orchestrator orchestrator,
LogRetriever logRetriever,
FileDistributionStatus fileDistributionStatus,
Clock clock) {
this.tenantRepository = tenantRepository;
this.hostProvisioner = hostProvisioner;
this.infraDeployer = infraDeployer;
this.convergeChecker = configConvergenceChecker;
this.httpProxy = httpProxy;
this.configserverConfig = configserverConfig;
this.orchestrator = orchestrator;
this.logRetriever = logRetriever;
this.fileDistributionStatus = fileDistributionStatus;
this.clock = clock;
}
/**
 * Validates and prepares the session with the given id for activation.
 *
 * @return the session id, the resulting config change actions, and the deploy log
 * @throws IllegalStateException if the session is already active
 */
public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) {
    // An already active session must not be prepared again.
    validateThatLocalSessionIsNotActive(tenant, sessionId);
    LocalSession session = getLocalSession(tenant, sessionId);
    ApplicationId applicationId = prepareParams.getApplicationId();
    // The currently active application set (if any) is used to compute config change actions.
    Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId);
    Slime deployLog = createDeployLog();
    DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId);
    ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now);
    logConfigChangeActions(actions, logger);
    log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. ");
    return new PrepareResult(sessionId, actions, deployLog);
}
public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams,
boolean ignoreSessionStaleFailure, Instant now) {
PrepareResult result = prepare(tenant, sessionId, prepareParams, now);
activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreSessionStaleFailure);
return result;
}
public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) {
return deploy(in, prepareParams, false, clock.instant());
}
public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams,
boolean ignoreSessionStaleFailure, Instant now) {
File tempDir = Files.createTempDir();
PrepareResult prepareResult;
try {
prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreSessionStaleFailure, now);
} finally {
cleanupTempDirectory(tempDir);
}
return prepareResult;
}
/** Deploys the given application package directory, with default deploy parameters. */
public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) {
    // Use the injected clock rather than Instant.now(), for consistency with the
    // stream-based deploy overload and to keep deployments testable with a controlled clock.
    return deploy(applicationPackage, prepareParams, false, clock.instant());
}
public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams,
boolean ignoreSessionStaleFailure, Instant now) {
ApplicationId applicationId = prepareParams.getApplicationId();
long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage);
Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
return prepareAndActivate(tenant, sessionId, prepareParams, ignoreSessionStaleFailure, now);
}
/**
* Creates a new deployment from the active application, if available.
* This is used for system internal redeployments, not on application package changes.
*
* @param application the active application to be redeployed
* @return a new deployment from the local active, or empty if a local active application
* was not present for this id (meaning it either is not active or active on another
* node in the config server cluster)
*/
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) {
return deployFromLocalActive(application, false);
}
/**
* Creates a new deployment from the active application, if available.
* This is used for system internal redeployments, not on application package changes.
*
* @param application the active application to be redeployed
* @param bootstrap the deployment is done when bootstrapping
* @return a new deployment from the local active, or empty if a local active application
* was not present for this id (meaning it either is not active or active on another
* node in the config server cluster)
*/
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
boolean bootstrap) {
return deployFromLocalActive(application,
Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)),
bootstrap);
}
/**
* Creates a new deployment from the active application, if available.
* This is used for system internal redeployments, not on application package changes.
*
* @param application the active application to be redeployed
* @param timeout the timeout to use for each individual deployment operation
* @param bootstrap the deployment is done when bootstrapping
* @return a new deployment from the local active, or empty if a local active application
* was not present for this id (meaning it either is not active or active on another
* node in the config server cluster)
*/
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
Duration timeout,
boolean bootstrap) {
Optional<com.yahoo.config.provision.Deployment> infraDeployment = infraDeployer.flatMap(d -> d.getDeployment(application));
if (infraDeployment.isPresent()) return infraDeployment;
Tenant tenant = tenantRepository.getTenant(application.tenant());
if (tenant == null) return Optional.empty();
LocalSession activeSession = getActiveSession(tenant, application);
if (activeSession == null) return Optional.empty();
TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, true, timeoutBudget);
tenant.getLocalSessionRepo().addSession(newSession);
Version version = decideVersion(application, zone().environment(), newSession.getVespaVersion(), bootstrap);
return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock,
false /* don't validate as this is already deployed */, version,
bootstrap));
}
@Override
public Optional<Instant> lastDeployTime(ApplicationId application) {
Tenant tenant = tenantRepository.getTenant(application.tenant());
if (tenant == null) return Optional.empty();
LocalSession activeSession = getActiveSession(tenant, application);
if (activeSession == null) return Optional.empty();
return Optional.of(Instant.ofEpochSecond(activeSession.getCreateTime()));
}
public ApplicationId activate(Tenant tenant,
long sessionId,
TimeoutBudget timeoutBudget,
boolean ignoreSessionStaleFailure) {
LocalSession localSession = getLocalSession(tenant, sessionId);
Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft());
deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure);
deployment.activate();
return localSession.getApplicationId();
}
private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) {
return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, false);
}
/**
* Deletes an application
*
* @return true if the application was found and deleted, false if it was not present
* @throws RuntimeException if the delete transaction fails. This method is exception safe.
*/
public boolean delete(ApplicationId applicationId) {
Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
if (tenant == null) return false;
TenantApplications tenantApplications = tenant.getApplicationRepo();
try (Lock lock = tenantApplications.lock(applicationId)) {
if ( ! tenantApplications.exists(applicationId)) return false;
boolean sessionDeleted = tenantApplications.activeSessionOf(applicationId).map(sessionId -> {
RemoteSession remoteSession = getRemoteSession(tenant, sessionId);
remoteSession.createDeleteTransaction().commit();
log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted");
Duration waitTime = Duration.ofSeconds(60);
if (localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) {
log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted");
return true;
} else {
log.log(LogLevel.ERROR, TenantRepository.logPre(applicationId) + "Session " + sessionId + " was not deleted (waited " + waitTime + ")");
return false;
}
}).orElse(true);
NestedTransaction transaction = new NestedTransaction();
transaction.add(new Rotations(tenant.getCurator(), tenant.getPath()).delete(applicationId));
transaction.add(tenantApplications.createDeleteTransaction(applicationId));
hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId));
transaction.commit();
return sessionDeleted;
}
}
public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) {
String relativePath = "clustercontroller-status/" + pathSuffix;
return httpProxy.get(getApplication(applicationId), hostName,
CLUSTERCONTROLLER_CONTAINER.serviceName, relativePath);
}
public Long getApplicationGeneration(ApplicationId applicationId) {
return getApplication(applicationId).getApplicationGeneration();
}
public void restart(ApplicationId applicationId, HostFilter hostFilter) {
hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter));
}
public boolean isSuspended(ApplicationId application) {
return orchestrator.getAllSuspendedApplications().contains(application);
}
public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) {
return fileDistributionStatus.status(getApplication(applicationId), timeout);
}
/**
 * Deletes file references on disk that are no longer referenced by any loadable
 * application and have not been modified for at least 14 days.
 *
 * @param fileReferencesPath the directory holding file references
 * @return the names of the file references that were deleted
 * @throws RuntimeException if fileReferencesPath is not a directory
 */
public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath) {
    if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory");

    // Collect the file references of all applications that can currently be loaded.
    // Applications that fail to load are skipped instead of aborting the cleanup:
    // the original code mapped Optional::get unconditionally, which throws
    // NoSuchElementException whenever getOptionalApplication returns empty.
    Set<String> fileReferencesInUse = new HashSet<>();
    listApplications().stream()
            .map(this::getOptionalApplication)
            .filter(Optional::isPresent)
            .map(Optional::get)
            .forEach(application -> fileReferencesInUse.addAll(application.getModel().fileReferences().stream()
                    .map(FileReference::value)
                    .collect(Collectors.toSet())));
    log.log(LogLevel.DEBUG, "File references in use : " + fileReferencesInUse);

    // Everything currently on disk.
    Set<String> fileReferencesOnDisk = new HashSet<>();
    File[] filesOnDisk = fileReferencesPath.listFiles();
    if (filesOnDisk != null)
        fileReferencesOnDisk.addAll(Arrays.stream(filesOnDisk).map(File::getName).collect(Collectors.toSet()));
    log.log(LogLevel.DEBUG, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk);

    // Only delete references that are unused AND old enough to not belong to an in-flight deployment.
    Instant instant = Instant.now().minus(Duration.ofDays(14));
    Set<String> fileReferencesToDelete = fileReferencesOnDisk
            .stream()
            .filter(fileReference -> ! fileReferencesInUse.contains(fileReference))
            .filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant))
            .collect(Collectors.toSet());
    if ( ! fileReferencesToDelete.isEmpty()) {
        log.log(LogLevel.INFO, "Will delete file references not in use: " + fileReferencesToDelete);
        fileReferencesToDelete.forEach(fileReference -> {
            File file = new File(fileReferencesPath, fileReference);
            if ( ! IOUtils.recursiveDeleteDir(file))
                log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath());
        });
    }
    return fileReferencesToDelete;
}
public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) {
Tenant tenant = tenantRepository.getTenant(tenantName);
return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode);
}
private Application getApplication(ApplicationId applicationId) {
return getApplication(applicationId, Optional.empty());
}
private Application getApplication(ApplicationId applicationId, Optional<Version> version) {
try {
Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found");
long sessionId = getSessionIdForApplication(tenant, applicationId);
RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId);
return session.ensureApplicationLoaded().getForVersionOrLatest(version, clock.instant());
} catch (NotFoundException e) {
log.log(LogLevel.WARNING, "Failed getting application for '" + applicationId + "': " + e.getMessage());
throw e;
} catch (Exception e) {
log.log(LogLevel.WARNING, "Failed getting application for '" + applicationId + "'", e);
throw e;
}
}
private Optional<Application> getOptionalApplication(ApplicationId applicationId) {
try {
return Optional.of(getApplication(applicationId));
} catch (Exception e) {
return Optional.empty();
}
}
Set<ApplicationId> listApplications() {
return tenantRepository.getAllTenants().stream()
.flatMap(tenant -> tenant.getApplicationRepo().activeApplications().stream())
.collect(Collectors.toSet());
}
private boolean isFileLastModifiedBefore(File fileReference, Instant instant) {
BasicFileAttributes fileAttributes;
try {
fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class);
return fileAttributes.lastModifiedTime().toInstant().isBefore(instant);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) {
RemoteSessionRepo remoteSessionRepo = tenantRepository.getTenant(applicationId.tenant()).getRemoteSessionRepo();
Instant end = Instant.now().plus(waitTime);
do {
if (remoteSessionRepo.getSession(sessionId) == null) return true;
try { Thread.sleep(10); } catch (InterruptedException e) { /* ignored */}
} while (Instant.now().isBefore(end));
return false;
}
public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri,
Duration timeout, Optional<Version> vespaVersion) {
return convergeChecker.checkService(getApplication(applicationId, vespaVersion), hostAndPort, uri, timeout);
}
public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri,
Duration timeoutPerService, Optional<Version> vespaVersion) {
return convergeChecker.servicesToCheck(getApplication(applicationId, vespaVersion), uri, timeoutPerService);
}
public HttpResponse getLogs(ApplicationId applicationId, Optional<String> hostname, String apiParams) {
String logServerURI = getLogServerURI(applicationId, hostname) + apiParams;
return logRetriever.getLogs(logServerURI);
}
/**
* Gets the active Session for the given application id.
*
* @return the active session, or null if there is no active session for the given application id.
*/
public LocalSession getActiveSession(ApplicationId applicationId) {
return getActiveSession(tenantRepository.getTenant(applicationId.tenant()), applicationId);
}
public long getSessionIdForApplication(ApplicationId applicationId) {
Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found");
return getSessionIdForApplication(tenant, applicationId);
}
private long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
TenantApplications applicationRepo = tenant.getApplicationRepo();
if (applicationRepo == null)
throw new NotFoundException("Application repo for tenant '" + tenant.getName() + "' not found");
return applicationRepo.requireActiveSessionOf(applicationId);
}
public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) {
RemoteSession session = getRemoteSession(tenant, sessionId);
if (Session.Status.ACTIVATE.equals(session.getStatus())) {
throw new IllegalStateException("Session is active: " + sessionId);
}
}
public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) {
RemoteSession session = getRemoteSession(tenant, sessionId);
if ( ! Session.Status.PREPARE.equals(session.getStatus()))
throw new IllegalStateException("Session not prepared: " + sessionId);
}
public long createSessionFromExisting(ApplicationId applicationId,
DeployLogger logger,
boolean internalRedeploy,
TimeoutBudget timeoutBudget) {
Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
SessionFactory sessionFactory = tenant.getSessionFactory();
LocalSession fromSession = getExistingSession(tenant, applicationId);
LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget);
localSessionRepo.addSession(session);
return session.getSessionId();
}
public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) {
File tempDir = Files.createTempDir();
long sessionId;
try {
sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir));
} finally {
cleanupTempDirectory(tempDir);
}
return sessionId;
}
public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) {
Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
tenant.getApplicationRepo().createApplication(applicationId);
LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
SessionFactory sessionFactory = tenant.getSessionFactory();
LocalSession session = sessionFactory.createSession(applicationDirectory, applicationId, timeoutBudget);
localSessionRepo.addSession(session);
return session.getSessionId();
}
public void deleteExpiredLocalSessions() {
tenantRepository.getAllTenants().forEach(tenant -> tenant.getLocalSessionRepo().purgeOldSessions());
}
public int deleteExpiredRemoteSessions(Duration expiryTime) {
return tenantRepository.getAllTenants()
.stream()
.map(tenant -> tenant.getRemoteSessionRepo().deleteExpiredSessions(expiryTime))
.mapToInt(i -> i)
.sum();
}
public TenantRepository tenantRepository() {
return tenantRepository;
}
public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) {
return tenantRepository.getAllTenantNames().stream()
.filter(tenantName -> activeApplications(tenantName).isEmpty())
.filter(tenantName -> !tenantName.equals(TenantName.defaultName()))
.filter(tenantName -> !tenantName.equals(HOSTED_VESPA_TENANT))
.filter(tenantName -> tenantRepository.getTenant(tenantName).getCreatedTime().isBefore(now.minus(ttlForUnusedTenant)))
.peek(tenantRepository::deleteTenant)
.collect(Collectors.toSet());
}
public void deleteTenant(TenantName tenantName) {
List<ApplicationId> activeApplications = activeApplications(tenantName);
if (activeApplications.isEmpty())
tenantRepository.deleteTenant(tenantName);
else
throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications);
}
private List<ApplicationId> activeApplications(TenantName tenantName) {
return tenantRepository.getTenant(tenantName).getApplicationRepo().activeApplications();
}
public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) {
return getLocalSession(tenant, sessionId).getMetaData();
}
public ConfigserverConfig configserverConfig() {
return configserverConfig;
}
private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) {
LocalSession session = getLocalSession(tenant, sessionId);
if (Session.Status.ACTIVATE.equals(session.getStatus())) {
throw new IllegalStateException("Session is active: " + sessionId);
}
}
private LocalSession getLocalSession(Tenant tenant, long sessionId) {
LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId);
if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
return session;
}
private RemoteSession getRemoteSession(Tenant tenant, long sessionId) {
RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId);
if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
return session;
}
private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) {
Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
TenantApplications applicationRepo = tenant.getApplicationRepo();
try {
long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId);
RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId);
if (currentActiveSession != null) {
currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded());
}
} catch (IllegalArgumentException e) {
}
return currentActiveApplicationSet;
}
private File decompressApplication(InputStream in, String contentType, File tempDir) {
try (CompressedApplicationInputStream application =
CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) {
return decompressApplication(application, tempDir);
} catch (IOException e) {
throw new IllegalArgumentException("Unable to decompress data in body", e);
}
}
private File decompressApplication(CompressedApplicationInputStream in, File tempDir) {
try {
return in.decompress(tempDir);
} catch (IOException e) {
throw new IllegalArgumentException("Unable to decompress stream", e);
}
}
private void cleanupTempDirectory(File tempDir) {
logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'");
if (!IOUtils.recursiveDeleteDir(tempDir)) {
logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'");
}
}
private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
TenantApplications applicationRepo = tenant.getApplicationRepo();
return getLocalSession(tenant, applicationRepo.requireActiveSessionOf(applicationId));
}
private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) {
TenantApplications applicationRepo = tenant.getApplicationRepo();
if (applicationRepo.activeApplications().contains(applicationId)) {
return tenant.getLocalSessionRepo().getSession(applicationRepo.requireActiveSessionOf(applicationId));
}
return null;
}
private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) {
RestartActions restartActions = actions.getRestartActions();
if ( ! restartActions.isEmpty()) {
logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" +
restartActions.format());
}
RefeedActions refeedActions = actions.getRefeedActions();
if ( ! refeedActions.isEmpty()) {
boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed);
logger.log(allAllowed ? Level.INFO : Level.WARNING,
"Change(s) between active and new application that may require re-feed:\n" +
refeedActions.format());
}
}
private String getLogServerURI(ApplicationId applicationId, Optional<String> hostname) {
if (hostname.isPresent()) {
if (HOSTED_VESPA_TENANT.equals(applicationId.tenant()))
return "http:
else throw new IllegalArgumentException("Only hostname paramater unsupported for application " + applicationId);
}
Application application = getApplication(applicationId);
Collection<HostInfo> hostInfos = application.getModel().getHosts();
HostInfo logServerHostInfo = hostInfos.stream()
.filter(host -> host.getServices().stream()
.anyMatch(serviceInfo -> serviceInfo.getServiceType().equalsIgnoreCase("logserver")))
.findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HostInfo for LogServer"));
ServiceInfo serviceInfo = logServerHostInfo.getServices().stream().filter(service -> List.of(LOGSERVER_CONTAINER.serviceName, CONTAINER.serviceName).contains(service.getServiceType()))
.findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host"));
int port = servicePort(serviceInfo);
return "http:
}
private int servicePort(ServiceInfo serviceInfo) {
int port = serviceInfo.getPorts().stream()
.filter(portInfo -> portInfo.getTags().stream().anyMatch(tag -> tag.equalsIgnoreCase("http")))
.findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port"))
.getPort();
return port;
}
/** Returns version to use when deploying application in given environment */
static Version decideVersion(ApplicationId application, Environment environment, Version sessionVersion, boolean bootstrap) {
if ( environment.isManuallyDeployed()
&& sessionVersion.getMajor() == Vtag.currentVersion.getMajor()
&& ! HOSTED_VESPA_TENANT.equals(application.tenant())
&& ! application.instance().isTester()
&& ! bootstrap) {
return Vtag.currentVersion;
}
return sessionVersion;
}
public Slime createDeployLog() {
Slime deployLog = new Slime();
deployLog.setObject();
return deployLog;
}
public Zone zone() {
return new Zone(SystemName.from(configserverConfig.system()),
Environment.from(configserverConfig.environment()),
RegionName.from(configserverConfig.region()));
}
} |
Add the children hostnames to these error messages? | void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e)); | void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void convergeToCapacity(NodeList nodes) {
List<Node> emptyHosts = getAllocatableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder())
.collect(Collectors.toList());
int hostI = 0;
int capacityI = 0;
while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
emptyHosts.remove(hostI);
preProvisionCapacity.remove(capacityI);
} else {
hostI++;
}
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
emptyHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/** @return Nodes of type host, in any state, that have no children with allocation */
private static List<Node> getAllocatableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values().stream()
.sorted((n1, n2) -> NodeResourceComparator.memoryDiskCpuOrder().compare(n1.flavor().resources(), n2.flavor().resources()))
.collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void convergeToCapacity(NodeList nodes) {
Collection<Node> removableHosts = getRemovableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
.collect(Collectors.toList());
for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
NodeResources resources = it.next();
removableHosts.stream()
.filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
.filter(host -> host.flavor().resources().satisfies(resources))
.min(Comparator.comparingInt(n -> n.flavor().cost()))
.ifPresent(host -> {
removableHosts.remove(host);
it.remove();
});
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
removableHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
private static Collection<Node> getRemovableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision())
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values();
}
} |
If someone specified less resources than what is now legal, their deployment will fail. If someone specified resource range and they are nearly not utilizing anything, then they will be scaled to minimum - which is now increased, so they will be migrated. The rest should not be changed. | private long getThinPoolSize(NodeResources.StorageType storageType) {
if (storageType == NodeResources.StorageType.local && zone().getCloud().dynamicProvisioning()) {
if (zone().system() == SystemName.Public)
return 12;
else
return 24;
}
return 4;
} | return 12; | private long getThinPoolSize(NodeResources.StorageType storageType) {
if (storageType == NodeResources.StorageType.local && zone().getCloud().dynamicProvisioning()) {
if (zone().system() == SystemName.Public)
return 12;
else
return 24;
}
return 4;
} | class NodeResourceLimits {
private final NodeRepository nodeRepository;
public NodeResourceLimits(NodeRepository nodeRepository) {
this.nodeRepository = nodeRepository;
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) {
if (requested.isUnspecified()) return;
if (requested.vcpu() < minAdvertisedVcpu())
illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu());
if (requested.memoryGb() < minAdvertisedMemoryGb(cluster.type()))
illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster.type()));
if (requested.diskGb() < minAdvertisedDiskGb(requested))
illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested));
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) {
return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository),
cluster.type());
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) {
if (realResources.isUnspecified()) return true;
if (realResources.vcpu() < minRealVcpu()) return false;
if (realResources.memoryGb() < minRealMemoryGb(clusterType)) return false;
if (realResources.diskGb() < minRealDiskGb()) return false;
return true;
}
public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType) {
if (requested.isUnspecified()) return requested;
return requested.withVcpu(Math.max(minAdvertisedVcpu(), requested.vcpu()))
.withMemoryGb(Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb()))
.withDiskGb(Math.max(minAdvertisedDiskGb(requested), requested.diskGb()));
}
private double minAdvertisedVcpu() {
if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
return 0.5;
}
private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) {
if (zone().system() == SystemName.dev) return 1;
if (clusterType == ClusterSpec.Type.admin) return 2;
return 4;
}
private double minAdvertisedDiskGb(NodeResources requested) {
return minRealDiskGb() + getThinPoolSize(requested.storageType());
}
private double minRealVcpu() { return minAdvertisedVcpu(); }
private double minRealMemoryGb(ClusterSpec.Type clusterType) {
return minAdvertisedMemoryGb(clusterType) - 1.7;
}
private double minRealDiskGb() { return 6; }
private Zone zone() { return nodeRepository.zone(); }
private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) {
if ( ! unit.isEmpty())
unit = " " + unit;
String message = String.format(Locale.ENGLISH,
"%s cluster '%s': " + type + " " + resource +
" size is %.2f%s but must be at least %.2f%s",
cluster.type().name(), cluster.id().value(), requested, unit, minAllowed, unit);
throw new IllegalArgumentException(message);
}
} | class NodeResourceLimits {
private final NodeRepository nodeRepository;
public NodeResourceLimits(NodeRepository nodeRepository) {
this.nodeRepository = nodeRepository;
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ClusterSpec cluster) {
if (requested.isUnspecified()) return;
if (requested.vcpu() < minAdvertisedVcpu())
illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu());
if (requested.memoryGb() < minAdvertisedMemoryGb(cluster.type()))
illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster.type()));
if (requested.diskGb() < minAdvertisedDiskGb(requested))
illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested));
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeCandidate candidateNode, ClusterSpec cluster) {
return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository),
cluster.type());
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeResources realResources, ClusterSpec.Type clusterType) {
if (realResources.isUnspecified()) return true;
if (realResources.vcpu() < minRealVcpu()) return false;
if (realResources.memoryGb() < minRealMemoryGb(clusterType)) return false;
if (realResources.diskGb() < minRealDiskGb()) return false;
return true;
}
public NodeResources enlargeToLegal(NodeResources requested, ClusterSpec.Type clusterType) {
if (requested.isUnspecified()) return requested;
return requested.withVcpu(Math.max(minAdvertisedVcpu(), requested.vcpu()))
.withMemoryGb(Math.max(minAdvertisedMemoryGb(clusterType), requested.memoryGb()))
.withDiskGb(Math.max(minAdvertisedDiskGb(requested), requested.diskGb()));
}
private double minAdvertisedVcpu() {
if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
return 0.5;
}
private double minAdvertisedMemoryGb(ClusterSpec.Type clusterType) {
if (zone().system() == SystemName.dev) return 1;
if (clusterType == ClusterSpec.Type.admin) return 2;
return 4;
}
private double minAdvertisedDiskGb(NodeResources requested) {
return minRealDiskGb() + getThinPoolSize(requested.storageType());
}
private double minRealVcpu() { return minAdvertisedVcpu(); }
private double minRealMemoryGb(ClusterSpec.Type clusterType) {
return minAdvertisedMemoryGb(clusterType) - 1.7;
}
private double minRealDiskGb() { return 6; }
private Zone zone() { return nodeRepository.zone(); }
private void illegal(String type, String resource, String unit, ClusterSpec cluster, double requested, double minAllowed) {
if ( ! unit.isEmpty())
unit = " " + unit;
String message = String.format(Locale.ENGLISH,
"%s cluster '%s': " + type + " " + resource +
" size is %.2f%s but must be at least %.2f%s",
cluster.type().name(), cluster.id().value(), requested, unit, minAllowed, unit);
throw new IllegalArgumentException(message);
}
} |
Agent.system | void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure"); | void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
    // No-op unless dynamic provisioning has been switched on for this system.
    if (dynamicProvisioningEnabled.value()) {
        try (Mutex lock = nodeRepository().lockAllocation()) {
            NodeList allNodes = nodeRepository().list();
            updateProvisioningNodes(allNodes, lock);
            convergeToCapacity(allNodes);
        }
    }
}
/**
 * Converges the set of empty provisioned hosts towards the pre-provisioned
 * capacity given by the PREPROVISION_CAPACITY flag: capacity that no existing
 * empty host can satisfy is provisioned as new hosts, and empty hosts left
 * unmatched are deprovisioned and removed.
 *
 * NOTE(review): getAllocatableHosts() returns hosts in any state, so a host
 * that was manually failed or parked is also deprovisioned here — confirm
 * this is intended.
 */
void convergeToCapacity(NodeList nodes) {
    List<Node> emptyHosts = getAllocatableHosts(nodes);  // hosts with no allocated children, ascending by resources
    // One NodeResources entry per wanted pre-provisioned host, sorted the same
    // way as the hosts so the matching loop below can walk both lists in step.
    List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
            .flatMap(cap -> {
                NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
                return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
            })
            .sorted(NodeResourceComparator.memoryDiskCpuOrder())
            .collect(Collectors.toList());
    // Greedily pair each capacity entry with the first allocatable host that satisfies it.
    // On a match both entries are removed and neither index advances, because remove(i)
    // shifts the next candidate into position i.
    // NOTE(review): List.remove(index) is O(n) per call — fine for small lists only.
    int hostI = 0;
    int capacityI = 0;
    while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
        if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
            emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
            emptyHosts.remove(hostI);
            preProvisionCapacity.remove(capacityI);
        } else {
            hostI++;
        }
    }
    // Capacity still unmatched: provision new hosts for it. Failures are logged and
    // retried on the next maintenance run rather than aborting the whole pass.
    preProvisionCapacity.forEach(resources -> {
        try {
            List<Node> hosts = hostProvisioner.provisionHosts(
                    nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
                    .map(ProvisionedHost::generateHost)
                    .collect(Collectors.toList());
            nodeRepository().addNodes(hosts);
        } catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
            log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
        }
    });
    // Hosts still unmatched back no wanted capacity: deprovision and remove them.
    emptyHosts.forEach(host -> {
        try {
            hostProvisioner.deprovision(host);
            nodeRepository().removeRecursively(host, true);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
        }
    });
}
/** Returns nodes of type host, in any state, that have no children with an allocation. */
private static List<Node> getAllocatableHosts(NodeList nodes) {
    // Index every host by hostname, then evict each host that is the parent of an allocated node.
    Map<String, Node> candidateHostsByName = nodes.nodeType(NodeType.host)
            .asList().stream()
            .collect(Collectors.toMap(Node::hostname, Function.identity()));
    for (Node node : nodes.asList()) {
        if ( ! node.allocation().isPresent()) continue;
        node.parentHostname().ifPresent(candidateHostsByName::remove);
    }
    // Smallest (by memory, disk, cpu) hosts first.
    return candidateHostsByName.values().stream()
            .sorted((a, b) -> NodeResourceComparator.memoryDiskCpuOrder()
                    .compare(a.flavor().resources(), b.flavor().resources()))
            .collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
/**
 * Converges removable hosts towards the capacity given by the PREPROVISION_CAPACITY
 * flag: each wanted capacity entry claims the cheapest existing host able to hold it;
 * capacity left unmatched is provisioned as new hosts, and hosts left unclaimed are
 * deprovisioned and removed.
 */
void convergeToCapacity(NodeList nodes) {
    Collection<Node> removableHosts = getRemovableHosts(nodes);
    // One NodeResources entry per wanted host, largest (memory, disk, cpu) first so
    // the biggest requirements get first pick of the existing hosts.
    List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
            .flatMap(cap -> {
                NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
                return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
            })
            .sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
            .collect(Collectors.toList());
    // For each capacity entry, claim the cheapest allocatable host that satisfies it.
    // Satisfied capacity is removed through the iterator, which is safe while iterating.
    for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
        NodeResources resources = it.next();
        removableHosts.stream()
                .filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
                .filter(host -> host.flavor().resources().satisfies(resources))
                .min(Comparator.comparingInt(n -> n.flavor().cost()))
                .ifPresent(host -> {
                    removableHosts.remove(host);
                    it.remove();
                });
    }
    // Capacity with no matching host: provision new hosts. Failures are logged and
    // retried on the next maintenance run rather than aborting the whole pass.
    preProvisionCapacity.forEach(resources -> {
        try {
            List<Node> hosts = hostProvisioner.provisionHosts(
                    nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
                    .map(ProvisionedHost::generateHost)
                    .collect(Collectors.toList());
            nodeRepository().addNodes(hosts);
        } catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
            log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
        }
    });
    // Hosts that back no wanted capacity: deprovision and remove from the node repository.
    removableHosts.forEach(host -> {
        try {
            hostProvisioner.deprovision(host);
            nodeRepository().removeRecursively(host, true);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
        }
    });
}
/**
 * Returns hosts that may be deprovisioned: nodes of type host with no allocated
 * children, excluding parked hosts unless they are explicitly marked
 * wantToDeprovision — so a manually parked host is not torn down by accident.
 */
private static Collection<Node> getRemovableHosts(NodeList nodes) {
    Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
            .asList().stream()
            .filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision())
            .collect(Collectors.toMap(Node::hostname, Function.identity()))
    // Drop every host that is the parent of at least one allocated node.
    nodes.asList().stream()
            .filter(node -> node.allocation().isPresent())
            .flatMap(node -> node.parentHostname().stream())
            .distinct()
            .forEach(hostsByHostname::remove);
    return hostsByHostname.values();
}
} |
A host that has been manually failed or parked would immediately be deprovisioned by this code, which seems wrong. | void convergeToCapacity(NodeList nodes) {
List<Node> emptyHosts = getAllocatableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder())
.collect(Collectors.toList());
int hostI = 0;
int capacityI = 0;
while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
emptyHosts.remove(hostI);
preProvisionCapacity.remove(capacityI);
} else {
hostI++;
}
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
emptyHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | void convergeToCapacity(NodeList nodes) {
Collection<Node> removableHosts = getRemovableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
.collect(Collectors.toList());
for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
NodeResources resources = it.next();
removableHosts.stream()
.filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
.filter(host -> host.flavor().resources().satisfies(resources))
.min(Comparator.comparingInt(n -> n.flavor().cost()))
.ifPresent(host -> {
removableHosts.remove(host);
it.remove();
});
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
removableHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/** @return Nodes of type host, in any state, that have no children with allocation */
private static List<Node> getAllocatableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values().stream()
.sorted((n1, n2) -> NodeResourceComparator.memoryDiskCpuOrder().compare(n1.flavor().resources(), n2.flavor().resources()))
.collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
/**
 * Completes provisioning of hosts in state 'provisioned' by asking the host
 * provisioner to finish each host together with the child nodes placed on it,
 * then persisting the updated nodes under the given allocation lock.
 */
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
    Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
            .asList().stream()
            .collect(Collectors.toMap(Node::hostname, Function.identity()));
    // Group all nodes whose parent is such a provisioned host under that host.
    Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
            .filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
            .collect(Collectors.groupingBy(
                    node -> provisionedHostsByHostname.get(node.parentHostname().get()),
                    Collectors.toSet()));
    nodesByProvisionedParent.forEach((host, children) -> {
        try {
            List<Node> updatedNodes = hostProvisioner.provision(host, children);
            nodeRepository().write(updatedNodes, lock);
        } catch (IllegalArgumentException | IllegalStateException e) {
            // Expected transient conditions: log briefly and let the next run retry.
            log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
                    Exceptions.toMessageString(e));
        } catch (FatalProvisioningException e) {
            // Unrecoverable provisioning failure: fail the host and everything on it.
            log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
                    " children, failing out the host recursively", e);
            nodeRepository().failRecursively(
                    host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
        }
    });
}
private static Collection<Node> getRemovableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision())
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values();
}
} | |
Nit: the -> a | void convergeToCapacity(NodeList nodes) {
List<Node> emptyHosts = getAllocatableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder())
.collect(Collectors.toList());
int hostI = 0;
int capacityI = 0;
while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
emptyHosts.remove(hostI);
preProvisionCapacity.remove(capacityI);
} else {
hostI++;
}
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
emptyHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | void convergeToCapacity(NodeList nodes) {
Collection<Node> removableHosts = getRemovableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
.collect(Collectors.toList());
for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
NodeResources resources = it.next();
removableHosts.stream()
.filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
.filter(host -> host.flavor().resources().satisfies(resources))
.min(Comparator.comparingInt(n -> n.flavor().cost()))
.ifPresent(host -> {
removableHosts.remove(host);
it.remove();
});
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
removableHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/** @return Nodes of type host, in any state, that have no children with allocation */
private static List<Node> getAllocatableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values().stream()
.sorted((n1, n2) -> NodeResourceComparator.memoryDiskCpuOrder().compare(n1.flavor().resources(), n2.flavor().resources()))
.collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
private static Collection<Node> getRemovableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision())
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values();
}
} | |
Note: Lists typically have rather poor remove(0) performance | void convergeToCapacity(NodeList nodes) {
List<Node> emptyHosts = getAllocatableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder())
.collect(Collectors.toList());
int hostI = 0;
int capacityI = 0;
while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
emptyHosts.remove(hostI);
preProvisionCapacity.remove(capacityI);
} else {
hostI++;
}
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
emptyHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | preProvisionCapacity.remove(capacityI); | void convergeToCapacity(NodeList nodes) {
Collection<Node> removableHosts = getRemovableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
.collect(Collectors.toList());
for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
NodeResources resources = it.next();
removableHosts.stream()
.filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
.filter(host -> host.flavor().resources().satisfies(resources))
.min(Comparator.comparingInt(n -> n.flavor().cost()))
.ifPresent(host -> {
removableHosts.remove(host);
it.remove();
});
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
removableHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/** @return Nodes of type host, in any state, that have no children with allocation, cheapest resources first */
private static List<Node> getAllocatableHosts(NodeList nodes) {
    // Hostnames that currently parent at least one allocated node.
    Set<String> occupiedParents = nodes.asList().stream()
            .filter(node -> node.allocation().isPresent())
            .flatMap(node -> node.parentHostname().stream())
            .collect(Collectors.toSet());
    // Keep only hosts not referenced above, ordered by memory, disk, then cpu of their flavor.
    return nodes.nodeType(NodeType.host).asList().stream()
            .filter(host -> ! occupiedParents.contains(host.hostname()))
            .sorted((a, b) -> NodeResourceComparator.memoryDiskCpuOrder()
                    .compare(a.flavor().resources(), b.flavor().resources()))
            .collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
// Logger scoped to this maintainer.
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
// Synthetic application that owns hosts kept around purely as pre-provisioned capacity.
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
// Creates and removes hosts on behalf of this maintainer.
private final HostProvisioner hostProvisioner;
// Kill switch checked at the top of maintain().
private final BooleanFlag dynamicProvisioningEnabled;
// Target spare capacity to keep pre-provisioned; re-read on every pass.
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
/**
 * Creates the maintainer, binding the feature flags it consults on each run.
 */
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
/**
 * One maintenance pass: completes provisioning of requested hosts, then converges
 * the host pool toward the configured pre-provision capacity. No-op when disabled by flag.
 */
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
// Hold the allocation lock across both steps so they see a consistent node list.
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
/**
 * Finishes provisioning of hosts in state 'provisioned': each host is provisioned
 * together with its child nodes and the updated nodes are written back.
 * Failures are handled per host so one bad host does not block the others.
 */
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
// Hosts still in state 'provisioned', indexed by hostname.
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
// Every node whose parent is such a host, grouped under that parent.
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
// Expected failure modes: log briefly and let the next run retry.
log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
// Unrecoverable provisioning failure: fail out the host and everything on it.
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/**
 * Returns hosts that are candidates for removal: host-type nodes with no allocated
 * children, excluding parked hosts unless they are marked wantToDeprovision.
 */
private static Collection<Node> getRemovableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision())
.collect(Collectors.toMap(Node::hostname, Function.identity()));
// Drop every host that currently parents an allocated node.
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values();
}
} |
If there is only one empty host HB with capacity satisfying CB from the preprovision-capacity flag (with count 1), and there is also a capacity CA > CB in the flag (with count 1), then this algorithm would allocate CA and CB and deallocate HB. It is unnecessary to deallocate HB and allocate CB. | void convergeToCapacity(NodeList nodes) {
List<Node> emptyHosts = getAllocatableHosts(nodes);
// Expand the flag value into one NodeResources entry per requested count,
// sorted ascending by memory, disk, then cpu.
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder())
.collect(Collectors.toList());
// Greedy two-index scan: match each required capacity to the first remaining
// allocatable empty host whose resources satisfy it; matched pairs are removed
// from both lists.
// NOTE(review): this greedy matching looks suboptimal — a host that is skipped for
// the current capacity is never reconsidered for a later (larger) one, so a host
// that could have satisfied some required capacity can end up deprovisioned below
// while an equivalent capacity is provisioned anew. Confirm against the intended
// convergence semantics.
int hostI = 0;
int capacityI = 0;
while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
emptyHosts.remove(hostI);
preProvisionCapacity.remove(capacityI);
} else {
hostI++;
}
}
// Capacity still unmatched must be provisioned as new hosts.
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
// Empty hosts not claimed by any required capacity are deprovisioned and removed.
emptyHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) { | void convergeToCapacity(NodeList nodes) {
Collection<Node> removableHosts = getRemovableHosts(nodes);
// Expand the flag value into one NodeResources entry per requested count,
// largest (memory, disk, cpu) first so big requirements claim hosts before small ones.
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
.collect(Collectors.toList());
// For each required capacity, retain the cheapest allocatable host that satisfies it;
// both the host and the capacity are then considered covered and removed.
for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
NodeResources resources = it.next();
removableHosts.stream()
.filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
.filter(host -> host.flavor().resources().satisfies(resources))
.min(Comparator.comparingInt(n -> n.flavor().cost()))
.ifPresent(host -> {
removableHosts.remove(host);
it.remove();
});
}
// Capacity not covered by any existing host is provisioned as new hosts.
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
// Hosts not retained for the target capacity are deprovisioned and removed.
removableHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/** @return Nodes of type host, in any state, that have no children with allocation */
private static List<Node> getAllocatableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values().stream()
.sorted((n1, n2) -> NodeResourceComparator.memoryDiskCpuOrder().compare(n1.flavor().resources(), n2.flavor().resources()))
.collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
private static Collection<Node> getRemovableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision())
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values();
}
} |
See comment above for why it is `operator`. | void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure"); | void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void convergeToCapacity(NodeList nodes) {
List<Node> emptyHosts = getAllocatableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder())
.collect(Collectors.toList());
int hostI = 0;
int capacityI = 0;
while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
emptyHosts.remove(hostI);
preProvisionCapacity.remove(capacityI);
} else {
hostI++;
}
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
emptyHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/** @return Nodes of type host, in any state, that have no children with allocation */
private static List<Node> getAllocatableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values().stream()
.sorted((n1, n2) -> NodeResourceComparator.memoryDiskCpuOrder().compare(n1.flavor().resources(), n2.flavor().resources()))
.collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void convergeToCapacity(NodeList nodes) {
Collection<Node> removableHosts = getRemovableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
.collect(Collectors.toList());
for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
NodeResources resources = it.next();
removableHosts.stream()
.filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
.filter(host -> host.flavor().resources().satisfies(resources))
.min(Comparator.comparingInt(n -> n.flavor().cost()))
.ifPresent(host -> {
removableHosts.remove(host);
it.remove();
});
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
removableHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
private static Collection<Node> getRemovableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision())
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values();
}
} |
Yes, but these will typically be tiny lists, probably less than 5 elements. | void convergeToCapacity(NodeList nodes) {
List<Node> emptyHosts = getAllocatableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder())
.collect(Collectors.toList());
int hostI = 0;
int capacityI = 0;
while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
emptyHosts.remove(hostI);
preProvisionCapacity.remove(capacityI);
} else {
hostI++;
}
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
emptyHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | preProvisionCapacity.remove(capacityI); | void convergeToCapacity(NodeList nodes) {
Collection<Node> removableHosts = getRemovableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
.collect(Collectors.toList());
for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
NodeResources resources = it.next();
removableHosts.stream()
.filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
.filter(host -> host.flavor().resources().satisfies(resources))
.min(Comparator.comparingInt(n -> n.flavor().cost()))
.ifPresent(host -> {
removableHosts.remove(host);
it.remove();
});
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
removableHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/** @return Nodes of type host, in any state, that have no children with allocation */
private static List<Node> getAllocatableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values().stream()
.sorted((n1, n2) -> NodeResourceComparator.memoryDiskCpuOrder().compare(n1.flavor().resources(), n2.flavor().resources()))
.collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/**
 * Returns the hosts that may be removed: every host except those parked without the
 * wantToDeprovision flag, minus any host that still backs a node with an allocation.
 */
private static Collection<Node> getRemovableHosts(NodeList nodes) {
    // Candidate hosts keyed by hostname; parked hosts qualify only when explicitly
    // marked for deprovisioning.
    Map<String, Node> candidates = nodes.nodeType(NodeType.host)
            .asList().stream()
            .filter(host -> host.status().wantToDeprovision() || host.state() != Node.State.parked)
            .collect(Collectors.toMap(Node::hostname, host -> host));
    // A host that still has an allocated child must not be removed.
    for (Node node : nodes.asList()) {
        if (node.allocation().isPresent())
            node.parentHostname().ifPresent(candidates::remove);
    }
    return candidates.values();
}
} |
Fixed by always looping through all the hosts and finding the cheapest one that satisfies the capacity. | void convergeToCapacity(NodeList nodes) {
List<Node> emptyHosts = getAllocatableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder())
.collect(Collectors.toList());
int hostI = 0;
int capacityI = 0;
while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
emptyHosts.remove(hostI);
preProvisionCapacity.remove(capacityI);
} else {
hostI++;
}
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
emptyHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) { | void convergeToCapacity(NodeList nodes) {
Collection<Node> removableHosts = getRemovableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
.collect(Collectors.toList());
for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
NodeResources resources = it.next();
removableHosts.stream()
.filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
.filter(host -> host.flavor().resources().satisfies(resources))
.min(Comparator.comparingInt(n -> n.flavor().cost()))
.ifPresent(host -> {
removableHosts.remove(host);
it.remove();
});
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
removableHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/**
 * Returns the nodes of type host, in any state, that have no children with an allocation,
 * sorted by NodeResourceComparator.memoryDiskCpuOrder() over their flavor resources.
 */
private static List<Node> getAllocatableHosts(NodeList nodes) {
    // All hosts, keyed by hostname.
    Map<String, Node> candidates = nodes.nodeType(NodeType.host)
            .asList().stream()
            .collect(Collectors.toMap(Node::hostname, host -> host));
    // Discard every host that still has an allocated child.
    for (Node node : nodes.asList()) {
        if (node.allocation().isPresent())
            node.parentHostname().ifPresent(candidates::remove);
    }
    return candidates.values().stream()
            .sorted((a, b) -> NodeResourceComparator.memoryDiskCpuOrder()
                    .compare(a.flavor().resources(), b.flavor().resources()))
            .collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
private static Collection<Node> getRemovableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision())
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values();
}
} |
Fixed | void convergeToCapacity(NodeList nodes) {
List<Node> emptyHosts = getAllocatableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder())
.collect(Collectors.toList());
int hostI = 0;
int capacityI = 0;
while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
emptyHosts.remove(hostI);
preProvisionCapacity.remove(capacityI);
} else {
hostI++;
}
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
emptyHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | void convergeToCapacity(NodeList nodes) {
Collection<Node> removableHosts = getRemovableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
.collect(Collectors.toList());
for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
NodeResources resources = it.next();
removableHosts.stream()
.filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
.filter(host -> host.flavor().resources().satisfies(resources))
.min(Comparator.comparingInt(n -> n.flavor().cost()))
.ifPresent(host -> {
removableHosts.remove(host);
it.remove();
});
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
removableHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/** @return Nodes of type host, in any state, that have no children with allocation */
private static List<Node> getAllocatableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values().stream()
.sorted((n1, n2) -> NodeResourceComparator.memoryDiskCpuOrder().compare(n1.flavor().resources(), n2.flavor().resources()))
.collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
private static Collection<Node> getRemovableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision())
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values();
}
} | |
Will ignore hosts in `parked` that do not have `wantToDeprovision` set. | void convergeToCapacity(NodeList nodes) {
List<Node> emptyHosts = getAllocatableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder())
.collect(Collectors.toList());
int hostI = 0;
int capacityI = 0;
while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
emptyHosts.remove(hostI);
preProvisionCapacity.remove(capacityI);
} else {
hostI++;
}
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
emptyHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | void convergeToCapacity(NodeList nodes) {
Collection<Node> removableHosts = getRemovableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
.collect(Collectors.toList());
for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
NodeResources resources = it.next();
removableHosts.stream()
.filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
.filter(host -> host.flavor().resources().satisfies(resources))
.min(Comparator.comparingInt(n -> n.flavor().cost()))
.ifPresent(host -> {
removableHosts.remove(host);
it.remove();
});
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
removableHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/** @return Nodes of type host, in any state, that have no children with allocation */
private static List<Node> getAllocatableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values().stream()
.sorted((n1, n2) -> NodeResourceComparator.memoryDiskCpuOrder().compare(n1.flavor().resources(), n2.flavor().resources()))
.collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
private static Collection<Node> getRemovableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision())
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values();
}
} | |
Added the number of children to the error message. | void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | log.log(Level.INFO, "Failed to provision " + host.hostname() + ": " + Exceptions.toMessageString(e)); | void updateProvisioningNodes(NodeList nodes, Mutex lock) {
Map<String, Node> provisionedHostsByHostname = nodes.state(Node.State.provisioned).nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
Map<Node, Set<Node>> nodesByProvisionedParent = nodes.asList().stream()
.filter(node -> node.parentHostname().map(provisionedHostsByHostname::containsKey).orElse(false))
.collect(Collectors.groupingBy(
node -> provisionedHostsByHostname.get(node.parentHostname().get()),
Collectors.toSet()));
nodesByProvisionedParent.forEach((host, children) -> {
try {
List<Node> updatedNodes = hostProvisioner.provision(host, children);
nodeRepository().write(updatedNodes, lock);
} catch (IllegalArgumentException | IllegalStateException e) {
log.log(Level.INFO, "Failed to provision " + host.hostname() + " with " + children.size() + " children: " +
Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
nodeRepository().failRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to provision " + host.hostname() + ", will retry in " + interval(), e);
}
});
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void convergeToCapacity(NodeList nodes) {
List<Node> emptyHosts = getAllocatableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder())
.collect(Collectors.toList());
int hostI = 0;
int capacityI = 0;
while (hostI < emptyHosts.size() && capacityI < preProvisionCapacity.size()) {
if (NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(emptyHosts.get(hostI).state()) &&
emptyHosts.get(hostI).flavor().resources().satisfies(preProvisionCapacity.get(capacityI))) {
emptyHosts.remove(hostI);
preProvisionCapacity.remove(capacityI);
} else {
hostI++;
}
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
emptyHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
/** @return Nodes of type host, in any state, that have no children with allocation */
private static List<Node> getAllocatableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values().stream()
.sorted((n1, n2) -> NodeResourceComparator.memoryDiskCpuOrder().compare(n1.flavor().resources(), n2.flavor().resources()))
.collect(Collectors.toList());
}
} | class DynamicProvisioningMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DynamicProvisioningMaintainer.class.getName());
private static final ApplicationId preprovisionAppId = ApplicationId.from("hosted-vespa", "tenant-host", "preprovision");
private final HostProvisioner hostProvisioner;
private final BooleanFlag dynamicProvisioningEnabled;
private final ListFlag<PreprovisionCapacity> preprovisionCapacityFlag;
DynamicProvisioningMaintainer(NodeRepository nodeRepository, Duration interval,
HostProvisioner hostProvisioner, FlagSource flagSource) {
super(nodeRepository, interval);
this.hostProvisioner = hostProvisioner;
this.dynamicProvisioningEnabled = Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource);
this.preprovisionCapacityFlag = Flags.PREPROVISION_CAPACITY.bindTo(flagSource);
}
@Override
protected void maintain() {
if (! dynamicProvisioningEnabled.value()) return;
try (Mutex lock = nodeRepository().lockAllocation()) {
NodeList nodes = nodeRepository().list();
updateProvisioningNodes(nodes, lock);
convergeToCapacity(nodes);
}
}
void convergeToCapacity(NodeList nodes) {
Collection<Node> removableHosts = getRemovableHosts(nodes);
List<NodeResources> preProvisionCapacity = preprovisionCapacityFlag.value().stream()
.flatMap(cap -> {
NodeResources resources = new NodeResources(cap.getVcpu(), cap.getMemoryGb(), cap.getDiskGb(), 1);
return IntStream.range(0, cap.getCount()).mapToObj(i -> resources);
})
.sorted(NodeResourceComparator.memoryDiskCpuOrder().reversed())
.collect(Collectors.toList());
for (Iterator<NodeResources> it = preProvisionCapacity.iterator(); it.hasNext() && !removableHosts.isEmpty();) {
NodeResources resources = it.next();
removableHosts.stream()
.filter(host -> NodePrioritizer.ALLOCATABLE_HOST_STATES.contains(host.state()))
.filter(host -> host.flavor().resources().satisfies(resources))
.min(Comparator.comparingInt(n -> n.flavor().cost()))
.ifPresent(host -> {
removableHosts.remove(host);
it.remove();
});
}
preProvisionCapacity.forEach(resources -> {
try {
List<Node> hosts = hostProvisioner.provisionHosts(
nodeRepository().database().getProvisionIndexes(1), resources, preprovisionAppId).stream()
.map(ProvisionedHost::generateHost)
.collect(Collectors.toList());
nodeRepository().addNodes(hosts);
} catch (OutOfCapacityException | IllegalArgumentException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ":" + e.getMessage());
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to pre-provision " + resources + ", will retry in " + interval(), e);
}
});
removableHosts.forEach(host -> {
try {
hostProvisioner.deprovision(host);
nodeRepository().removeRecursively(host, true);
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to deprovision " + host.hostname() + ", will retry in " + interval(), e);
}
});
}
private static Collection<Node> getRemovableHosts(NodeList nodes) {
Map<String, Node> hostsByHostname = nodes.nodeType(NodeType.host)
.asList().stream()
.filter(host -> host.state() != Node.State.parked || host.status().wantToDeprovision())
.collect(Collectors.toMap(Node::hostname, Function.identity()));
nodes.asList().stream()
.filter(node -> node.allocation().isPresent())
.flatMap(node -> node.parentHostname().stream())
.distinct()
.forEach(hostsByHostname::remove);
return hostsByHostname.values();
}
} |
Consider printing this as seconds as the query timeout is specified in seconds. | public Result doSearch2(Query query, Execution execution) {
initializeMissingQueryFields(query);
if (documentSelectionQueryParameterCount(query) != 1) {
return new Result(query, ErrorMessage.createBackendCommunicationError("Streaming search needs one and " +
"only one of these query parameters to be set: streaming.userid, streaming.groupname, " +
"streaming.selection"));
}
query.trace("Routing to search cluster " + getSearchClusterConfigId() + " and document type " + documentType, 4);
long timeStartedNanos = tracingOptions.getClock().nanoTimeNow();
int effectiveTraceLevel = inferEffectiveQueryTraceLevel(query);
Visitor visitor = visitorFactory.createVisitor(query, getSearchClusterConfigId(), route, documentType, effectiveTraceLevel);
try {
visitor.doSearch();
} catch (ParseException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(
"Failed to parse document selection string: " + e.getMessage() + "'."));
} catch (TokenMgrException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(
"Failed to tokenize document selection string: " + e.getMessage() + "'."));
} catch (TimeoutException e) {
double elapsedMillis = durationInMillisFromNanoTime(timeStartedNanos);
if ((effectiveTraceLevel > 0) && timeoutBadEnoughToBeReported(query, elapsedMillis)) {
tracingOptions.getTraceExporter().maybeExport(() -> new TraceDescription(visitor.getTrace(),
String.format("Trace of %s which timed out after %.2g ms",
query.toString(), elapsedMillis)));
}
return new Result(query, ErrorMessage.createTimeout(e.getMessage()));
} catch (InterruptedException|IllegalArgumentException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(e.getMessage()));
}
return buildResultFromCompletedVisitor(query, visitor);
} | String.format("Trace of %s which timed out after %.2g ms", | public Result doSearch2(Query query, Execution execution) {
initializeMissingQueryFields(query);
if (documentSelectionQueryParameterCount(query) != 1) {
return new Result(query, ErrorMessage.createBackendCommunicationError("Streaming search needs one and " +
"only one of these query parameters to be set: streaming.userid, streaming.groupname, " +
"streaming.selection"));
}
query.trace("Routing to search cluster " + getSearchClusterConfigId() + " and document type " + documentType, 4);
long timeStartedNanos = tracingOptions.getClock().nanoTimeNow();
int effectiveTraceLevel = inferEffectiveQueryTraceLevel(query);
Visitor visitor = visitorFactory.createVisitor(query, getSearchClusterConfigId(), route, documentType, effectiveTraceLevel);
try {
visitor.doSearch();
} catch (ParseException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(
"Failed to parse document selection string: " + e.getMessage() + "'."));
} catch (TokenMgrException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(
"Failed to tokenize document selection string: " + e.getMessage() + "'."));
} catch (TimeoutException e) {
double elapsedMillis = durationInMillisFromNanoTime(timeStartedNanos);
if ((effectiveTraceLevel > 0) && timeoutBadEnoughToBeReported(query, elapsedMillis)) {
tracingOptions.getTraceExporter().maybeExport(() -> new TraceDescription(visitor.getTrace(),
String.format("Trace of %s which timed out after %.3g seconds",
query.toString(), elapsedMillis / 1000.0)));
}
return new Result(query, ErrorMessage.createTimeout(e.getMessage()));
} catch (InterruptedException|IllegalArgumentException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(e.getMessage()));
}
return buildResultFromCompletedVisitor(query, visitor);
} | class VdsVisitorFactory implements VisitorFactory {
@Override
public Visitor createVisitor(Query query, String searchCluster, Route route, String documentType, int traceLevelOverride) {
return new VdsVisitor(query, searchCluster, route, documentType, traceLevelOverride);
}
} | class VdsVisitorFactory implements VisitorFactory {
@Override
public Visitor createVisitor(Query query, String searchCluster, Route route, String documentType, int traceLevelOverride) {
return new VdsVisitor(query, searchCluster, route, documentType, traceLevelOverride);
}
} |
Done | public Result doSearch2(Query query, Execution execution) {
initializeMissingQueryFields(query);
if (documentSelectionQueryParameterCount(query) != 1) {
return new Result(query, ErrorMessage.createBackendCommunicationError("Streaming search needs one and " +
"only one of these query parameters to be set: streaming.userid, streaming.groupname, " +
"streaming.selection"));
}
query.trace("Routing to search cluster " + getSearchClusterConfigId() + " and document type " + documentType, 4);
long timeStartedNanos = tracingOptions.getClock().nanoTimeNow();
int effectiveTraceLevel = inferEffectiveQueryTraceLevel(query);
Visitor visitor = visitorFactory.createVisitor(query, getSearchClusterConfigId(), route, documentType, effectiveTraceLevel);
try {
visitor.doSearch();
} catch (ParseException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(
"Failed to parse document selection string: " + e.getMessage() + "'."));
} catch (TokenMgrException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(
"Failed to tokenize document selection string: " + e.getMessage() + "'."));
} catch (TimeoutException e) {
double elapsedMillis = durationInMillisFromNanoTime(timeStartedNanos);
if ((effectiveTraceLevel > 0) && timeoutBadEnoughToBeReported(query, elapsedMillis)) {
tracingOptions.getTraceExporter().maybeExport(() -> new TraceDescription(visitor.getTrace(),
String.format("Trace of %s which timed out after %.2g ms",
query.toString(), elapsedMillis)));
}
return new Result(query, ErrorMessage.createTimeout(e.getMessage()));
} catch (InterruptedException|IllegalArgumentException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(e.getMessage()));
}
return buildResultFromCompletedVisitor(query, visitor);
} | String.format("Trace of %s which timed out after %.2g ms", | public Result doSearch2(Query query, Execution execution) {
initializeMissingQueryFields(query);
if (documentSelectionQueryParameterCount(query) != 1) {
return new Result(query, ErrorMessage.createBackendCommunicationError("Streaming search needs one and " +
"only one of these query parameters to be set: streaming.userid, streaming.groupname, " +
"streaming.selection"));
}
query.trace("Routing to search cluster " + getSearchClusterConfigId() + " and document type " + documentType, 4);
long timeStartedNanos = tracingOptions.getClock().nanoTimeNow();
int effectiveTraceLevel = inferEffectiveQueryTraceLevel(query);
Visitor visitor = visitorFactory.createVisitor(query, getSearchClusterConfigId(), route, documentType, effectiveTraceLevel);
try {
visitor.doSearch();
} catch (ParseException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(
"Failed to parse document selection string: " + e.getMessage() + "'."));
} catch (TokenMgrException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(
"Failed to tokenize document selection string: " + e.getMessage() + "'."));
} catch (TimeoutException e) {
double elapsedMillis = durationInMillisFromNanoTime(timeStartedNanos);
if ((effectiveTraceLevel > 0) && timeoutBadEnoughToBeReported(query, elapsedMillis)) {
tracingOptions.getTraceExporter().maybeExport(() -> new TraceDescription(visitor.getTrace(),
String.format("Trace of %s which timed out after %.3g seconds",
query.toString(), elapsedMillis / 1000.0)));
}
return new Result(query, ErrorMessage.createTimeout(e.getMessage()));
} catch (InterruptedException|IllegalArgumentException e) {
return new Result(query, ErrorMessage.createBackendCommunicationError(e.getMessage()));
}
return buildResultFromCompletedVisitor(query, visitor);
} | class VdsVisitorFactory implements VisitorFactory {
@Override
public Visitor createVisitor(Query query, String searchCluster, Route route, String documentType, int traceLevelOverride) {
return new VdsVisitor(query, searchCluster, route, documentType, traceLevelOverride);
}
} | class VdsVisitorFactory implements VisitorFactory {
@Override
public Visitor createVisitor(Query query, String searchCluster, Route route, String documentType, int traceLevelOverride) {
return new VdsVisitor(query, searchCluster, route, documentType, traceLevelOverride);
}
} |
"Result". But this message is in itself quite fishy. Could we make the error conditions here more crisp? | static boolean warmup(Linguistics linguistics) {
Query query = new Query("search/?yql=select%20*%20from%20sources%20where%20title%20contains%20'xyz';");
Result result = insertQuery(query, new ParserEnvironment().setLinguistics(linguistics));
if (result != null) {
log.warning("Something fishy. Reult = " + result.toString());
return false;
}
if ( ! "select * from sources where title contains \"xyz\";".equals(query.yqlRepresentation())) {
log.warning("Unexpected yql: " + query.yqlRepresentation());
return false;
}
return true;
} | log.warning("Something fishy. Reult = " + result.toString()); | static boolean warmup(Linguistics linguistics) {
Query query = new Query("search/?yql=select%20*%20from%20sources%20where%20title%20contains%20'xyz';");
Result result = insertQuery(query, new ParserEnvironment().setLinguistics(linguistics));
if (result != null) {
log.warning("Warmup code trigger an error. Error = " + result.toString());
return false;
}
if ( ! "select * from sources where title contains \"xyz\";".equals(query.yqlRepresentation())) {
log.warning("Warmup code generated unexpected yql: " + query.yqlRepresentation());
return false;
}
return true;
} | class MinimalQueryInserter extends Searcher {
public static final String EXTERNAL_YQL = "ExternalYql";
public static final CompoundName YQL = new CompoundName("yql");
private static final CompoundName MAX_HITS = new CompoundName("maxHits");
private static final CompoundName MAX_OFFSET = new CompoundName("maxOffset");
private static Logger log = Logger.getLogger(MinimalQueryInserter.class.getName());
@Inject
public MinimalQueryInserter(Linguistics linguistics) {
warmup(linguistics);
}
MinimalQueryInserter() {
this(new SimpleLinguistics());
}
static boolean warmup() {
return warmup(new SimpleLinguistics());
}
private static Result insertQuery(Query query, ParserEnvironment env) {
YqlParser parser = (YqlParser) ParserFactory.newInstance(Query.Type.YQL, env);
parser.setQueryParser(false);
parser.setUserQuery(query);
QueryTree newTree;
try {
Parsable parsable = Parsable.fromQueryModel(query.getModel()).setQuery(query.properties().getString(YQL));
newTree = parser.parse(parsable);
} catch (RuntimeException e) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Could not instantiate query from YQL", e));
}
if (parser.getOffset() != null) {
int maxHits = query.properties().getInteger(MAX_HITS);
int maxOffset = query.properties().getInteger(MAX_OFFSET);
if (parser.getOffset() > maxOffset) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested offset " + parser.getOffset()
+ ", but the max offset allowed is " + maxOffset + "."));
}
if (parser.getHits() > maxHits) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested " + parser.getHits()
+ " hits returned, but max hits allowed is " + maxHits + "."));
}
}
query.getModel().getQueryTree().setRoot(newTree.getRoot());
query.getPresentation().getSummaryFields().addAll(parser.getYqlSummaryFields());
for (VespaGroupingStep step : parser.getGroupingSteps()) {
GroupingRequest.newInstance(query)
.setRootOperation(step.getOperation())
.continuations().addAll(step.continuations());
}
if (parser.getYqlSources().size() == 0) {
query.getModel().getSources().clear();
} else {
query.getModel().getSources().addAll(parser.getYqlSources());
}
if (parser.getOffset() != null) {
query.setOffset(parser.getOffset());
query.setHits(parser.getHits());
}
if (parser.getTimeout() != null) {
query.setTimeout(parser.getTimeout().longValue());
}
if (parser.getSorting() != null) {
query.getRanking().setSorting(parser.getSorting());
}
query.trace("YQL+ query parsed", true, 2);
return null;
}
@Override
public Result search(Query query, Execution execution) {
if (query.properties().get(YQL) == null) return execution.search(query);
Result result = insertQuery(query, ParserEnvironment.fromExecutionContext(execution.context()));
return (result == null) ? execution.search(query) : result;
}
} | class MinimalQueryInserter extends Searcher {
public static final String EXTERNAL_YQL = "ExternalYql";
public static final CompoundName YQL = new CompoundName("yql");
private static final CompoundName MAX_HITS = new CompoundName("maxHits");
private static final CompoundName MAX_OFFSET = new CompoundName("maxOffset");
private static Logger log = Logger.getLogger(MinimalQueryInserter.class.getName());
@Inject
public MinimalQueryInserter(Linguistics linguistics) {
warmup(linguistics);
}
MinimalQueryInserter() {
this(new SimpleLinguistics());
}
static boolean warmup() {
return warmup(new SimpleLinguistics());
}
private
private static Result insertQuery(Query query, ParserEnvironment env) {
YqlParser parser = (YqlParser) ParserFactory.newInstance(Query.Type.YQL, env);
parser.setQueryParser(false);
parser.setUserQuery(query);
QueryTree newTree;
try {
Parsable parsable = Parsable.fromQueryModel(query.getModel()).setQuery(query.properties().getString(YQL));
newTree = parser.parse(parsable);
} catch (RuntimeException e) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Could not instantiate query from YQL", e));
}
if (parser.getOffset() != null) {
int maxHits = query.properties().getInteger(MAX_HITS);
int maxOffset = query.properties().getInteger(MAX_OFFSET);
if (parser.getOffset() > maxOffset) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested offset " + parser.getOffset()
+ ", but the max offset allowed is " + maxOffset + "."));
}
if (parser.getHits() > maxHits) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested " + parser.getHits()
+ " hits returned, but max hits allowed is " + maxHits + "."));
}
}
query.getModel().getQueryTree().setRoot(newTree.getRoot());
query.getPresentation().getSummaryFields().addAll(parser.getYqlSummaryFields());
for (VespaGroupingStep step : parser.getGroupingSteps()) {
GroupingRequest.newInstance(query)
.setRootOperation(step.getOperation())
.continuations().addAll(step.continuations());
}
if (parser.getYqlSources().size() == 0) {
query.getModel().getSources().clear();
} else {
query.getModel().getSources().addAll(parser.getYqlSources());
}
if (parser.getOffset() != null) {
query.setOffset(parser.getOffset());
query.setHits(parser.getHits());
}
if (parser.getTimeout() != null) {
query.setTimeout(parser.getTimeout().longValue());
}
if (parser.getSorting() != null) {
query.getRanking().setSorting(parser.getSorting());
}
query.trace("YQL+ query parsed", true, 2);
return null;
}
@Override
public Result search(Query query, Execution execution) {
if (query.properties().get(YQL) == null) return execution.search(query);
Result result = insertQuery(query, ParserEnvironment.fromExecutionContext(execution.context()));
return (result == null) ? execution.search(query) : result;
}
} |
Result | static boolean warmup(Linguistics linguistics) {
Query query = new Query("search/?yql=select%20*%20from%20sources%20where%20title%20contains%20'xyz';");
Result result = insertQuery(query, new ParserEnvironment().setLinguistics(linguistics));
if (result != null) {
log.warning("Something fishy. Reult = " + result.toString());
return false;
}
if ( ! "select * from sources where title contains \"xyz\";".equals(query.yqlRepresentation())) {
log.warning("Unexpected yql: " + query.yqlRepresentation());
return false;
}
return true;
} | log.warning("Something fishy. Reult = " + result.toString()); | static boolean warmup(Linguistics linguistics) {
Query query = new Query("search/?yql=select%20*%20from%20sources%20where%20title%20contains%20'xyz';");
Result result = insertQuery(query, new ParserEnvironment().setLinguistics(linguistics));
if (result != null) {
log.warning("Warmup code trigger an error. Error = " + result.toString());
return false;
}
if ( ! "select * from sources where title contains \"xyz\";".equals(query.yqlRepresentation())) {
log.warning("Warmup code generated unexpected yql: " + query.yqlRepresentation());
return false;
}
return true;
} | class MinimalQueryInserter extends Searcher {
public static final String EXTERNAL_YQL = "ExternalYql";
public static final CompoundName YQL = new CompoundName("yql");
private static final CompoundName MAX_HITS = new CompoundName("maxHits");
private static final CompoundName MAX_OFFSET = new CompoundName("maxOffset");
private static Logger log = Logger.getLogger(MinimalQueryInserter.class.getName());
@Inject
public MinimalQueryInserter(Linguistics linguistics) {
warmup(linguistics);
}
MinimalQueryInserter() {
this(new SimpleLinguistics());
}
static boolean warmup() {
return warmup(new SimpleLinguistics());
}
private static Result insertQuery(Query query, ParserEnvironment env) {
YqlParser parser = (YqlParser) ParserFactory.newInstance(Query.Type.YQL, env);
parser.setQueryParser(false);
parser.setUserQuery(query);
QueryTree newTree;
try {
Parsable parsable = Parsable.fromQueryModel(query.getModel()).setQuery(query.properties().getString(YQL));
newTree = parser.parse(parsable);
} catch (RuntimeException e) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Could not instantiate query from YQL", e));
}
if (parser.getOffset() != null) {
int maxHits = query.properties().getInteger(MAX_HITS);
int maxOffset = query.properties().getInteger(MAX_OFFSET);
if (parser.getOffset() > maxOffset) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested offset " + parser.getOffset()
+ ", but the max offset allowed is " + maxOffset + "."));
}
if (parser.getHits() > maxHits) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested " + parser.getHits()
+ " hits returned, but max hits allowed is " + maxHits + "."));
}
}
query.getModel().getQueryTree().setRoot(newTree.getRoot());
query.getPresentation().getSummaryFields().addAll(parser.getYqlSummaryFields());
for (VespaGroupingStep step : parser.getGroupingSteps()) {
GroupingRequest.newInstance(query)
.setRootOperation(step.getOperation())
.continuations().addAll(step.continuations());
}
if (parser.getYqlSources().size() == 0) {
query.getModel().getSources().clear();
} else {
query.getModel().getSources().addAll(parser.getYqlSources());
}
if (parser.getOffset() != null) {
query.setOffset(parser.getOffset());
query.setHits(parser.getHits());
}
if (parser.getTimeout() != null) {
query.setTimeout(parser.getTimeout().longValue());
}
if (parser.getSorting() != null) {
query.getRanking().setSorting(parser.getSorting());
}
query.trace("YQL+ query parsed", true, 2);
return null;
}
@Override
public Result search(Query query, Execution execution) {
if (query.properties().get(YQL) == null) return execution.search(query);
Result result = insertQuery(query, ParserEnvironment.fromExecutionContext(execution.context()));
return (result == null) ? execution.search(query) : result;
}
} | class MinimalQueryInserter extends Searcher {
public static final String EXTERNAL_YQL = "ExternalYql";
public static final CompoundName YQL = new CompoundName("yql");
private static final CompoundName MAX_HITS = new CompoundName("maxHits");
private static final CompoundName MAX_OFFSET = new CompoundName("maxOffset");
private static Logger log = Logger.getLogger(MinimalQueryInserter.class.getName());
@Inject
public MinimalQueryInserter(Linguistics linguistics) {
warmup(linguistics);
}
MinimalQueryInserter() {
this(new SimpleLinguistics());
}
static boolean warmup() {
return warmup(new SimpleLinguistics());
}
private
private static Result insertQuery(Query query, ParserEnvironment env) {
YqlParser parser = (YqlParser) ParserFactory.newInstance(Query.Type.YQL, env);
parser.setQueryParser(false);
parser.setUserQuery(query);
QueryTree newTree;
try {
Parsable parsable = Parsable.fromQueryModel(query.getModel()).setQuery(query.properties().getString(YQL));
newTree = parser.parse(parsable);
} catch (RuntimeException e) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Could not instantiate query from YQL", e));
}
if (parser.getOffset() != null) {
int maxHits = query.properties().getInteger(MAX_HITS);
int maxOffset = query.properties().getInteger(MAX_OFFSET);
if (parser.getOffset() > maxOffset) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested offset " + parser.getOffset()
+ ", but the max offset allowed is " + maxOffset + "."));
}
if (parser.getHits() > maxHits) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested " + parser.getHits()
+ " hits returned, but max hits allowed is " + maxHits + "."));
}
}
query.getModel().getQueryTree().setRoot(newTree.getRoot());
query.getPresentation().getSummaryFields().addAll(parser.getYqlSummaryFields());
for (VespaGroupingStep step : parser.getGroupingSteps()) {
GroupingRequest.newInstance(query)
.setRootOperation(step.getOperation())
.continuations().addAll(step.continuations());
}
if (parser.getYqlSources().size() == 0) {
query.getModel().getSources().clear();
} else {
query.getModel().getSources().addAll(parser.getYqlSources());
}
if (parser.getOffset() != null) {
query.setOffset(parser.getOffset());
query.setHits(parser.getHits());
}
if (parser.getTimeout() != null) {
query.setTimeout(parser.getTimeout().longValue());
}
if (parser.getSorting() != null) {
query.getRanking().setSorting(parser.getSorting());
}
query.trace("YQL+ query parsed", true, 2);
return null;
}
@Override
public Result search(Query query, Execution execution) {
if (query.properties().get(YQL) == null) return execution.search(query);
Result result = insertQuery(query, ParserEnvironment.fromExecutionContext(execution.context()));
return (result == null) ? execution.search(query) : result;
}
} |
Fixed | static boolean warmup(Linguistics linguistics) {
Query query = new Query("search/?yql=select%20*%20from%20sources%20where%20title%20contains%20'xyz';");
Result result = insertQuery(query, new ParserEnvironment().setLinguistics(linguistics));
if (result != null) {
log.warning("Something fishy. Reult = " + result.toString());
return false;
}
if ( ! "select * from sources where title contains \"xyz\";".equals(query.yqlRepresentation())) {
log.warning("Unexpected yql: " + query.yqlRepresentation());
return false;
}
return true;
} | log.warning("Something fishy. Reult = " + result.toString()); | static boolean warmup(Linguistics linguistics) {
Query query = new Query("search/?yql=select%20*%20from%20sources%20where%20title%20contains%20'xyz';");
Result result = insertQuery(query, new ParserEnvironment().setLinguistics(linguistics));
if (result != null) {
log.warning("Warmup code trigger an error. Error = " + result.toString());
return false;
}
if ( ! "select * from sources where title contains \"xyz\";".equals(query.yqlRepresentation())) {
log.warning("Warmup code generated unexpected yql: " + query.yqlRepresentation());
return false;
}
return true;
} | class MinimalQueryInserter extends Searcher {
public static final String EXTERNAL_YQL = "ExternalYql";
public static final CompoundName YQL = new CompoundName("yql");
private static final CompoundName MAX_HITS = new CompoundName("maxHits");
private static final CompoundName MAX_OFFSET = new CompoundName("maxOffset");
private static Logger log = Logger.getLogger(MinimalQueryInserter.class.getName());
@Inject
public MinimalQueryInserter(Linguistics linguistics) {
warmup(linguistics);
}
MinimalQueryInserter() {
this(new SimpleLinguistics());
}
static boolean warmup() {
return warmup(new SimpleLinguistics());
}
private static Result insertQuery(Query query, ParserEnvironment env) {
YqlParser parser = (YqlParser) ParserFactory.newInstance(Query.Type.YQL, env);
parser.setQueryParser(false);
parser.setUserQuery(query);
QueryTree newTree;
try {
Parsable parsable = Parsable.fromQueryModel(query.getModel()).setQuery(query.properties().getString(YQL));
newTree = parser.parse(parsable);
} catch (RuntimeException e) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Could not instantiate query from YQL", e));
}
if (parser.getOffset() != null) {
int maxHits = query.properties().getInteger(MAX_HITS);
int maxOffset = query.properties().getInteger(MAX_OFFSET);
if (parser.getOffset() > maxOffset) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested offset " + parser.getOffset()
+ ", but the max offset allowed is " + maxOffset + "."));
}
if (parser.getHits() > maxHits) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested " + parser.getHits()
+ " hits returned, but max hits allowed is " + maxHits + "."));
}
}
query.getModel().getQueryTree().setRoot(newTree.getRoot());
query.getPresentation().getSummaryFields().addAll(parser.getYqlSummaryFields());
for (VespaGroupingStep step : parser.getGroupingSteps()) {
GroupingRequest.newInstance(query)
.setRootOperation(step.getOperation())
.continuations().addAll(step.continuations());
}
if (parser.getYqlSources().size() == 0) {
query.getModel().getSources().clear();
} else {
query.getModel().getSources().addAll(parser.getYqlSources());
}
if (parser.getOffset() != null) {
query.setOffset(parser.getOffset());
query.setHits(parser.getHits());
}
if (parser.getTimeout() != null) {
query.setTimeout(parser.getTimeout().longValue());
}
if (parser.getSorting() != null) {
query.getRanking().setSorting(parser.getSorting());
}
query.trace("YQL+ query parsed", true, 2);
return null;
}
@Override
public Result search(Query query, Execution execution) {
if (query.properties().get(YQL) == null) return execution.search(query);
Result result = insertQuery(query, ParserEnvironment.fromExecutionContext(execution.context()));
return (result == null) ? execution.search(query) : result;
}
} | class MinimalQueryInserter extends Searcher {
public static final String EXTERNAL_YQL = "ExternalYql";
public static final CompoundName YQL = new CompoundName("yql");
private static final CompoundName MAX_HITS = new CompoundName("maxHits");
private static final CompoundName MAX_OFFSET = new CompoundName("maxOffset");
private static Logger log = Logger.getLogger(MinimalQueryInserter.class.getName());
@Inject
public MinimalQueryInserter(Linguistics linguistics) {
warmup(linguistics);
}
MinimalQueryInserter() {
this(new SimpleLinguistics());
}
static boolean warmup() {
return warmup(new SimpleLinguistics());
}
private
private static Result insertQuery(Query query, ParserEnvironment env) {
YqlParser parser = (YqlParser) ParserFactory.newInstance(Query.Type.YQL, env);
parser.setQueryParser(false);
parser.setUserQuery(query);
QueryTree newTree;
try {
Parsable parsable = Parsable.fromQueryModel(query.getModel()).setQuery(query.properties().getString(YQL));
newTree = parser.parse(parsable);
} catch (RuntimeException e) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Could not instantiate query from YQL", e));
}
if (parser.getOffset() != null) {
int maxHits = query.properties().getInteger(MAX_HITS);
int maxOffset = query.properties().getInteger(MAX_OFFSET);
if (parser.getOffset() > maxOffset) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested offset " + parser.getOffset()
+ ", but the max offset allowed is " + maxOffset + "."));
}
if (parser.getHits() > maxHits) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested " + parser.getHits()
+ " hits returned, but max hits allowed is " + maxHits + "."));
}
}
query.getModel().getQueryTree().setRoot(newTree.getRoot());
query.getPresentation().getSummaryFields().addAll(parser.getYqlSummaryFields());
for (VespaGroupingStep step : parser.getGroupingSteps()) {
GroupingRequest.newInstance(query)
.setRootOperation(step.getOperation())
.continuations().addAll(step.continuations());
}
if (parser.getYqlSources().size() == 0) {
query.getModel().getSources().clear();
} else {
query.getModel().getSources().addAll(parser.getYqlSources());
}
if (parser.getOffset() != null) {
query.setOffset(parser.getOffset());
query.setHits(parser.getHits());
}
if (parser.getTimeout() != null) {
query.setTimeout(parser.getTimeout().longValue());
}
if (parser.getSorting() != null) {
query.getRanking().setSorting(parser.getSorting());
}
query.trace("YQL+ query parsed", true, 2);
return null;
}
@Override
public Result search(Query query, Execution execution) {
if (query.properties().get(YQL) == null) return execution.search(query);
Result result = insertQuery(query, ParserEnvironment.fromExecutionContext(execution.context()));
return (result == null) ? execution.search(query) : result;
}
} |
Crisper.. | static boolean warmup(Linguistics linguistics) {
Query query = new Query("search/?yql=select%20*%20from%20sources%20where%20title%20contains%20'xyz';");
Result result = insertQuery(query, new ParserEnvironment().setLinguistics(linguistics));
if (result != null) {
log.warning("Something fishy. Reult = " + result.toString());
return false;
}
if ( ! "select * from sources where title contains \"xyz\";".equals(query.yqlRepresentation())) {
log.warning("Unexpected yql: " + query.yqlRepresentation());
return false;
}
return true;
} | log.warning("Something fishy. Reult = " + result.toString()); | static boolean warmup(Linguistics linguistics) {
Query query = new Query("search/?yql=select%20*%20from%20sources%20where%20title%20contains%20'xyz';");
Result result = insertQuery(query, new ParserEnvironment().setLinguistics(linguistics));
if (result != null) {
log.warning("Warmup code trigger an error. Error = " + result.toString());
return false;
}
if ( ! "select * from sources where title contains \"xyz\";".equals(query.yqlRepresentation())) {
log.warning("Warmup code generated unexpected yql: " + query.yqlRepresentation());
return false;
}
return true;
} | class MinimalQueryInserter extends Searcher {
public static final String EXTERNAL_YQL = "ExternalYql";
public static final CompoundName YQL = new CompoundName("yql");
private static final CompoundName MAX_HITS = new CompoundName("maxHits");
private static final CompoundName MAX_OFFSET = new CompoundName("maxOffset");
private static Logger log = Logger.getLogger(MinimalQueryInserter.class.getName());
@Inject
public MinimalQueryInserter(Linguistics linguistics) {
warmup(linguistics);
}
MinimalQueryInserter() {
this(new SimpleLinguistics());
}
static boolean warmup() {
return warmup(new SimpleLinguistics());
}
private static Result insertQuery(Query query, ParserEnvironment env) {
YqlParser parser = (YqlParser) ParserFactory.newInstance(Query.Type.YQL, env);
parser.setQueryParser(false);
parser.setUserQuery(query);
QueryTree newTree;
try {
Parsable parsable = Parsable.fromQueryModel(query.getModel()).setQuery(query.properties().getString(YQL));
newTree = parser.parse(parsable);
} catch (RuntimeException e) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Could not instantiate query from YQL", e));
}
if (parser.getOffset() != null) {
int maxHits = query.properties().getInteger(MAX_HITS);
int maxOffset = query.properties().getInteger(MAX_OFFSET);
if (parser.getOffset() > maxOffset) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested offset " + parser.getOffset()
+ ", but the max offset allowed is " + maxOffset + "."));
}
if (parser.getHits() > maxHits) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested " + parser.getHits()
+ " hits returned, but max hits allowed is " + maxHits + "."));
}
}
query.getModel().getQueryTree().setRoot(newTree.getRoot());
query.getPresentation().getSummaryFields().addAll(parser.getYqlSummaryFields());
for (VespaGroupingStep step : parser.getGroupingSteps()) {
GroupingRequest.newInstance(query)
.setRootOperation(step.getOperation())
.continuations().addAll(step.continuations());
}
if (parser.getYqlSources().size() == 0) {
query.getModel().getSources().clear();
} else {
query.getModel().getSources().addAll(parser.getYqlSources());
}
if (parser.getOffset() != null) {
query.setOffset(parser.getOffset());
query.setHits(parser.getHits());
}
if (parser.getTimeout() != null) {
query.setTimeout(parser.getTimeout().longValue());
}
if (parser.getSorting() != null) {
query.getRanking().setSorting(parser.getSorting());
}
query.trace("YQL+ query parsed", true, 2);
return null;
}
@Override
public Result search(Query query, Execution execution) {
if (query.properties().get(YQL) == null) return execution.search(query);
Result result = insertQuery(query, ParserEnvironment.fromExecutionContext(execution.context()));
return (result == null) ? execution.search(query) : result;
}
} | class MinimalQueryInserter extends Searcher {
public static final String EXTERNAL_YQL = "ExternalYql";
public static final CompoundName YQL = new CompoundName("yql");
private static final CompoundName MAX_HITS = new CompoundName("maxHits");
private static final CompoundName MAX_OFFSET = new CompoundName("maxOffset");
private static Logger log = Logger.getLogger(MinimalQueryInserter.class.getName());
@Inject
public MinimalQueryInserter(Linguistics linguistics) {
warmup(linguistics);
}
MinimalQueryInserter() {
this(new SimpleLinguistics());
}
static boolean warmup() {
return warmup(new SimpleLinguistics());
}
private
private static Result insertQuery(Query query, ParserEnvironment env) {
YqlParser parser = (YqlParser) ParserFactory.newInstance(Query.Type.YQL, env);
parser.setQueryParser(false);
parser.setUserQuery(query);
QueryTree newTree;
try {
Parsable parsable = Parsable.fromQueryModel(query.getModel()).setQuery(query.properties().getString(YQL));
newTree = parser.parse(parsable);
} catch (RuntimeException e) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Could not instantiate query from YQL", e));
}
if (parser.getOffset() != null) {
int maxHits = query.properties().getInteger(MAX_HITS);
int maxOffset = query.properties().getInteger(MAX_OFFSET);
if (parser.getOffset() > maxOffset) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested offset " + parser.getOffset()
+ ", but the max offset allowed is " + maxOffset + "."));
}
if (parser.getHits() > maxHits) {
return new Result(query, ErrorMessage.createInvalidQueryParameter("Requested " + parser.getHits()
+ " hits returned, but max hits allowed is " + maxHits + "."));
}
}
query.getModel().getQueryTree().setRoot(newTree.getRoot());
query.getPresentation().getSummaryFields().addAll(parser.getYqlSummaryFields());
for (VespaGroupingStep step : parser.getGroupingSteps()) {
GroupingRequest.newInstance(query)
.setRootOperation(step.getOperation())
.continuations().addAll(step.continuations());
}
if (parser.getYqlSources().size() == 0) {
query.getModel().getSources().clear();
} else {
query.getModel().getSources().addAll(parser.getYqlSources());
}
if (parser.getOffset() != null) {
query.setOffset(parser.getOffset());
query.setHits(parser.getHits());
}
if (parser.getTimeout() != null) {
query.setTimeout(parser.getTimeout().longValue());
}
if (parser.getSorting() != null) {
query.getRanking().setSorting(parser.getSorting());
}
query.trace("YQL+ query parsed", true, 2);
return null;
}
@Override
public Result search(Query query, Execution execution) {
if (query.properties().get(YQL) == null) return execution.search(query);
Result result = insertQuery(query, ParserEnvironment.fromExecutionContext(execution.context()));
return (result == null) ? execution.search(query) : result;
}
} |
no need for cast here | public Result get() {
try {
return super.get();
}
catch (InterruptedException e) {
return new Result(getQuery(), createInterruptedError(e));
}
catch (ExecutionException e) {
return new Result(getQuery(), createExecutionError((ExecutionException)e));
}
} | return new Result(getQuery(), createExecutionError((ExecutionException)e)); | public Result get() {
try {
return super.get();
}
catch (InterruptedException e) {
return new Result(getQuery(), createInterruptedError(e));
}
catch (ExecutionException e) {
return new Result(getQuery(), createExecutionError(e));
}
} | class FutureResult extends FutureTask<Result> {
private final Query query;
/** Only used for generating messages */
private final Execution execution;
private final static Logger log = Logger.getLogger(FutureResult.class.getName());
public FutureResult(Callable<Result> callable, Execution execution, Query query) {
super(callable);
this.query = query;
this.execution = execution;
}
/**
* Returns a Result containing the hits returned from this source, or an error otherwise.
* This will block for however long it takes to get the result: Using this is a bad idea.
*/
@Override
/**
* Returns a Result containing the hits returned from this source, or an error otherwise.
* This blocks for at most the given timeout and returns a Result containing a timeout error
* if the result is not available within this time.
*/
@Override
public Result get(long timeout, TimeUnit timeunit) {
return getIfAvailable(timeout, timeunit)
.orElseGet(() -> new Result(getQuery(), createTimeoutError()));
}
/**
* Same as get(timeout, timeunit) but returns Optional.empty instead of a result with error if the result is
* not available in time
*/
public Optional<Result> getIfAvailable(long timeout, TimeUnit timeunit) {
try {
return Optional.of(super.get(timeout, timeunit));
}
catch (InterruptedException e) {
return Optional.of(new Result(getQuery(), createInterruptedError(e)));
}
catch (ExecutionException e) {
if (e.getCause() instanceof com.yahoo.search.federation.TimeoutException)
return Optional.empty();
return Optional.of(new Result(getQuery(), createExecutionError((ExecutionException)e)));
}
catch (TimeoutException e) {
return Optional.empty();
}
}
/** Returns the query used in this execution, never null */
public Query getQuery() {
return query;
}
private ErrorMessage createInterruptedError(Exception e) {
return ErrorMessage.createUnspecifiedError(execution + " was interrupted while executing: " +
Exceptions.toMessageString(e));
}
private ErrorMessage createExecutionError(ExecutionException e) {
log.log(Level.WARNING,"Exception in " + execution + " of " + query, e.getCause());
return ErrorMessage.createErrorInPluginSearcher("Error in '" + execution + "': " +
Exceptions.toMessageString(e.getCause()), e.getCause());
}
public ErrorMessage createTimeoutError() {
return ErrorMessage.createTimeout("Error in " + execution + ": Chain timed out.");
}
} | class FutureResult extends FutureTask<Result> {
private final Query query;
/** Only used for generating messages */
private final Execution execution;
private final static Logger log = Logger.getLogger(FutureResult.class.getName());
public FutureResult(Callable<Result> callable, Execution execution, Query query) {
super(callable);
this.query = query;
this.execution = execution;
}
/**
* Returns a Result containing the hits returned from this source, or an error otherwise.
* This will block for however long it takes to get the result: Using this is a bad idea.
*/
@Override
/**
* Returns a Result containing the hits returned from this source, or an error otherwise.
* This blocks for at most the given timeout and returns a Result containing a timeout error
* if the result is not available within this time.
*/
@Override
public Result get(long timeout, TimeUnit timeunit) {
return getIfAvailable(timeout, timeunit)
.orElseGet(() -> new Result(getQuery(), createTimeoutError()));
}
/**
* Same as get(timeout, timeunit) but returns Optional.empty instead of a result with error if the result is
* not available in time
*/
public Optional<Result> getIfAvailable(long timeout, TimeUnit timeunit) {
try {
return Optional.of(super.get(timeout, timeunit));
}
catch (InterruptedException e) {
return Optional.of(new Result(getQuery(), createInterruptedError(e)));
}
catch (ExecutionException e) {
if (e.getCause() instanceof com.yahoo.search.federation.TimeoutException)
return Optional.empty();
return Optional.of(new Result(getQuery(), createExecutionError(e)));
}
catch (TimeoutException e) {
return Optional.empty();
}
}
/** Returns the query used in this execution, never null */
public Query getQuery() {
return query;
}
private ErrorMessage createInterruptedError(Exception e) {
return ErrorMessage.createUnspecifiedError(execution + " was interrupted while executing: " +
Exceptions.toMessageString(e));
}
private ErrorMessage createExecutionError(ExecutionException e) {
log.log(Level.WARNING,"Exception in " + execution + " of " + query, e.getCause());
return ErrorMessage.createErrorInPluginSearcher("Error in '" + execution + "': " +
Exceptions.toMessageString(e.getCause()), e.getCause());
}
public ErrorMessage createTimeoutError() {
return ErrorMessage.createTimeout("Error in " + execution + ": Chain timed out.");
}
} |
`applicattion` -> `application` | public void deleteApplication(TenantName tenantName, ApplicationName applicationName, Optional<Credentials> credentials) {
Tenant tenant = controller.tenants().require(tenantName);
if (tenant.type() != Tenant.Type.user && credentials.isEmpty())
throw new IllegalArgumentException("Could not delete application '" + tenantName + "." + applicationName + "': No credentials provided");
List<ApplicationId> instances = asList(tenantName).stream()
.map(Application::id)
.filter(id -> id.application().equals(applicationName))
.collect(Collectors.toList());
if (instances.size() > 1)
throw new IllegalArgumentException("Could not delete applicattion; more than one instance present: " + instances);
instances.forEach(id -> deleteInstance(id, credentials));
if (tenant.type() != Tenant.Type.user)
accessControl.deleteApplication(ApplicationId.from(tenantName, applicationName, InstanceName.defaultName()), credentials.get());
} | throw new IllegalArgumentException("Could not delete applicattion; more than one instance present: " + instances); | public void deleteApplication(TenantName tenantName, ApplicationName applicationName, Optional<Credentials> credentials) {
Tenant tenant = controller.tenants().require(tenantName);
if (tenant.type() != Tenant.Type.user && credentials.isEmpty())
throw new IllegalArgumentException("Could not delete application '" + tenantName + "." + applicationName + "': No credentials provided");
List<ApplicationId> instances = asList(tenantName).stream()
.map(Application::id)
.filter(id -> id.application().equals(applicationName))
.collect(Collectors.toList());
if (instances.size() > 1)
throw new IllegalArgumentException("Could not delete application; more than one instance present: " + instances);
instances.forEach(id -> deleteInstance(id, credentials));
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final ApplicationStore applicationStore;
private final RotationRepository rotationRepository;
private final AccessControl accessControl;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final RoutingPolicies routingPolicies;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
private final BooleanFlag provisionApplicationCertificate;
private final DeploymentSpecValidator deploymentSpecValidator;
ApplicationController(Controller controller, CuratorDb curator,
AccessControl accessControl, RotationsConfig rotationsConfig,
Clock clock) {
this.controller = controller;
this.curator = curator;
this.accessControl = accessControl;
this.configServer = controller.serviceRegistry().configServer();
this.routingGenerator = controller.serviceRegistry().routingGenerator();
this.clock = clock;
this.artifactRepository = controller.serviceRegistry().artifactRepository();
this.applicationStore = controller.serviceRegistry().applicationStore();
routingPolicies = new RoutingPolicies(controller);
rotationRepository = new RotationRepository(rotationsConfig, this, curator);
deploymentTrigger = new DeploymentTrigger(controller, controller.serviceRegistry().buildService(), clock);
provisionApplicationCertificate = Flags.PROVISION_APPLICATION_CERTIFICATE.bindTo(controller.flagSource());
deploymentSpecValidator = new DeploymentSpecValidator(controller);
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
int count = 0;
for (Application application : curator.readApplications()) {
lockIfPresent(application.id(), this::store);
count++;
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
}
/** Returns the application with the given id, or null if it is not present */
public Optional<Application> get(ApplicationId id) {
return curator.readApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return sort(curator.readApplications());
}
/** Returns all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return sort(curator.readApplications(tenant));
}
public ArtifactRepository artifacts() { return artifactRepository; }
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
public Version oldestInstalledPlatform(ApplicationId id) {
return get(id).flatMap(application -> application.productionDeployments().keySet().stream()
.flatMap(zone -> configServer.nodeRepository().list(zone, id, EnumSet.of(active, reserved)).stream())
.map(Node::currentVersion)
.filter(version -> ! version.isEmpty())
.min(naturalOrder()))
.orElse(controller.systemVersion());
}
/** Change the global endpoint status for given deployment */
public void setGlobalRotationStatus(DeploymentId deployment, EndpointStatus status) {
findGlobalEndpoint(deployment).map(endpoint -> {
try {
configServer.setGlobalRotationStatus(deployment, endpoint.upstreamName(), status);
return endpoint;
} catch (Exception e) {
throw new RuntimeException("Failed to set rotation status of " + deployment, e);
}
}).orElseThrow(() -> new IllegalArgumentException("No global endpoint exists for " + deployment));
}
/** Get global endpoint status for given deployment */
public Map<RoutingEndpoint, EndpointStatus> globalRotationStatus(DeploymentId deployment) {
return findGlobalEndpoint(deployment).map(endpoint -> {
try {
EndpointStatus status = configServer.getGlobalRotationStatus(deployment, endpoint.upstreamName());
return Map.of(endpoint, status);
} catch (Exception e) {
throw new RuntimeException("Failed to get rotation status of " + deployment, e);
}
}).orElseGet(Collections::emptyMap);
}
/** Find the global endpoint of given deployment, if any */
private Optional<RoutingEndpoint> findGlobalEndpoint(DeploymentId deployment) {
return routingGenerator.endpoints(deployment).stream()
.filter(RoutingEndpoint::isGlobal)
.findFirst();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<Credentials> credentials) {
if (id.instance().isTester())
throw new IllegalArgumentException("'" + id + "' is a tester application!");
try (Lock lock = lock(id)) {
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().get(id.tenant());
if (tenant.isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
if (tenant.get().type() != Tenant.Type.user) {
if (credentials.isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': No credentials provided");
if ( ! id.instance().isTester())
accessControl.createApplication(id, credentials.get());
}
LockedApplication application = new LockedApplication(new Application(id, clock.instant()), lock);
store(application);
log.info("Created " + application);
return application.get();
}
}
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
DeployOptions options) {
return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options, Optional.empty());
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
Optional<ApplicationVersion> applicationVersionFromDeployer,
DeployOptions options,
Optional<Principal> deployingIdentity) {
if (applicationId.instance().isTester())
throw new IllegalArgumentException("'" + applicationId + "' is a tester application!");
Tenant tenant = controller.tenants().require(applicationId.tenant());
if (tenant.type() == Tenant.Type.user && get(applicationId).isEmpty())
createApplication(applicationId, Optional.empty());
try (Lock deploymentLock = lockForDeployment(applicationId, zone)) {
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
Set<ContainerEndpoint> endpoints;
Optional<ApplicationCertificate> applicationCertificate;
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(require(applicationId), lock);
boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
if (manuallyDeployed) {
applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
platformVersion = options.vespaVersion.map(Version::new).orElse(applicationPackage.deploymentSpec().majorVersion()
.flatMap(this::lastCompatibleVersion)
.orElseGet(controller::systemVersion));
}
else {
JobType jobType = JobType.from(controller.system(), zone)
.orElseThrow(() -> new IllegalArgumentException("No job is known for " + zone + "."));
Optional<JobStatus> job = Optional.ofNullable(application.get().deploymentJobs().jobStatus().get(jobType));
if ( job.isEmpty()
|| job.get().lastTriggered().isEmpty()
|| job.get().lastCompleted().isPresent() && job.get().lastCompleted().get().at().isAfter(job.get().lastTriggered().get().at()))
return unexpectedDeployment(applicationId, zone);
JobRun triggered = job.get().lastTriggered().get();
platformVersion = preferOldestVersion ? triggered.sourcePlatform().orElse(triggered.platform())
: triggered.platform();
applicationVersion = preferOldestVersion ? triggered.sourceApplication().orElse(triggered.application())
: triggered.application();
applicationPackage = getApplicationPackage(application.get(), applicationVersion);
applicationPackage = withTesterCertificate(applicationPackage, applicationId, jobType);
validateRun(application.get(), zone, platformVersion, applicationVersion);
}
verifyApplicationIdentityConfiguration(applicationId.tenant(), applicationPackage, deployingIdentity);
application = withRotation(application, zone);
endpoints = registerEndpointsInDns(application.get(), zone);
if (controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) {
List<? extends ZoneApi> zones = controller.zoneRegistry().zones().all().zones();
applicationCertificate = getApplicationCertificate(application.get());
} else {
applicationCertificate = Optional.empty();
}
if ( ! preferOldestVersion
&& ! application.get().deploymentJobs().deployedInternally()
&& ! zone.environment().isManuallyDeployed())
storeWithUpdatedConfig(application, applicationPackage);
}
options = withVersion(platformVersion, options);
ActivateResult result = deploy(applicationId, applicationPackage, zone, options, endpoints,
applicationCertificate.orElse(null));
lockOrThrow(applicationId, application ->
store(application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(),
warningsFrom(result))));
return result;
}
}
private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) {
if (applicationPackage.trustedCertificates().isEmpty())
return applicationPackage;
Run run = controller.jobController().last(id, type)
.orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found"));
if (run.testerCertificate().isEmpty())
return applicationPackage;
return applicationPackage.withTrustedCertificate(run.testerCertificate().get());
}
/** Fetches the requested application package from the artifact store(s). */
public ApplicationPackage getApplicationPackage(Application application, ApplicationVersion version) {
try {
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(applicationStore.get(application.id(), version))
: new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()));
}
catch (RuntimeException e) {
try {
log.info("Fetching application package for " + application.id() + " from alternate repository; it is now deployed "
+ (application.deploymentJobs().deployedInternally() ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
return application.deploymentJobs().deployedInternally()
? new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()))
: new ApplicationPackage(applicationStore.get(application.id(), version));
}
catch (RuntimeException s) {
e.addSuppressed(s);
throw e;
}
}
}
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
deploymentSpecValidator.validate(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
application = withoutDeletedDeployments(application);
application = withoutUnreferencedDeploymentJobs(application);
store(application);
return application;
}
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
deploySystemApplicationPackage(application, zone, version);
} else {
configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
}
}
/** Deploy a system application to given zone */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
ApplicationPackage applicationPackage = new ApplicationPackage(
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
DeployOptions options = withVersion(version, DeployOptions.none());
return deploy(application.id(), applicationPackage, zone, options, Set.of(), /* No application cert */ null);
} else {
throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
}
}
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
return deploy(tester.id(), applicationPackage, zone, options, Set.of(), /* No application cert for tester*/ null);
}
    // Deploys the given package through the config server and wraps the result.
    // applicationCertificate may be null when no certificate should be used.
    private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                                  ZoneId zone, DeployOptions deployOptions, Set<ContainerEndpoint> endpoints,
                                  ApplicationCertificate applicationCertificate) {
        DeploymentId deploymentId = new DeploymentId(application, zone);
        try {
            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(deploymentId, deployOptions, Set.of(), endpoints, applicationCertificate, applicationPackage.zippedContent());
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        } finally {
            // Refresh routing policies even on failure, as the config server may have partially applied the change
            routingPolicies.refresh(application, applicationPackage.deploymentSpec(), zone);
        }
    }
/** Makes sure the application has a global rotation, if eligible. */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
if (zone.environment() == Environment.prod) {
try (RotationLock rotationLock = rotationRepository.lock()) {
var rotations = rotationRepository.getOrAssignRotations(application.get(), rotationLock);
application = application.with(rotations);
store(application);
}
}
return application;
}
    /**
     * Register endpoints for rotations assigned to given application and zone in DNS.
     *
     * @return the registered endpoints
     */
    private Set<ContainerEndpoint> registerEndpointsInDns(Application application, ZoneId zone) {
        var containerEndpoints = new HashSet<ContainerEndpoint>();
        // Legacy names are registered only when the (deprecated) global-service-id is used in the spec
        var registerLegacyNames = application.deploymentSpec().globalServiceId().isPresent();
        for (var assignedRotation : application.rotations()) {
            var names = new ArrayList<String>();
            var endpoints = application.endpointsIn(controller.system(), assignedRotation.endpointId())
                                       .scope(Endpoint.Scope.global);
            // Skip rotations which do not include the deployed zone's region (unless legacy names are in use)
            if (!registerLegacyNames && !assignedRotation.regions().contains(zone.region())) {
                continue;
            }
            if (!registerLegacyNames) {
                endpoints = endpoints.legacy(false);
            }
            var rotation = rotationRepository.getRotation(assignedRotation.rotationId());
            if (rotation.isPresent()) {
                // Queue a CNAME from each endpoint name to the rotation's target name
                endpoints.asList().forEach(endpoint -> {
                    controller.nameServiceForwarder().createCname(RecordName.from(endpoint.dnsName()),
                                                                  RecordData.fqdn(rotation.get().name()),
                                                                  Priority.normal);
                    names.add(endpoint.dnsName());
                });
            }
            // The rotation id itself is always included as a name
            names.add(assignedRotation.rotationId().asString());
            containerEndpoints.add(new ContainerEndpoint(assignedRotation.clusterId().value(), names));
        }
        return Collections.unmodifiableSet(containerEndpoints);
    }
    // Returns the application's certificate, provisioning and persisting a new one if the feature
    // flag is enabled for this application and none is stored yet.
    private Optional<ApplicationCertificate> getApplicationCertificate(Application application) {
        // Feature-flagged per application id
        boolean provisionCertificate = provisionApplicationCertificate.with(FetchVector.Dimension.APPLICATION_ID,
                                                                            application.id().serializedForm()).value();
        if (!provisionCertificate) {
            return Optional.empty();
        }
        // Reuse a previously stored certificate when present
        Optional<ApplicationCertificate> applicationCertificate = curator.readApplicationCertificate(application.id());
        if(applicationCertificate.isPresent())
            return applicationCertificate;
        // Otherwise request a new CA-signed certificate covering all valid DNS names, and persist it
        ApplicationCertificate newCertificate = controller.serviceRegistry().applicationCertificateProvider().requestCaSignedCertificate(application.id(), dnsNamesOf(application.id()));
        curator.writeApplicationCertificate(application.id(), newCertificate);
        return Optional.of(newCertificate);
    }
    /** Returns all valid DNS names of given application */
    private List<String> dnsNamesOf(ApplicationId applicationId) {
        List<String> endpointDnsNames = new ArrayList<>();
        // Hashed common name first, then global default/wildcard endpoints, then zone-local endpoints
        endpointDnsNames.add(Endpoint.createHashedCn(applicationId, controller.system()));
        var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.default_());
        var rotationEndpoints = Endpoint.of(applicationId).wildcard();
        var zoneLocalEndpoints = controller.zoneRegistry().zones().directlyRouted().zones().stream().flatMap(zone -> Stream.of(
                Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone.getId()),
                Endpoint.of(applicationId).wildcard(zone.getId())
        ));
        // All names use direct routing over TLS in this system
        Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
              .map(Endpoint.EndpointBuilder::directRouting)
              .map(endpoint -> endpoint.on(Endpoint.Port.tls()))
              .map(endpointBuilder -> endpointBuilder.in(controller.system()))
              .map(Endpoint::dnsName).forEach(endpointDnsNames::add);
        return Collections.unmodifiableList(endpointDnsNames);
    }
private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) {
Log logEntry = new Log();
logEntry.level = "WARNING";
logEntry.time = clock.instant().toEpochMilli();
logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone +
" as a deployment is not currently expected";
PrepareResponse prepareResponse = new PrepareResponse();
prepareResponse.log = List.of(logEntry);
prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of());
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
    // Deactivates and removes production deployments whose zone is no longer in the deployment spec.
    // Removal must be explicitly allowed by a currently valid 'deployment-removal' validation override.
    private LockedApplication withoutDeletedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove = application.get().productionDeployments().values().stream()
                .filter(deployment -> ! application.get().deploymentSpec().includes(deployment.zone().environment(),
                                                                                   Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());
        if (deploymentsToRemove.isEmpty()) return application;
        if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get() +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(deployment -> deployment.zone().region().value())
                                                                  .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml. " +
                                               ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
        // Deactivate each removed deployment in the config server before dropping it from our state
        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }
private LockedApplication withoutUnreferencedDeploymentJobs(LockedApplication application) {
for (JobType job : JobList.from(application.get()).production().mapToList(JobStatus::type)) {
ZoneId zone = job.zone(controller.system());
if (application.get().deploymentSpec().includes(zone.environment(), Optional.of(zone.region())))
continue;
application = application.withoutDeploymentJob(job);
}
return application;
}
    // Returns a copy of the given options with the platform version pinned to the given version
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.deployDirectly,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }
    /** Returns the endpoints of the deployment, or empty if the request fails */
    public List<URI> getDeploymentEndpoints(DeploymentId deploymentId) {
        // Known deployments only; tester instances are accepted even without a recorded deployment
        if ( ! get(deploymentId.applicationId())
                .map(application -> application.deployments().containsKey(deploymentId.zoneId()))
                .orElse(deploymentId.applicationId().instance().isTester()))
            throw new NotExistsException("Deployment", deploymentId.toString());
        try {
            return ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
                                                        .map(RoutingEndpoint::endpoint)
                                                        .map(URI::create)
                                                        .iterator());
        }
        catch (RuntimeException e) {
            // Best effort: log and return empty rather than failing the caller
            log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId, e);
            return Collections.emptyList();
        }
    }
    /** Returns the non-empty endpoints per cluster in the given deployment, or empty if endpoints can't be found. */
    public Map<ClusterSpec.Id, URI> clusterEndpoints(DeploymentId id) {
        // Known deployments only; tester instances are accepted even without a recorded deployment
        if ( ! get(id.applicationId())
                .map(application -> application.deployments().containsKey(id.zoneId()))
                .orElse(id.applicationId().instance().isTester()))
            throw new NotExistsException("Deployment", id.toString());
        // Primary source: the routing generator
        try {
            var endpoints = routingGenerator.clusterEndpoints(id);
            if ( ! endpoints.isEmpty())
                return endpoints;
        }
        catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to get endpoint information for " + id, e);
        }
        // Fallback: derive zone-scoped endpoints from stored routing policies
        return routingPolicies.get(id).stream()
                              .filter(policy -> policy.endpointIn(controller.system()).scope() == Endpoint.Scope.zone)
                              .collect(Collectors.toUnmodifiableMap(policy -> policy.cluster(),
                                                                    policy -> policy.endpointIn(controller.system()).url()));
    }
/** Returns all zone-specific cluster endpoints for the given application, in the given zones. */
public Map<ZoneId, Map<ClusterSpec.Id, URI>> clusterEndpoints(ApplicationId id, Collection<ZoneId> zones) {
Map<ZoneId, Map<ClusterSpec.Id, URI>> deployments = new TreeMap<>(Comparator.comparing(ZoneId::value));
for (ZoneId zone : zones) {
var endpoints = clusterEndpoints(new DeploymentId(id, zone));
if ( ! endpoints.isEmpty())
deployments.put(zone, endpoints);
}
return Collections.unmodifiableMap(deployments);
}
/**
     * Deletes the given application. All known instances of the application will be deleted.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
*/
/**
     * Deletes the given application instance.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if the instance does not exist
*/
    // Deletes the given instance: removes stored state, application packages and queued DNS record removals.
    // Fails unless the instance exists, has no active deployments, and (for non-user tenants) credentials are given.
    public void deleteInstance(ApplicationId applicationId, Optional<Credentials> credentials) {
        Tenant tenant = controller.tenants().require(applicationId.tenant());
        if (tenant.type() != Tenant.Type.user && credentials.isEmpty())
            throw new IllegalArgumentException("Could not delete application '" + applicationId + "': No credentials provided");
        if (controller.applications().get(applicationId).isEmpty()) {
            throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
        }
        lockOrThrow(applicationId, application -> {
            // Refuse deletion while any deployment is still active
            if ( ! application.get().deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                                   application.get().deployments().keySet().stream().map(ZoneId::toString)
                                                              .sorted().collect(Collectors.joining(", ")));
            curator.removeApplication(applicationId);
            // Remove stored packages for both the application and its tester
            applicationStore.removeAll(applicationId);
            applicationStore.removeAll(TesterId.of(applicationId));
            // Queue removal of all CNAME records created for the application's rotations
            application.get().rotations().forEach(assignedRotation -> {
                var endpoints = application.get().endpointsIn(controller.system(), assignedRotation.endpointId());
                endpoints.asList().stream()
                         .map(Endpoint::dnsName)
                         .forEach(name -> {
                             controller.nameServiceForwarder().removeRecords(Record.Type.CNAME, RecordName.from(name), Priority.normal);
                         });
            });
            log.info("Deleted " + application);
        });
    }
    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        // Requiring a LockedApplication ensures the caller holds the application lock while writing
        curator.writeApplication(application.get());
    }
    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            // The action runs while the lock is held; a missing application is silently a no-op
            get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }
    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(require(applicationId), lock));
        }
    }
    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param hostname If non-empty, restart will only be scheduled for this host
     */
    public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
        configServer.restart(deploymentId, hostname);
    }
/**
* Asks the config server whether this deployment is currently <i>suspended</i>:
* Not in a state where it should receive traffic.
*/
public boolean isSuspended(DeploymentId deploymentId) {
try {
return configServer.isSuspended(deploymentId);
}
catch (ConfigServerException e) {
if (e.getErrorCode() == ConfigServerException.ErrorCode.NOT_FOUND)
return false;
throw e;
}
}
    /** Deactivate application in the given zone */
    public void deactivate(ApplicationId application, ZoneId zone) {
        lockOrThrow(application, lockedApplication -> store(deactivate(lockedApplication, zone)));
    }
    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
        try {
            configServer.deactivate(new DeploymentId(application.get().id(), zone));
        } catch (NotFoundException ignored) {
            // Already gone on the config server; still remove it from our state below
        } finally {
            // Keep routing policies in sync regardless of the config server outcome
            routingPolicies.refresh(application.get().id(), application.get().deploymentSpec(), zone);
        }
        return application.withoutDeploymentIn(zone);
    }
    /** Returns the deployment trigger owned by this */
    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
private ApplicationId dashToUnderscore(ApplicationId id) {
return ApplicationId.from(id.tenant().value(),
id.application().value().replaceAll("-", "_"),
id.instance().value());
}
    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        return curator.lock(application);
    }
    /**
     * Returns a lock which provides exclusive rights to deploying this application to the given zone.
     */
    private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
        // Per-zone lock, so deployments of the same application to different zones may proceed in parallel
        return curator.lockForDeployment(application, zone);
    }
/** Verify that we don't downgrade an existing production deployment. */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
Deployment deployment = application.deployments().get(zone);
if ( zone.environment().isProduction() && deployment != null
&& ( platformVersion.compareTo(deployment.version()) < 0 && ! application.change().isPinned()
|| applicationVersion.compareTo(deployment.applicationVersion()) < 0))
throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
    /** Returns the rotation repository, used for managing global rotation assignments */
    public RotationRepository rotationRepository() {
        return rotationRepository;
    }
    /** Returns the routing policies maintained by this */
    public RoutingPolicies routingPolicies() {
        return routingPolicies;
    }
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
return applications.stream().sorted(Comparator.comparing(Application::id)).collect(Collectors.toList());
}
    /**
     * Verifies that the application can be deployed to the tenant, following these rules:
     *
     * 1. Verify that the Athenz service can be launched by the config server
     * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
     * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
     *
     * @param tenantName Tenant where application should be deployed
     * @param applicationPackage Application package
     * @param deployer Principal initiating the deployment, possibly empty
     */
    public void verifyApplicationIdentityConfiguration(TenantName tenantName, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
        verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
        // Remaining checks only apply when the deployment spec declares an Athenz domain
        applicationPackage.deploymentSpec().athenzDomain().ifPresent(identityDomain -> {
            Tenant tenant = controller.tenants().require(tenantName);
            deployer.filter(AthenzPrincipal.class::isInstance)
                    .map(AthenzPrincipal.class::cast)
                    .map(AthenzPrincipal::getIdentity)
                    .filter(AthenzUser.class::isInstance)
                    .ifPresentOrElse(user -> {
                                         // Rule 2: the deploying user must be an admin of the declared domain
                                         if ( ! ((AthenzFacade) accessControl).hasTenantAdminAccess(user, new AthenzDomain(identityDomain.value())))
                                             throw new IllegalArgumentException("User " + user.getFullName() + " is not allowed to launch " +
                                                                               "services in Athenz domain " + identityDomain.value() + ". " +
                                                                               "Please reach out to the domain admin.");
                                     },
                                     () -> {
                                         // Rule 3: without a user principal, the tenant's own domain must match
                                         if (tenant.type() != Tenant.Type.athenz)
                                             throw new IllegalArgumentException("Athenz domain defined in deployment.xml, but no " +
                                                                               "Athenz domain for tenant " + tenantName.value());
                                         AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
                                         if ( ! Objects.equals(tenantDomain.getName(), identityDomain.value()))
                                             throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.value() + "] " +
                                                                               "must match tenant domain: [" + tenantDomain.getName() + "]");
                                     });
        });
    }
    /*
     * Verifies that the configured athenz service (if any) can be launched.
     */
    private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
        deploymentSpec.athenzDomain().ifPresent(athenzDomain -> {
            // Check every reachable zone's config server identity against the zone's configured service
            controller.zoneRegistry().zones().reachable().ids()
                      .forEach(zone -> {
                          AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerAthenzIdentity(zone);
                          deploymentSpec.athenzService(zone.environment(), zone.region())
                                        .map(service -> new AthenzService(athenzDomain.value(), service.value()))
                                        .ifPresent(service -> {
                                            boolean allowedToLaunch = ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, service);
                                            if (!allowedToLaunch)
                                                throw new IllegalArgumentException("Not allowed to launch Athenz service " + service.getFullName());
                                        });
                      });
        });
    }
/** Returns the latest known version within the given major. */
private Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
return controller.versionStatus().versions().stream()
.map(VespaVersion::versionNumber)
.filter(version -> version.getMajor() == targetMajorVersion)
.max(naturalOrder());
}
/** Extract deployment warnings metric from deployment result */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
if (result.prepareResponse().log == null) return Map.of();
Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
for (Log log : result.prepareResponse().log) {
if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
}
return Map.copyOf(warnings);
}
}
class ApplicationController {
    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
    /** The controller owning this */
    private final Controller controller;
    /** For persistence */
    private final CuratorDb curator;
    /** Source of application packages for externally built applications */
    private final ArtifactRepository artifactRepository;
    /** Source of application packages for internally built applications */
    private final ApplicationStore applicationStore;
    /** Manages assignment of global rotations to applications */
    private final RotationRepository rotationRepository;
    private final AccessControl accessControl;
    private final ConfigServer configServer;
    private final RoutingGenerator routingGenerator;
    private final RoutingPolicies routingPolicies;
    /** Injected clock; all timestamps are taken from this */
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;
    /** Feature flag deciding whether an application certificate should be provisioned */
    private final BooleanFlag provisionApplicationCertificate;
    private final DeploymentSpecValidator deploymentSpecValidator;
    // Package-private: constructed by Controller only.
    ApplicationController(Controller controller, CuratorDb curator,
                          AccessControl accessControl, RotationsConfig rotationsConfig,
                          Clock clock) {
        this.controller = controller;
        this.curator = curator;
        this.accessControl = accessControl;
        this.configServer = controller.serviceRegistry().configServer();
        this.routingGenerator = controller.serviceRegistry().routingGenerator();
        this.clock = clock;
        this.artifactRepository = controller.serviceRegistry().artifactRepository();
        this.applicationStore = controller.serviceRegistry().applicationStore();
        routingPolicies = new RoutingPolicies(controller);
        rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        deploymentTrigger = new DeploymentTrigger(controller, controller.serviceRegistry().buildService(), clock);
        provisionApplicationCertificate = Flags.PROVISION_APPLICATION_CERTIFICATE.bindTo(controller.flagSource());
        deploymentSpecValidator = new DeploymentSpecValidator(controller);
        // One minute after startup, read and re-store every application
        // NOTE(review): presumably to rewrite stored data on serialization format changes — confirm
        Once.after(Duration.ofMinutes(1), () -> {
            Instant start = clock.instant();
            int count = 0;
            for (Application application : curator.readApplications()) {
                lockIfPresent(application.id(), this::store);
                count++;
            }
            log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
                                              Duration.between(start, clock.instant())));
        });
    }
    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return curator.readApplication(id);
    }
    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }
    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return sort(curator.readApplications());
    }
    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return sort(curator.readApplications(tenant));
    }
    /** Returns the artifact repository used by this */
    public ArtifactRepository artifacts() { return artifactRepository; }
    /** Returns the application store used by this */
    public ApplicationStore applicationStore() { return applicationStore; }
    /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
    public Version oldestInstalledPlatform(ApplicationId id) {
        // Falls back to the system version when the application is unknown or reports no node versions
        return get(id).flatMap(application -> application.productionDeployments().keySet().stream()
                                                         .flatMap(zone -> configServer.nodeRepository().list(zone, id, EnumSet.of(active, reserved)).stream())
                                                         .map(Node::currentVersion)
                                                         .filter(version -> ! version.isEmpty())
                                                         .min(naturalOrder()))
                      .orElse(controller.systemVersion());
    }
    /** Change the global endpoint status for given deployment */
    public void setGlobalRotationStatus(DeploymentId deployment, EndpointStatus status) {
        findGlobalEndpoint(deployment).map(endpoint -> {
            try {
                configServer.setGlobalRotationStatus(deployment, endpoint.upstreamName(), status);
                return endpoint;
            } catch (Exception e) {
                throw new RuntimeException("Failed to set rotation status of " + deployment, e);
            }
        }).orElseThrow(() -> new IllegalArgumentException("No global endpoint exists for " + deployment));
    }
    /** Get global endpoint status for given deployment */
    public Map<RoutingEndpoint, EndpointStatus> globalRotationStatus(DeploymentId deployment) {
        // Empty map when the deployment has no global endpoint
        return findGlobalEndpoint(deployment).map(endpoint -> {
            try {
                EndpointStatus status = configServer.getGlobalRotationStatus(deployment, endpoint.upstreamName());
                return Map.of(endpoint, status);
            } catch (Exception e) {
                throw new RuntimeException("Failed to get rotation status of " + deployment, e);
            }
        }).orElseGet(Collections::emptyMap);
    }
/** Find the global endpoint of given deployment, if any */
private Optional<RoutingEndpoint> findGlobalEndpoint(DeploymentId deployment) {
return routingGenerator.endpoints(deployment).stream()
.filter(RoutingEndpoint::isGlobal)
.findFirst();
}
    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(ApplicationId id, Optional<Credentials> credentials) {
        if (id.instance().isTester())
            throw new IllegalArgumentException("'" + id + "' is a tester application!");
        try (Lock lock = lock(id)) {
            // Only validate the name for new application names; existing ones are grandfathered in
            if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
                com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
            Optional<Tenant> tenant = controller.tenants().get(id.tenant());
            if (tenant.isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            if (get(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // '-' and '_' collide in some naming contexts, so reject names differing only in that
            if (get(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
            if (tenant.get().type() != Tenant.Type.user) {
                if (credentials.isEmpty())
                    throw new IllegalArgumentException("Could not create '" + id + "': No credentials provided");
                if ( ! id.instance().isTester())
                    accessControl.createApplication(id, credentials.get());
            }
            LockedApplication application = new LockedApplication(new Application(id, clock.instant()), lock);
            store(application);
            log.info("Created " + application);
            return application.get();
        }
    }
    // Convenience overload without application version and deploying identity
    public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
                                 Optional<ApplicationPackage> applicationPackageFromDeployer,
                                 DeployOptions options) {
        return deploy(applicationId, zone, applicationPackageFromDeployer, Optional.empty(), options, Optional.empty());
    }
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deploy(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
Optional<ApplicationVersion> applicationVersionFromDeployer,
DeployOptions options,
Optional<Principal> deployingIdentity) {
if (applicationId.instance().isTester())
throw new IllegalArgumentException("'" + applicationId + "' is a tester application!");
Tenant tenant = controller.tenants().require(applicationId.tenant());
if (tenant.type() == Tenant.Type.user && get(applicationId).isEmpty())
createApplication(applicationId, Optional.empty());
try (Lock deploymentLock = lockForDeployment(applicationId, zone)) {
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
Set<ContainerEndpoint> endpoints;
Optional<ApplicationCertificate> applicationCertificate;
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(require(applicationId), lock);
boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
if (manuallyDeployed) {
applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
platformVersion = options.vespaVersion.map(Version::new).orElse(applicationPackage.deploymentSpec().majorVersion()
.flatMap(this::lastCompatibleVersion)
.orElseGet(controller::systemVersion));
}
else {
JobType jobType = JobType.from(controller.system(), zone)
.orElseThrow(() -> new IllegalArgumentException("No job is known for " + zone + "."));
Optional<JobStatus> job = Optional.ofNullable(application.get().deploymentJobs().jobStatus().get(jobType));
if ( job.isEmpty()
|| job.get().lastTriggered().isEmpty()
|| job.get().lastCompleted().isPresent() && job.get().lastCompleted().get().at().isAfter(job.get().lastTriggered().get().at()))
return unexpectedDeployment(applicationId, zone);
JobRun triggered = job.get().lastTriggered().get();
platformVersion = preferOldestVersion ? triggered.sourcePlatform().orElse(triggered.platform())
: triggered.platform();
applicationVersion = preferOldestVersion ? triggered.sourceApplication().orElse(triggered.application())
: triggered.application();
applicationPackage = getApplicationPackage(application.get(), applicationVersion);
applicationPackage = withTesterCertificate(applicationPackage, applicationId, jobType);
validateRun(application.get(), zone, platformVersion, applicationVersion);
}
verifyApplicationIdentityConfiguration(applicationId.tenant(), applicationPackage, deployingIdentity);
application = withRotation(application, zone);
endpoints = registerEndpointsInDns(application.get(), zone);
if (controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) {
List<? extends ZoneApi> zones = controller.zoneRegistry().zones().all().zones();
applicationCertificate = getApplicationCertificate(application.get());
} else {
applicationCertificate = Optional.empty();
}
if ( ! preferOldestVersion
&& ! application.get().deploymentJobs().deployedInternally()
&& ! zone.environment().isManuallyDeployed())
storeWithUpdatedConfig(application, applicationPackage);
}
options = withVersion(platformVersion, options);
ActivateResult result = deploy(applicationId, applicationPackage, zone, options, endpoints,
applicationCertificate.orElse(null));
lockOrThrow(applicationId, application ->
store(application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(),
warningsFrom(result))));
return result;
}
}
    // Adds the current run's tester certificate to the package's trusted certificates, when both exist.
    private ApplicationPackage withTesterCertificate(ApplicationPackage applicationPackage, ApplicationId id, JobType type) {
        // Only packages which already declare trusted certificates participate
        if (applicationPackage.trustedCertificates().isEmpty())
            return applicationPackage;
        Run run = controller.jobController().last(id, type)
                            .orElseThrow(() -> new IllegalStateException("Last run of " + type + " for " + id + " not found"));
        if (run.testerCertificate().isEmpty())
            return applicationPackage;
        return applicationPackage.withTrustedCertificate(run.testerCertificate().get());
    }
    /** Fetches the requested application package from the artifact store(s). */
    public ApplicationPackage getApplicationPackage(Application application, ApplicationVersion version) {
        // Internally deployed applications store packages in the application store; others in the artifact repository
        try {
            return application.deploymentJobs().deployedInternally()
                    ? new ApplicationPackage(applicationStore.get(application.id(), version))
                    : new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()));
        }
        catch (RuntimeException e) {
            // The application may have switched pipelines recently; fall back to the other repository
            try {
                log.info("Fetching application package for " + application.id() + " from alternate repository; it is now deployed "
                         + (application.deploymentJobs().deployedInternally() ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
                return application.deploymentJobs().deployedInternally()
                        ? new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), version.id()))
                        : new ApplicationPackage(applicationStore.get(application.id(), version));
            }
            catch (RuntimeException s) {
                // Report the primary failure, keeping the fallback failure as a suppressed exception
                e.addSuppressed(s);
                throw e;
            }
        }
    }
    /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
    public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
        // Reject the package up front if its deployment spec is not valid for this system
        deploymentSpecValidator.validate(applicationPackage.deploymentSpec());
        application = application.with(applicationPackage.deploymentSpec());
        application = application.with(applicationPackage.validationOverrides());
        // Cleanup: drop deployments and jobs the (possibly changed) spec no longer references
        application = withoutDeletedDeployments(application);
        application = withoutUnreferencedDeploymentJobs(application);
        store(application);
        return application;
    }
/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
deploySystemApplicationPackage(application, zone, version);
} else {
configServer.nodeRepository().upgrade(zone, application.nodeType(), version);
}
}
/** Deploy a system application to given zone */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
if (application.hasApplicationPackage()) {
ApplicationPackage applicationPackage = new ApplicationPackage(
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
DeployOptions options = withVersion(version, DeployOptions.none());
return deploy(application.id(), applicationPackage, zone, options, Set.of(), /* No application cert */ null);
} else {
throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
}
}
    /** Deploys the given tester application to the given zone. */
    public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
        // Testers get no global endpoints and no application certificate
        return deploy(tester.id(), applicationPackage, zone, options, Set.of(), /* No application cert for tester*/ null);
    }
/**
 * Deploys the given application package to the given zone through the config server.
 * Routing policies for the zone are refreshed in a finally block, so they are kept in
 * sync with the config server even when the deployment itself fails.
 */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, DeployOptions deployOptions, Set<ContainerEndpoint> endpoints,
ApplicationCertificate applicationCertificate) {
DeploymentId deploymentId = new DeploymentId(application, zone);
try {
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(deploymentId, deployOptions, Set.of(), endpoints, applicationCertificate, applicationPackage.zippedContent());
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
} finally {
routingPolicies.refresh(application, applicationPackage.deploymentSpec(), zone);
}
}
/** Makes sure the application has a global rotation, if eligible. */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
// Rotations are only assigned for production deployments.
if (zone.environment() == Environment.prod) {
try (RotationLock rotationLock = rotationRepository.lock()) {
var rotations = rotationRepository.getOrAssignRotations(application.get(), rotationLock);
application = application.with(rotations);
// Persist the assignment while still holding the rotation lock.
store(application);
}
}
return application;
}
/**
 * Register endpoints for rotations assigned to given application and zone in DNS.
 *
 * @return the registered endpoints
 */
private Set<ContainerEndpoint> registerEndpointsInDns(Application application, ZoneId zone) {
var containerEndpoints = new HashSet<ContainerEndpoint>();
// Legacy endpoint names are registered when the (older) globalServiceId mechanism is in use.
var registerLegacyNames = application.deploymentSpec().globalServiceId().isPresent();
for (var assignedRotation : application.rotations()) {
var names = new ArrayList<String>();
var endpoints = application.endpointsIn(controller.system(), assignedRotation.endpointId())
.scope(Endpoint.Scope.global);
// Skip rotations that do not cover this zone's region (legacy names are registered regardless).
if (!registerLegacyNames && !assignedRotation.regions().contains(zone.region())) {
continue;
}
if (!registerLegacyNames) {
endpoints = endpoints.legacy(false);
}
var rotation = rotationRepository.getRotation(assignedRotation.rotationId());
if (rotation.isPresent()) {
// Point each endpoint name at the rotation via a CNAME record.
endpoints.asList().forEach(endpoint -> {
controller.nameServiceForwarder().createCname(RecordName.from(endpoint.dnsName()),
RecordData.fqdn(rotation.get().name()),
Priority.normal);
names.add(endpoint.dnsName());
});
}
// The rotation id itself is always included as a name for the container endpoint.
names.add(assignedRotation.rotationId().asString());
containerEndpoints.add(new ContainerEndpoint(assignedRotation.clusterId().value(), names));
}
return Collections.unmodifiableSet(containerEndpoints);
}
/**
 * Returns the certificate for the given application, requesting and persisting a new one
 * if certificate provisioning is enabled and none is stored yet. Returns empty when
 * provisioning is disabled for the application.
 */
private Optional<ApplicationCertificate> getApplicationCertificate(Application application) {
    boolean provisionCertificate = provisionApplicationCertificate.with(FetchVector.Dimension.APPLICATION_ID,
                                                                        application.id().serializedForm()).value();
    if ( ! provisionCertificate) return Optional.empty();

    // Reuse a previously stored certificate when one exists.
    Optional<ApplicationCertificate> stored = curator.readApplicationCertificate(application.id());
    if (stored.isPresent()) return stored;

    ApplicationCertificate newCertificate = controller.serviceRegistry().applicationCertificateProvider()
                                                      .requestCaSignedCertificate(application.id(), dnsNamesOf(application.id()));
    curator.writeApplicationCertificate(application.id(), newCertificate);
    return Optional.of(newCertificate);
}
/** Returns all valid DNS names of given application */
private List<String> dnsNamesOf(ApplicationId applicationId) {
List<String> endpointDnsNames = new ArrayList<>();
// The hashed common name is always included.
endpointDnsNames.add(Endpoint.createHashedCn(applicationId, controller.system()));
var globalDefaultEndpoint = Endpoint.of(applicationId).named(EndpointId.default_());
var rotationEndpoints = Endpoint.of(applicationId).wildcard();
// Per directly-routed zone: the default-cluster endpoint and a wildcard endpoint.
var zoneLocalEndpoints = controller.zoneRegistry().zones().directlyRouted().zones().stream().flatMap(zone -> Stream.of(
Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone.getId()),
Endpoint.of(applicationId).wildcard(zone.getId())
));
// All names use direct routing on the TLS port, in this controller's system.
Stream.concat(Stream.of(globalDefaultEndpoint, rotationEndpoints), zoneLocalEndpoints)
.map(Endpoint.EndpointBuilder::directRouting)
.map(endpoint -> endpoint.on(Endpoint.Port.tls()))
.map(endpointBuilder -> endpointBuilder.in(controller.system()))
.map(Endpoint::dnsName).forEach(endpointDnsNames::add);
return Collections.unmodifiableList(endpointDnsNames);
}
/** Returns a synthetic activation result whose log explains that this deployment was ignored. */
private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) {
    Log warning = new Log();
    warning.level = "WARNING";
    warning.time = clock.instant().toEpochMilli();
    warning.message = "Ignoring deployment of application '" + application + "' to " + zone +
                      " as a deployment is not currently expected";

    PrepareResponse response = new PrepareResponse();
    response.log = List.of(warning);
    response.configChangeActions = new ConfigChangeActions(List.of(), List.of());
    return new ActivateResult(new RevisionId("0"), response, 0);
}
/**
 * Removes production deployments whose zone is no longer included in the deployment spec.
 * Removal requires an explicit deployment-removal validation override; otherwise this throws.
 */
private LockedApplication withoutDeletedDeployments(LockedApplication application) {
List<Deployment> deploymentsToRemove = application.get().productionDeployments().values().stream()
.filter(deployment -> ! application.get().deploymentSpec().includes(deployment.zone().environment(),
Optional.of(deployment.zone().region())))
.collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
// Deleting deployments is destructive, so it must be explicitly allowed via validation overrides.
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get() +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
.collect(Collectors.joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
LockedApplication applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
return applicationWithRemoval;
}
/** Removes production job records whose zone is no longer part of the deployment spec. */
private LockedApplication withoutUnreferencedDeploymentJobs(LockedApplication application) {
    for (JobType job : JobList.from(application.get()).production().mapToList(JobStatus::type)) {
        ZoneId zone = job.zone(controller.system());
        boolean stillReferenced = application.get().deploymentSpec().includes(zone.environment(), Optional.of(zone.region()));
        if ( ! stillReferenced)
            application = application.withoutDeploymentJob(job);
    }
    return application;
}
/** Returns a copy of the given deploy options with the given version set. */
private DeployOptions withVersion(Version version, DeployOptions options) {
    return new DeployOptions(options.deployDirectly, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion);
}
/** Returns the endpoints of the deployment, or empty if the request fails */
public List<URI> getDeploymentEndpoints(DeploymentId deploymentId) {
// Tester instances are allowed through even without a recorded deployment.
if ( ! get(deploymentId.applicationId())
.map(application -> application.deployments().containsKey(deploymentId.zoneId()))
.orElse(deploymentId.applicationId().instance().isTester()))
throw new NotExistsException("Deployment", deploymentId.toString());
try {
return ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::endpoint)
.map(URI::create)
.iterator());
}
catch (RuntimeException e) {
// Best effort: log and return an empty list rather than failing the caller.
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId, e);
return Collections.emptyList();
}
}
/** Returns the non-empty endpoints per cluster in the given deployment, or empty if endpoints can't be found. */
public Map<ClusterSpec.Id, URI> clusterEndpoints(DeploymentId id) {
// Tester instances are allowed through even without a recorded deployment.
if ( ! get(id.applicationId())
.map(application -> application.deployments().containsKey(id.zoneId()))
.orElse(id.applicationId().instance().isTester()))
throw new NotExistsException("Deployment", id.toString());
try {
var endpoints = routingGenerator.clusterEndpoints(id);
if ( ! endpoints.isEmpty())
return endpoints;
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to get endpoint information for " + id, e);
}
// Fall back to zone-scoped endpoints from the routing policies.
return routingPolicies.get(id).stream()
.filter(policy -> policy.endpointIn(controller.system()).scope() == Endpoint.Scope.zone)
.collect(Collectors.toUnmodifiableMap(policy -> policy.cluster(),
policy -> policy.endpointIn(controller.system()).url()))
;
}
/** Returns all zone-specific cluster endpoints for the given application, in the given zones. */
public Map<ZoneId, Map<ClusterSpec.Id, URI>> clusterEndpoints(ApplicationId id, Collection<ZoneId> zones) {
    // Sorted by zone value for stable iteration order.
    Map<ZoneId, Map<ClusterSpec.Id, URI>> endpointsByZone = new TreeMap<>(Comparator.comparing(ZoneId::value));
    for (ZoneId zone : zones) {
        Map<ClusterSpec.Id, URI> zoneEndpoints = clusterEndpoints(new DeploymentId(id, zone));
        if ( ! zoneEndpoints.isEmpty())
            endpointsByZone.put(zone, zoneEndpoints);
    }
    return Collections.unmodifiableMap(endpointsByZone);
}
/**
* Deletes the given application. All known instances of the application will be deleted.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
*/
/**
* Deletes the given application instance.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if the instance does not exist
*/
public void deleteInstance(ApplicationId applicationId, Optional<Credentials> credentials) {
Tenant tenant = controller.tenants().require(applicationId.tenant());
// User tenants may delete without credentials; all other tenant types require them.
if (tenant.type() != Tenant.Type.user && credentials.isEmpty())
throw new IllegalArgumentException("Could not delete application '" + applicationId + "': No credentials provided");
if (controller.applications().get(applicationId).isEmpty()) {
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
}
lockOrThrow(applicationId, application -> {
// Refuse deletion while the application still has active deployments.
if ( ! application.get().deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
application.get().deployments().keySet().stream().map(ZoneId::toString)
.sorted().collect(Collectors.joining(", ")));
curator.removeApplication(applicationId);
applicationStore.removeAll(applicationId);
applicationStore.removeAll(TesterId.of(applicationId));
// Remove the CNAME records created for this application's rotation endpoints.
application.get().rotations().forEach(assignedRotation -> {
var endpoints = application.get().endpointsIn(controller.system(), assignedRotation.endpointId());
endpoints.asList().stream()
.map(Endpoint::dnsName)
.forEach(name -> {
controller.nameServiceForwarder().removeRecords(Record.Type.CNAME, RecordName.from(name), Priority.normal);
});
});
log.info("Deleted " + application);
});
// Also remove the application from access control when this was its last instance in the tenant.
if ( tenant.type() != Tenant.Type.user
&& controller.applications().asList(applicationId.tenant()).stream()
.map(application -> application.id().application())
.noneMatch(applicationId.application()::equals))
accessControl.deleteApplication(applicationId, credentials.get());
}
/**
 * Replace any previous version of this application by this instance
 *
 * @param application a locked application to store; the caller must hold its lock
 */
public void store(LockedApplication application) {
curator.writeApplication(application.get());
}
/**
 * Acquire a locked application to modify and store, if there is an application with the given id.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        Optional<Application> application = get(applicationId);
        if (application.isPresent())
            action.accept(new LockedApplication(application.get(), lock));
    }
}
/**
 * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
// The lock is held for the duration of the action and released when this method returns.
try (Lock lock = lock(applicationId)) {
action.accept(new LockedApplication(require(applicationId), lock));
}
}
/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param hostname If non-empty, restart will only be scheduled for this host
 */
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
configServer.restart(deploymentId, hostname);
}
/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
    try {
        return configServer.isSuspended(deploymentId);
    }
    catch (ConfigServerException e) {
        // An unknown deployment cannot be suspended; any other config server error is propagated.
        if (e.getErrorCode() != ConfigServerException.ErrorCode.NOT_FOUND)
            throw e;
        return false;
    }
}
/** Deactivate application in the given zone */
public void deactivate(ApplicationId application, ZoneId zone) {
// Locks, deactivates, and stores the updated application in one step.
lockOrThrow(application, lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.get().id(), zone));
} catch (NotFoundException ignored) {
// Already gone on the config server; still remove our record of it below.
} finally {
// Keep routing in sync with the config server regardless of the outcome.
routingPolicies.refresh(application.get().id(), application.get().deploymentSpec(), zone);
}
return application.withoutDeploymentIn(zone);
}
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/** Returns the given id with dashes in the application name replaced by underscores. */
private ApplicationId dashToUnderscore(ApplicationId id) {
    // replace(char, char) does a plain character substitution; replaceAll compiles
    // its argument as a regex on every call, which is unnecessary here.
    return ApplicationId.from(id.tenant().value(),
                              id.application().value().replace('-', '_'),
                              id.instance().value());
}
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(ApplicationId application) {
return curator.lock(application);
}
/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
return curator.lockForDeployment(application, zone);
}
/** Verify that we don't downgrade an existing production deployment. */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
Deployment deployment = application.deployments().get(zone);
// A platform downgrade is allowed when the change is pinned; an application downgrade never is.
if ( zone.environment().isProduction() && deployment != null
&& ( platformVersion.compareTo(deployment.version()) < 0 && ! application.change().isPinned()
|| applicationVersion.compareTo(deployment.applicationVersion()) < 0))
throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
/** Returns the rotation repository, used for managing global rotation assignments */
public RotationRepository rotationRepository() {
return rotationRepository;
}
/** Returns this controller's routing policies. */
public RoutingPolicies routingPolicies() {
return routingPolicies;
}
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
    List<Application> sorted = new ArrayList<>(applications);
    sorted.sort(Comparator.comparing(Application::id));
    return sorted;
}
/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. Verify that the Athenz service can be launched by the config server
 * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
 * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
 *
 * @param tenantName Tenant where application should be deployed
 * @param applicationPackage Application package
 * @param deployer Principal initiating the deployment, possibly empty
 */
public void verifyApplicationIdentityConfiguration(TenantName tenantName, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());
// Only applications declaring an Athenz domain in deployment.xml need identity checks.
applicationPackage.deploymentSpec().athenzDomain().ifPresent(identityDomain -> {
Tenant tenant = controller.tenants().require(tenantName);
deployer.filter(AthenzPrincipal.class::isInstance)
.map(AthenzPrincipal.class::cast)
.map(AthenzPrincipal::getIdentity)
.filter(AthenzUser.class::isInstance)
.ifPresentOrElse(user -> {
// Athenz user deployer: must be tenant admin of the declared domain.
if ( ! ((AthenzFacade) accessControl).hasTenantAdminAccess(user, new AthenzDomain(identityDomain.value())))
throw new IllegalArgumentException("User " + user.getFullName() + " is not allowed to launch " +
"services in Athenz domain " + identityDomain.value() + ". " +
"Please reach out to the domain admin.");
},
() -> {
// No Athenz user deployer: the tenant's own domain must match the declared domain.
if (tenant.type() != Tenant.Type.athenz)
throw new IllegalArgumentException("Athenz domain defined in deployment.xml, but no " +
"Athenz domain for tenant " + tenantName.value());
AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
if ( ! Objects.equals(tenantDomain.getName(), identityDomain.value()))
throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.value() + "] " +
"must match tenant domain: [" + tenantDomain.getName() + "]");
});
});
}
/*
 * Verifies that the configured athenz service (if any) can be launched.
 */
private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
deploymentSpec.athenzDomain().ifPresent(athenzDomain -> {
// Check every reachable zone: each zone's config server identity must be able to launch the service.
controller.zoneRegistry().zones().reachable().ids()
.forEach(zone -> {
AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerAthenzIdentity(zone);
deploymentSpec.athenzService(zone.environment(), zone.region())
.map(service -> new AthenzService(athenzDomain.value(), service.value()))
.ifPresent(service -> {
boolean allowedToLaunch = ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, service);
if (!allowedToLaunch)
throw new IllegalArgumentException("Not allowed to launch Athenz service " + service.getFullName());
});
});
});
}
/** Returns the latest known version within the given major. */
private Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
    Optional<Version> latest = Optional.empty();
    for (VespaVersion vespaVersion : controller.versionStatus().versions()) {
        Version candidate = vespaVersion.versionNumber();
        if (candidate.getMajor() != targetMajorVersion) continue;
        if (latest.isEmpty() || candidate.compareTo(latest.get()) > 0)
            latest = Optional.of(candidate);
    }
    return latest;
}
/** Extract deployment warnings metric from deployment result */
private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
    List<Log> logs = result.prepareResponse().log;
    if (logs == null) return Map.of();

    // Count log entries at warning level (both "warn" and "warning" spellings occur).
    int warningCount = 0;
    for (Log entry : logs)
        if ("warn".equalsIgnoreCase(entry.level) || "warning".equalsIgnoreCase(entry.level))
            warningCount++;

    return warningCount == 0 ? Map.of() : Map.of(DeploymentMetrics.Warning.all, warningCount);
}
} |
Nit: You could just call this method "at". | public void testBlockVersionChangeHalfwayThoughThenNewRevision() {
// Scenario: a platform upgrade starts outside a block-change window, the window opens
// mid-upgrade, and a new application revision is then submitted on top of it.
// Wednesday, 18:00 UTC — inside the weekday 17-23 block-change window after the first advance.
DeploymentTester tester = new DeploymentTester().atInstant(Instant.parse("2017-09-29T16:00:00.00Z"));
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.blockChange(false, true, "mon-fri", "00-09,17-23", "UTC")
.blockChange(false, true, "sat-sun", "00-23", "UTC")
.region("us-west-1")
.region("us-central-1")
.region("us-east-3")
.build();
Application app = tester.createAndDeploy("app1", 1, applicationPackage);
// New platform version triggers an upgrade outside the blocked window.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.clock().advance(Duration.ofHours(1));
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size());
tester.deployAndNotify(app, applicationPackage, false, productionUsCentral1);
// A new revision arrives while the upgrade is halfway through production.
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app, applicationPackage, false, productionUsCentral1);
assertEquals(2, tester.buildService().jobs().size());
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.clock().advance(Duration.ofDays(1));
// Yet another platform version while changes are still rolling out.
version = Version.fromString("6.4");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertEquals(Collections.emptyList(), tester.buildService().jobs());
tester.clock().advance(Duration.ofDays(1));
tester.clock().advance(Duration.ofHours(17));
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
// All zones must end up on the final platform version.
for (Deployment deployment : tester.applications().require(app.id()).deployments().values())
assertEquals(version, deployment.version());
} | DeploymentTester tester = new DeploymentTester().atInstant(Instant.parse("2017-09-29T16:00:00.00Z")); | public void testBlockVersionChangeHalfwayThoughThenNewRevision() {
// Scenario: a platform upgrade starts outside a block-change window, the window opens
// mid-upgrade, and a new application revision is then submitted on top of it.
DeploymentTester tester = new DeploymentTester().at(Instant.parse("2017-09-29T16:00:00.00Z"));
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.blockChange(false, true, "mon-fri", "00-09,17-23", "UTC")
.blockChange(false, true, "sat-sun", "00-23", "UTC")
.region("us-west-1")
.region("us-central-1")
.region("us-east-3")
.build();
Application app = tester.createAndDeploy("app1", 1, applicationPackage);
// New platform version triggers an upgrade outside the blocked window.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.clock().advance(Duration.ofHours(1));
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size());
tester.deployAndNotify(app, applicationPackage, false, productionUsCentral1);
// A new revision arrives while the upgrade is halfway through production.
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app, applicationPackage, false, productionUsCentral1);
assertEquals(2, tester.buildService().jobs().size());
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.clock().advance(Duration.ofDays(1));
// Yet another platform version while changes are still rolling out.
version = Version.fromString("6.4");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertEquals(Collections.emptyList(), tester.buildService().jobs());
tester.clock().advance(Duration.ofDays(1));
tester.clock().advance(Duration.ofHours(17));
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
// All zones must end up on the final platform version.
for (Deployment deployment : tester.applications().require(app.id()).deployments().values())
assertEquals(version, deployment.version());
} | class UpgraderTest {
@Test
public void testUpgrading() {
DeploymentTester tester = new DeploymentTester();
Version version0 = Version.fromString("6.2");
tester.upgradeSystem(version0);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("No applications: Nothing to do", 0, tester.buildService().jobs().size());
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
Application default0 = tester.createAndDeploy("default0", 3, "default");
Application default1 = tester.createAndDeploy("default1", 4, "default");
Application default2 = tester.createAndDeploy("default2", 5, "default");
Application conservative0 = tester.createAndDeploy("conservative0", 6, "conservative");
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("All already on the right version: Nothing to do", 0, tester.buildService().jobs().size());
Version version1 = Version.fromString("6.3");
tester.upgradeSystem(version1);
assertEquals(version1, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("New system version: Should upgrade Canaries", 4, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version1, "canary");
assertEquals(version1, tester.configServer().lastPrepareVersion().get());
tester.upgradeSystem(version1);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("One canary pending; nothing else", 2, tester.buildService().jobs().size());
tester.completeUpgrade(canary1, version1, "canary");
tester.upgradeSystem(version1);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Canaries done: Should upgrade defaults", 6, tester.buildService().jobs().size());
tester.completeUpgrade(default0, version1, "default");
tester.completeUpgrade(default1, version1, "default");
tester.completeUpgrade(default2, version1, "default");
tester.upgradeSystem(version1);
assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Normals done: Should upgrade conservatives", 2, tester.buildService().jobs().size());
tester.completeUpgrade(conservative0, version1, "conservative");
tester.upgradeSystem(version1);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Nothing to do", 0, tester.buildService().jobs().size());
Version version2 = Version.fromString("6.4");
tester.upgradeSystem(version2);
assertEquals(version2, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("New system version: Should upgrade Canaries", 4, tester.buildService().jobs().size());
tester.completeUpgradeWithError(canary0, version2, "canary", stagingTest);
tester.upgradeSystem(version2);
assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.jobCompletion(stagingTest).application(canary0).unsuccessful().submit();
assertEquals("Version broken, but Canaries should keep trying", 3, tester.buildService().jobs().size());
Version version3 = Version.fromString("6.5");
tester.upgradeSystem(version3);
assertEquals(version3, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.buildService().remove(ControllerTester.buildJob(canary0, stagingTest));
tester.buildService().remove(ControllerTester.buildJob(canary1, systemTest));
tester.buildService().remove(ControllerTester.buildJob(canary1, stagingTest));
tester.triggerUntilQuiescence();
assertEquals("New system version: Should upgrade Canaries", 4, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version3, "canary");
assertEquals(version3, tester.configServer().lastPrepareVersion().get());
tester.upgradeSystem(version3);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("One canary pending; nothing else", 2, tester.buildService().jobs().size());
tester.completeUpgrade(canary1, version3, "canary");
tester.upgradeSystem(version3);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Canaries done: Should upgrade defaults", 6, tester.buildService().jobs().size());
tester.completeUpgradeWithError(default0, version3, "default", stagingTest);
tester.completeUpgrade(default1, version3, "default");
tester.completeUpgrade(default2, version3, "default");
tester.upgradeSystem(version3);
assertEquals("Not enough evidence to mark this as neither broken nor high",
VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
assertEquals("Upgrade with error should retry", 1, tester.buildService().jobs().size());
tester.jobCompletion(component).application(default0).nextBuildNumber().uploadArtifact(DeploymentTester.applicationPackage("default")).submit();
tester.jobCompletion(stagingTest).application(default0).unsuccessful().submit();
tester.deployAndNotify(default0, "default", true, systemTest);
tester.deployAndNotify(default0, "default", true, stagingTest);
tester.deployAndNotify(default0, "default", true, productionUsWest1);
tester.deployAndNotify(default0, "default", true, productionUsEast3);
tester.upgradeSystem(version3);
assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Normals done: Should upgrade conservatives", 2, tester.buildService().jobs().size());
tester.completeUpgrade(conservative0, version3, "conservative");
tester.upgradeSystem(version3);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Applications are on " + version3 + " - nothing to do", 0, tester.buildService().jobs().size());
Version version4 = Version.fromString("6.6");
Application default3 = tester.createAndDeploy("default3", 7, "default");
Application default4 = tester.createAndDeploy("default4", 8, "default");
tester.upgradeSystem(version4);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, version4, "canary");
tester.completeUpgrade(canary1, version4, "canary");
tester.upgradeSystem(version4);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Upgrade of defaults are scheduled", 10, tester.buildService().jobs().size());
assertEquals(version4, tester.application(default0.id()).change().platform().get());
assertEquals(version4, tester.application(default1.id()).change().platform().get());
assertEquals(version4, tester.application(default2.id()).change().platform().get());
assertEquals(version4, tester.application(default3.id()).change().platform().get());
assertEquals(version4, tester.application(default4.id()).change().platform().get());
tester.completeUpgrade(default0, version4, "default");
Version version5 = Version.fromString("6.7");
tester.upgradeSystem(version5);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, version5, "canary");
tester.completeUpgrade(canary1, version5, "canary");
tester.upgradeSystem(version5);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Upgrade of defaults are scheduled", 10, tester.buildService().jobs().size());
assertEquals(version5, tester.application(default0.id()).change().platform().get());
assertEquals(version4, tester.application(default1.id()).change().platform().get());
assertEquals(version4, tester.application(default2.id()).change().platform().get());
assertEquals(version4, tester.application(default3.id()).change().platform().get());
assertEquals(version4, tester.application(default4.id()).change().platform().get());
tester.completeUpgrade(default1, version4, "default");
tester.completeUpgrade(default2, version4, "default");
tester.completeUpgradeWithError(default3, version4, "default", stagingTest);
tester.completeUpgradeWithError(default4, version4, "default", JobType.productionUsWest1);
tester.clock().advance(Duration.ofHours(1));
tester.upgrader().maintain();
tester.jobCompletion(stagingTest).application(default3).unsuccessful().submit();
tester.jobCompletion(productionUsWest1).application(default4).unsuccessful().submit();
tester.completeUpgradeWithError(default0, version5, "default", stagingTest);
tester.completeUpgradeWithError(default1, version5, "default", stagingTest);
tester.completeUpgradeWithError(default2, version5, "default", stagingTest);
tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1)));
tester.readyJobTrigger().maintain();
tester.completeUpgradeWithError(default3, version5, "default", JobType.productionUsWest1);
tester.upgradeSystem(version5);
assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
tester.clock().advance(Duration.ofHours(1));
tester.jobCompletion(JobType.productionUsWest1).application(default3).unsuccessful().submit();
tester.upgrader().maintain();
tester.buildService().clear();
tester.triggerUntilQuiescence();
assertEquals("Upgrade of defaults are scheduled on " + version4 + " instead, since " + version5 + " is broken: " +
"This is default3 since it failed upgrade on both " + version4 + " and " + version5,
2, tester.buildService().jobs().size());
assertEquals(version4, tester.application(default3.id()).change().platform().get());
}
// Verifies that when a new version breaks several default-policy applications after both
// canaries have passed, confidence turns 'broken' and pending default upgrades are cancelled.
@Test
public void testUpgradingToVersionWhichBreaksSomeNonCanaries() {
DeploymentTester tester = new DeploymentTester();
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("No system version: Nothing to do", 0, tester.buildService().jobs().size());
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("No applications: Nothing to do", 0, tester.buildService().jobs().size());
// Setup: two canaries and ten default-policy applications, all deployed on 6.2.
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
Application default0 = tester.createAndDeploy("default0", 3, "default");
Application default1 = tester.createAndDeploy("default1", 4, "default");
Application default2 = tester.createAndDeploy("default2", 5, "default");
Application default3 = tester.createAndDeploy("default3", 6, "default");
Application default4 = tester.createAndDeploy("default4", 7, "default");
Application default5 = tester.createAndDeploy("default5", 8, "default");
Application default6 = tester.createAndDeploy("default6", 9, "default");
Application default7 = tester.createAndDeploy("default7", 10, "default");
Application default8 = tester.createAndDeploy("default8", 11, "default");
Application default9 = tester.createAndDeploy("default9", 12, "default");
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("All already on the right version: Nothing to do", 0, tester.buildService().jobs().size());
// A new version is released: only the canaries are scheduled first.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("New system version: Should upgrade Canaries", 4, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version, "canary");
assertEquals(version, tester.configServer().lastPrepareVersion().get());
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("One canary pending; nothing else", 2, tester.buildService().jobs().size());
tester.completeUpgrade(canary1, version, "canary");
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
// Both canaries succeeded, so all ten defaults are scheduled for upgrade.
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Canaries done: Should upgrade defaults", 20, tester.buildService().jobs().size());
// One default succeeds, four fail their system test on the new version.
tester.completeUpgrade(default0, version, "default");
tester.completeUpgradeWithError(default1, version, "default", systemTest);
tester.completeUpgradeWithError(default2, version, "default", systemTest);
tester.completeUpgradeWithError(default3, version, "default", systemTest);
tester.completeUpgradeWithError(default4, version, "default", systemTest);
// Recomputing status after the failures marks the version 'broken' and cancels remaining upgrades.
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.buildService().clear();
tester.triggerUntilQuiescence();
assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
assertEquals("Upgrades are cancelled", 0, tester.buildService().jobs().size());
}
// Verifies behavior when a newer version is released while an earlier upgrade of the same
// application is still failing: the failure is retained and no duplicate jobs are scheduled.
@Test
public void testDeploymentAlreadyInProgressForUpgrade() {
DeploymentTester tester = new DeploymentTester();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
.environment(Environment.prod)
.region("us-east-3")
.build();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
// Deploy the application completely on the initial version.
Application app = tester.createApplication("app1", "tenant1", 1, 11L);
tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Application is on expected version: Nothing to do", 0,
tester.buildService().jobs().size());
// A new version is released; the staging test fails, leaving the upgrade pending.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, false, stagingTest);
assertTrue("Failure is recorded", tester.application(app.id()).deploymentJobs().hasFailures());
assertTrue("Application has pending change", tester.application(app.id()).change().hasTargets());
// An even newer version is released while the previous upgrade is still failing.
version = Version.fromString("6.4");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.jobCompletion(stagingTest).application(app).unsuccessful().submit();
assertTrue("Application still has failures", tester.application(app.id()).deploymentJobs().hasFailures());
assertEquals(2, tester.buildService().jobs().size());
// A further maintenance pass must not schedule duplicate jobs.
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals(2, tester.buildService().jobs().size());
}
/** Verifies that a broken version cancels in-flight upgrades and that already-running jobs can still be consumed. */
@Test
public void testUpgradeCancelledWithDeploymentInProgress() {
    // Setup: two canaries and five default-policy applications on the initial version.
    DeploymentTester tester = new DeploymentTester();
    tester.upgradeSystem(Version.fromString("6.2"));
    Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
    Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
    Application default0 = tester.createAndDeploy("default0", 3, "default");
    Application default1 = tester.createAndDeploy("default1", 4, "default");
    Application default2 = tester.createAndDeploy("default2", 5, "default");
    Application default3 = tester.createAndDeploy("default3", 6, "default");
    Application default4 = tester.createAndDeploy("default4", 7, "default");

    // A new version is released and both canaries upgrade successfully.
    Version version = Version.fromString("6.3");
    tester.upgradeSystem(version);
    assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
    tester.upgrader().maintain();
    tester.triggerUntilQuiescence();
    tester.completeUpgrade(canary0, version, "canary");
    tester.completeUpgrade(canary1, version, "canary");
    tester.upgradeSystem(version);
    assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    tester.triggerUntilQuiescence();
    assertEquals("Upgrade scheduled for remaining apps", 10, tester.buildService().jobs().size());

    // Four of the five defaults fail their system tests, turning confidence 'broken'.
    for (Application failing : new Application[] { default0, default1, default2, default3 }) {
        tester.completeUpgradeWithError(failing, version, "default", systemTest);
    }
    tester.upgradeSystem(version);
    assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    tester.triggerUntilQuiescence();
    assertFalse("No change present", tester.applications().require(default4.id()).change().hasTargets());

    // Consume the jobs which were already running when the upgrade was cancelled.
    Application[] defaults = { default0, default1, default2, default3, default4 };
    for (Application application : defaults)
        tester.jobCompletion(systemTest).application(application).submit();
    for (Application application : defaults)
        tester.jobCompletion(stagingTest).application(application).submit();
    assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
/**
* Scenario:
* Applications are on version V0.
* Versions V1 and then V2 are released; the canaries pass both.
* An application A upgrades one production zone to V2.
* V2 is marked as broken and the upgrade of A to V2 is cancelled.
* Upgrade of A to V1 is scheduled: Should skip the zone already on V2 but upgrade the next zone to V1
*/
@Test
public void testVersionIsBrokenAfterAZoneIsLive() {
DeploymentTester tester = new DeploymentTester();
Version v0 = Version.fromString("6.2");
tester.upgradeSystem(v0);
// Setup: two canaries and five default-policy applications on v0.
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
Application default0 = tester.createAndDeploy("default0", 3, "default");
Application default1 = tester.createAndDeploy("default1", 4, "default");
Application default2 = tester.createAndDeploy("default2", 5, "default");
Application default3 = tester.createAndDeploy("default3", 6, "default");
Application default4 = tester.createAndDeploy("default4", 7, "default");
// v1 is released and both canaries upgrade to it successfully.
Version v1 = Version.fromString("6.3");
tester.upgradeSystem(v1);
assertEquals(v1, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, v1, "canary");
tester.completeUpgrade(canary1, v1, "canary");
tester.upgradeSystem(v1);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
// v2 is released and the canaries pass it as well.
Version v2 = Version.fromString("6.4");
tester.upgradeSystem(v2);
assertEquals(v2, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, v2, "canary");
tester.completeUpgrade(canary1, v2, "canary");
tester.upgradeSystem(v2);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
// Cancel the pending v1 changes for four defaults so they target v2 directly;
// default4 keeps its v1 upgrade.
tester.deploymentTrigger().cancelChange(default0.id(), ALL);
tester.deploymentTrigger().cancelChange(default1.id(), ALL);
tester.deploymentTrigger().cancelChange(default2.id(), ALL);
tester.deploymentTrigger().cancelChange(default3.id(), ALL);
tester.buildService().clear();
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Upgrade scheduled for remaining apps", 10, tester.buildService().jobs().size());
assertEquals("default4 is still upgrading to 5.1", v1, tester.application(default4.id()).change().platform().get());
// The four defaults reach v2 in us-west-1 but fail in us-east-3.
tester.completeUpgradeWithError(default0, v2, "default", productionUsEast3);
tester.completeUpgradeWithError(default1, v2, "default", productionUsEast3);
tester.completeUpgradeWithError(default2, v2, "default", productionUsEast3);
tester.completeUpgradeWithError(default3, v2, "default", productionUsEast3);
tester.upgradeSystem(v2);
assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
assertEquals(v2, tester.application("default0").deployments().get(ZoneId.from("prod.us-west-1")).version());
assertEquals(v0, tester.application("default0").deployments().get(ZoneId.from("prod.us-east-3")).version());
tester.upgrader().maintain();
tester.buildService().clear();
tester.triggerUntilQuiescence();
assertEquals("Upgrade to 5.1 scheduled for apps not completely on 5.1 or 5.2", 10, tester.buildService().jobs().size());
// The v1 upgrade skips the zone already on v2 and upgrades the remaining zone.
tester.deployAndNotify(tester.application("default0"), "default", true, systemTest);
tester.deployAndNotify(tester.application("default0"), "default", true, stagingTest);
tester.deployAndNotify(tester.application("default0"), "default", true, productionUsEast3);
assertEquals(v2, tester.application("default0").deployments().get(ZoneId.from("prod.us-west-1")).version());
assertEquals("Last zone is upgraded to v1",
v1, tester.application("default0").deployments().get(ZoneId.from("prod.us-east-3")).version());
assertFalse(tester.application("default0").change().hasTargets());
}
// Verifies that failures caused by application (revision) changes do not lower
// version confidence below 'normal' — only platform-caused failures should.
@Test
public void testConfidenceIgnoresFailingApplicationChanges() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage canaryPolicy = DeploymentTester.applicationPackage("canary");
ApplicationPackage defaultPolicy = DeploymentTester.applicationPackage("default");
Application canary0 = tester.createAndDeploy("canary0", 1, canaryPolicy);
Application canary1 = tester.createAndDeploy("canary1", 2, canaryPolicy);
Application default0 = tester.createAndDeploy("default0", 3, defaultPolicy);
Application default1 = tester.createAndDeploy("default1", 4, defaultPolicy);
Application default2 = tester.createAndDeploy("default2", 5, defaultPolicy);
Application default3 = tester.createAndDeploy("default3", 6, defaultPolicy);
Application default4 = tester.createAndDeploy("default4", 7, defaultPolicy);
// All applications upgrade successfully to the new version.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, version, "canary");
tester.completeUpgrade(canary1, version, "canary");
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(default0, version, "default");
tester.completeUpgrade(default1, version, "default");
tester.completeUpgrade(default2, version, "default");
tester.completeUpgrade(default3, version, "default");
tester.completeUpgrade(default4, version, "default");
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
// Submit new application revisions, several of which fail their component jobs.
tester.jobCompletion(component).application(default0).nextBuildNumber().uploadArtifact(canaryPolicy).unsuccessful().submit();
tester.jobCompletion(component).application(default1).nextBuildNumber().uploadArtifact(canaryPolicy).unsuccessful().submit();
tester.jobCompletion(component).application(default2).nextBuildNumber().uploadArtifact(defaultPolicy).submit();
tester.jobCompletion(component).application(default3).nextBuildNumber().uploadArtifact(defaultPolicy).submit();
tester.jobCompletion(component).application(default2).nextBuildNumber().uploadArtifact(canaryPolicy).unsuccessful().submit();
tester.jobCompletion(component).application(default3).nextBuildNumber(2).uploadArtifact(canaryPolicy).unsuccessful().submit();
// Confidence drops only to 'normal', not 'broken', since the platform itself is fine.
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
}
/** Verifies that a version change is held back while the deployment spec's block window is active. */
@Test
public void testBlockVersionChange() {
    // Clock starts on a Tuesday at 18:00 UTC, inside the block window declared below.
    DeploymentTester tester = new DeploymentTester().atInstant(Instant.parse("2017-09-26T18:00:00.00Z"));
    tester.upgradeSystem(Version.fromString("6.2"));
    ApplicationPackage appPackage = new ApplicationPackageBuilder()
            .upgradePolicy("canary")
            .blockChange(false, true, "tue", "18-19", "UTC")
            .region("us-west-1")
            .build();
    Application app = tester.createAndDeploy("app1", 1, appPackage);

    Version newVersion = Version.fromString("6.3");
    tester.upgradeSystem(newVersion);

    // At 18:00 and 19:00 the block window is active, so nothing is scheduled.
    for (int cycle = 0; cycle < 2; cycle++) {
        tester.upgrader().maintain();
        tester.triggerUntilQuiescence();
        assertTrue("No jobs scheduled", tester.buildService().jobs().isEmpty());
        tester.clock().advance(Duration.ofHours(1));
    }

    // At 20:00 the window has passed and the upgrade proceeds.
    tester.upgrader().maintain();
    tester.triggerUntilQuiescence();
    assertFalse("Job is scheduled", tester.buildService().jobs().isEmpty());
    tester.completeUpgrade(app, newVersion, appPackage);
    assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
// Verifies that an upgrade which has started rolling out before the block window
// opens continues through the remaining production zones during the window.
@Test
public void testBlockVersionChangeHalfwayThough() {
// Clock starts on a Tuesday at 17:00 UTC, one hour before the block window below.
DeploymentTester tester = new DeploymentTester().atInstant(Instant.parse("2017-09-26T17:00:00.00Z"));
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
.blockChange(false, true, "tue", "18-19", "UTC")
.region("us-west-1")
.region("us-central-1")
.region("us-east-3")
.build();
Application app = tester.createAndDeploy("app1", 1, applicationPackage);
version = Version.fromString("6.3");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
// The upgrade starts before the window: test jobs complete at 17:00.
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
// Advance into the 18-19 window; the in-flight upgrade keeps rolling.
tester.clock().advance(Duration.ofHours(1));
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size());
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
// Verifies that an upgrade which stalls with failing and hanging jobs is rescheduled
// once the affected applications recover via a new application change.
// Fix: the original declared @Test twice on this method; @Test is not a repeatable
// annotation, so the duplicate does not compile and has been removed.
@Test
public void testReschedulesUpgradeAfterTimeout() {
    DeploymentTester tester = new DeploymentTester();
    Version version = Version.fromString("6.2");
    tester.upgradeSystem(version);
    ApplicationPackage canaryApplicationPackage = new ApplicationPackageBuilder()
            .upgradePolicy("canary")
            .environment(Environment.prod)
            .region("us-west-1")
            .build();
    ApplicationPackage defaultApplicationPackage = new ApplicationPackageBuilder()
            .upgradePolicy("default")
            .environment(Environment.prod)
            .region("us-west-1")
            .build();
    // Setup: two canaries and five default-policy applications on the initial version.
    Application canary0 = tester.createAndDeploy("canary0", 1, canaryApplicationPackage);
    Application canary1 = tester.createAndDeploy("canary1", 2, canaryApplicationPackage);
    Application default0 = tester.createAndDeploy("default0", 3, defaultApplicationPackage);
    Application default1 = tester.createAndDeploy("default1", 4, defaultApplicationPackage);
    Application default2 = tester.createAndDeploy("default2", 5, defaultApplicationPackage);
    Application default3 = tester.createAndDeploy("default3", 6, defaultApplicationPackage);
    Application default4 = tester.createAndDeploy("default4", 7, defaultApplicationPackage);
    assertEquals(version, default0.oldestDeployedPlatform().get());
    // A new version is released and both canaries upgrade successfully.
    version = Version.fromString("6.3");
    tester.upgradeSystem(version);
    assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
    tester.upgrader().maintain();
    tester.triggerUntilQuiescence();
    tester.completeUpgrade(canary0, version, canaryApplicationPackage);
    tester.completeUpgrade(canary1, version, canaryApplicationPackage);
    tester.upgradeSystem(version);
    assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.clock().advance(Duration.ofMinutes(1));
    tester.upgrader().maintain();
    tester.triggerUntilQuiescence();
    assertEquals("Upgrade scheduled for remaining apps", 10, tester.buildService().jobs().size());
    // Four defaults fail their system tests on the new version; confidence turns 'broken'.
    tester.completeUpgradeWithError(default0, version, defaultApplicationPackage, systemTest);
    tester.completeUpgradeWithError(default1, version, defaultApplicationPackage, systemTest);
    tester.completeUpgradeWithError(default2, version, defaultApplicationPackage, systemTest);
    tester.completeUpgradeWithError(default3, version, defaultApplicationPackage, systemTest);
    tester.upgradeSystem(version);
    assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    tester.triggerUntilQuiescence();
    // Consume and fail the remaining queued jobs.
    tester.jobCompletion(systemTest).application(default0).unsuccessful().submit();
    tester.jobCompletion(systemTest).application(default1).unsuccessful().submit();
    tester.jobCompletion(systemTest).application(default2).unsuccessful().submit();
    tester.jobCompletion(systemTest).application(default3).unsuccessful().submit();
    tester.jobCompletion(stagingTest).application(default0).unsuccessful().submit();
    tester.jobCompletion(stagingTest).application(default1).unsuccessful().submit();
    tester.jobCompletion(stagingTest).application(default2).unsuccessful().submit();
    tester.jobCompletion(stagingTest).application(default3).unsuccessful().submit();
    tester.jobCompletion(stagingTest).application(default4).unsuccessful().submit();
    // default4's system test is still running even though its change has been removed.
    Application deadLocked = tester.applications().require(default4.id());
    tester.assertRunning(systemTest, deadLocked.id());
    assertFalse("No change present", deadLocked.change().hasTargets());
    // A new application revision lets the previously failing apps deploy successfully.
    ApplicationPackage defaultApplicationPackageV2 = new ApplicationPackageBuilder()
            .searchDefinition("search test { field test type string {} }")
            .upgradePolicy("default")
            .environment(Environment.prod)
            .region("us-west-1")
            .build();
    tester.deployCompletely(default0, defaultApplicationPackageV2, 43);
    tester.deployCompletely(default1, defaultApplicationPackageV2, 43);
    tester.deployCompletely(default2, defaultApplicationPackageV2, 43);
    tester.deployCompletely(default3, defaultApplicationPackageV2, 43);
    // Confidence recovers and the upgrade is rescheduled for the recovered apps.
    tester.upgradeSystem(version);
    assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
    tester.upgrader().maintain();
    tester.triggerUntilQuiescence();
    assertEquals("Upgrade scheduled for previously failing apps, and hanging job still running", 9, tester.buildService().jobs().size());
    tester.completeUpgrade(default0, version, defaultApplicationPackageV2);
    tester.completeUpgrade(default1, version, defaultApplicationPackageV2);
    tester.completeUpgrade(default2, version, defaultApplicationPackageV2);
    tester.completeUpgrade(default3, version, defaultApplicationPackageV2);
    assertEquals(version, tester.application(default0.id()).oldestDeployedPlatform().get());
    assertEquals(version, tester.application(default1.id()).oldestDeployedPlatform().get());
    assertEquals(version, tester.application(default2.id()).oldestDeployedPlatform().get());
    assertEquals(version, tester.application(default3.id()).oldestDeployedPlatform().get());
}
// Verifies that the upgrader limits how many applications are scheduled for upgrade
// per maintenance run when a low upgrades-per-minute rate is configured.
@Test
public void testThrottlesUpgrades() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
// Create an upgrader with a deliberately low upgrade rate.
Upgrader upgrader = new Upgrader(tester.controller(), Duration.ofMinutes(10),
new JobControl(tester.controllerTester().curator()),
tester.controllerTester().curator());
upgrader.setUpgradesPerMinute(0.2);
// Setup: two canaries, four defaults, and one dev deployment (dev is never upgraded here).
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
Application default0 = tester.createAndDeploy("default0", 3, "default");
Application default1 = tester.createAndDeploy("default1", 4, "default");
Application default2 = tester.createAndDeploy("default2", 5, "default");
Application default3 = tester.createAndDeploy("default3", 6, "default");
Application dev0 = tester.createApplication("dev0", "tenant1", 7, 1L);
tester.controllerTester().deploy(dev0, ZoneId.from(Environment.dev, RegionName.from("dev-region")));
// New version: only the canaries are scheduled first.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.triggerUntilQuiescence();
assertEquals(4, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version, "canary");
tester.completeUpgrade(canary1, version, "canary");
tester.upgradeSystem(version);
// Throttling: only two defaults are scheduled at a time (4 jobs), not all four.
tester.readyJobTrigger().maintain();
assertEquals(4, tester.buildService().jobs().size());
tester.completeUpgrade(default0, version, "default");
tester.completeUpgrade(default1, version, "default");
upgrader.maintain();
tester.triggerUntilQuiescence();
assertEquals(4, tester.buildService().jobs().size());
tester.completeUpgrade(default2, version, "default");
tester.completeUpgrade(default3, version, "default");
upgrader.maintain();
tester.triggerUntilQuiescence();
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
// Verifies that an application declaring majorVersion 6 in its deployment spec is
// not upgraded to major version 7, while unpinned applications are.
@Test
public void testPinningMajorVersionInDeploymentXml() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
// Package pinned to major version 6 via deployment.xml.
ApplicationPackage version6ApplicationPackage = new ApplicationPackageBuilder()
.majorVersion(6)
.upgradePolicy("default")
.environment(Environment.prod)
.region("us-west-1")
.build();
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application default0 = tester.createAndDeploy("default0", 2, version6ApplicationPackage);
// New major version: only the unpinned canary is upgraded.
version = Version.fromString("7.0");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.triggerUntilQuiescence();
assertEquals(2, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version, "canary");
assertEquals(0, tester.buildService().jobs().size());
tester.computeVersionStatus();
// The pinned application gets no upgrade jobs on the new major version.
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals(0, tester.buildService().jobs().size());
}
// Verifies that pinning the major version on the Application object itself (rather
// than in deployment.xml) also prevents upgrade to the next major version.
@Test
public void testPinningMajorVersionInApplication() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage default0ApplicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.environment(Environment.prod)
.region("us-west-1")
.build();
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application default0 = tester.createAndDeploy("default0", 2, default0ApplicationPackage);
// Pin default0 to major version 6 via the application store.
tester.applications().lockOrThrow(default0.id(), a -> tester.applications().store(a.withMajorVersion(6)));
assertEquals(OptionalInt.of(6), tester.applications().get(default0.id()).get().majorVersion());
// New major version: only the unpinned canary is upgraded.
version = Version.fromString("7.0");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.triggerUntilQuiescence();
assertEquals(2, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version, "canary");
assertEquals(0, tester.buildService().jobs().size());
tester.computeVersionStatus();
// The pinned application gets no upgrade jobs on the new major version.
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals(0, tester.buildService().jobs().size());
}
// Verifies the upgrader's global target major version: while the target is 6, only
// applications which explicitly declare majorVersion 7 upgrade to 7; clearing the
// target lets the remaining applications upgrade as well.
@Test
public void testPinningMajorVersionInUpgrader() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage version7CanaryApplicationPackage = new ApplicationPackageBuilder()
.majorVersion(7)
.upgradePolicy("canary")
.environment(Environment.prod)
.region("us-west-1")
.build();
ApplicationPackage version7DefaultApplicationPackage = new ApplicationPackageBuilder()
.majorVersion(7)
.upgradePolicy("default")
.environment(Environment.prod)
.region("us-west-1")
.build();
// Two applications opted in to major 7; one with no declared major version.
Application canary0 = tester.createAndDeploy("canary", 1, version7CanaryApplicationPackage);
Application default0 = tester.createAndDeploy("default0", 2, version7DefaultApplicationPackage);
Application default1 = tester.createAndDeploy("default1", 3, "default");
// Pin the whole system's upgrade target to major version 6.
tester.upgrader().setTargetMajorVersion(Optional.of(6));
version = Version.fromString("7.0");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
// Only the major-7 canary is scheduled first.
tester.triggerUntilQuiescence();
assertEquals(2, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version, version7CanaryApplicationPackage);
assertEquals(0, tester.buildService().jobs().size());
tester.computeVersionStatus();
// Then the major-7 default application follows.
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals(2, tester.buildService().jobs().size());
tester.completeUpgrade(default0, version, version7DefaultApplicationPackage);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals(0, tester.buildService().jobs().size());
// Clearing the target major version lets default1 upgrade too.
tester.upgrader().setTargetMajorVersion(Optional.empty());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals(2, tester.buildService().jobs().size());
tester.completeUpgrade(default1, version, "default");
}
// Verifies that an application change submitted while an upgrade is failing is merged
// into the pending change, so the new version and new revision roll out together.
@Test
public void testAllowApplicationChangeDuringFailingUpgrade() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
.build();
Application app = tester.createAndDeploy("app1", 1, applicationPackage);
// The upgrade fails in the production zone.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, false, productionUsWest1);
// A new revision is submitted while the upgrade is still failing.
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
String applicationVersion = "1.0.43-commit1";
app = tester.application(app.id());
assertTrue("Change contains both upgrade and application change",
app.change().platform().get().equals(version) &&
app.change().application().get().id().equals(applicationVersion));
// Both the version and the revision deploy together on retry.
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.jobCompletion(productionUsWest1).application(app).unsuccessful().submit();
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
// Every deployment ends up on both the new platform version and the new revision.
app = tester.application(app.id());
for (Deployment deployment : app.deployments().values()) {
assertEquals(version, deployment.version());
assertEquals(applicationVersion, deployment.applicationVersion().id());
}
}
// Verifies that a revision change which started rolling out before its block window
// completes through the window, after which a new platform upgrade proceeds normally.
@Test
public void testBlockRevisionChangeHalfwayThoughThenUpgrade() {
// Clock starts on a Tuesday at 17:00 UTC, one hour before the revision block window below.
DeploymentTester tester = new DeploymentTester().atInstant(Instant.parse("2017-09-26T17:00:00.00Z"));
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
// blockChange(true, false, ...) blocks revision changes (not version changes) tue 18-19 UTC.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
.blockChange(true, false, "tue", "18-19", "UTC")
.region("us-west-1")
.region("us-central-1")
.region("us-east-3")
.build();
Application app = tester.createAndDeploy("app1", 1, applicationPackage);
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
// The revision starts rolling before the window and continues through it.
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.clock().advance(Duration.ofHours(1));
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size());
// A new version released mid-rollout does not interrupt the revision change.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
tester.triggerUntilQuiescence();
assertEquals(1, tester.buildService().jobs().size());
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertEquals(Collections.emptyList(), tester.buildService().jobs());
// With the revision done, the upgrade rolls through all zones.
tester.upgrader().maintain();
tester.readyJobTrigger().maintain();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
@Test
public void testBlockRevisionChangeHalfwayThoughThenNewRevision() {
// Tuesday 17:00 UTC: one hour before this package's revision-block window (18-19 UTC).
DeploymentTester tester = new DeploymentTester().atInstant(Instant.parse("2017-09-26T17:00:00.00Z"));
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
// Block application (revision) changes, but not platform upgrades, Tuesdays 18-19 UTC.
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
.blockChange(true, false, "tue", "18-19", "UTC")
.region("us-west-1")
.region("us-central-1")
.region("us-east-3")
.build();
Application app = tester.createAndDeploy("app1", 1, applicationPackage);
// First new revision is submitted and starts rolling out.
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
// Advance into the block window with the rollout halfway through prod.
tester.clock().advance(Duration.ofHours(1));
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size());
// A second revision arrives while the first is still in progress and the window is active.
tester.jobCompletion(component).application(app).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.triggerUntilQuiescence();
assertEquals(3, tester.buildService().jobs().size());
// Finish the first revision's rollout.
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertEquals(Collections.emptyList(), tester.buildService().jobs());
// Inside the block window the outstanding (second) revision must not be deployed.
tester.outstandingChangeDeployer().run();
assertFalse(tester.application(app.id()).change().hasTargets());
// Once the window has passed, the outstanding revision is picked up.
tester.clock().advance(Duration.ofHours(2));
tester.outstandingChangeDeployer().run();
assertTrue(tester.application(app.id()).change().hasTargets());
tester.readyJobTrigger().run();
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertFalse(tester.application(app.id()).change().hasTargets());
}
@Test
public void testPinning() {
// Verifies that a pinned change keeps the application on its platform version
// through upgrades and new revisions, until the pin is explicitly removed.
DeploymentTester tester = new DeploymentTester();
Version version0 = Version.fromString("6.2");
tester.upgradeSystem(version0);
Application application = tester.createApplication("application", "tenant", 2, 3);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder().environment(Environment.prod)
.region("us-east-3")
.region("us-west-1")
.build();
// Deploy with an empty-but-pinned change: the pin survives the completed deployment.
tester.deploymentTrigger().forceChange(application.id(), Change.empty().withPin());
tester.deployCompletely(application, applicationPackage);
assertFalse(tester.application(application.id()).change().hasTargets());
assertTrue(tester.application(application.id()).change().isPinned());
assertEquals(2, tester.application(application.id()).deployments().size());
// A new system version must not be rolled out to a pinned application.
Version version1 = Version.fromString("6.3");
tester.upgradeSystem(version1);
tester.upgrader().maintain();
assertFalse(tester.application(application.id()).change().hasTargets());
assertTrue(tester.application(application.id()).change().isPinned());
// Revision changes still deploy while pinned, and the pin persists afterwards.
tester.deployCompletely(application, applicationPackage, BuildJob.defaultBuildNumber + 1);
assertFalse(tester.application(application.id()).change().hasTargets());
assertTrue(tester.application(application.id()).change().isPinned());
// Removing the pin lets the upgrader schedule the pending platform upgrade.
tester.deploymentTrigger().cancelChange(application.id(), PIN);
tester.upgrader().maintain();
assertTrue(tester.application(application.id()).change().hasTargets());
assertFalse(tester.application(application.id()).change().isPinned());
// Re-pin mid-upgrade: the in-flight upgrade to version1 keeps its target.
tester.deploymentTrigger().forceChange(application.id(), Change.empty().withPin());
tester.upgrader().maintain();
tester.readyJobTrigger().maintain();
assertEquals(version1, tester.application(application.id()).change().platform().get());
tester.deployAndNotify(application, true, systemTest);
tester.deployAndNotify(application, true, stagingTest);
tester.deployAndNotify(application, true, productionUsEast3);
// us-west-1 fails on version1; force a pinned downgrade back to version0 instead.
tester.deploy(productionUsWest1, application, Optional.empty(), false);
tester.deployAndNotify(application, false, productionUsWest1);
tester.deploymentTrigger().cancelChange(application.id(), ALL);
tester.deploymentTrigger().forceChange(application.id(), Change.of(version0).withPin());
tester.buildService().clear();
assertEquals(version0, tester.application(application.id()).change().platform().get());
tester.readyJobTrigger().maintain();
tester.deployAndNotify(application, true, systemTest);
tester.deployAndNotify(application, true, stagingTest);
tester.deployAndNotify(application, true, productionUsEast3);
// Change remains until the last zone (us-west-1) is back on version0.
assertTrue(tester.application(application.id()).change().hasTargets());
tester.deployAndNotify(application, true, productionUsWest1);
assertFalse(tester.application(application.id()).change().hasTargets());
}
@Test
public void upgradesToLatestAllowedMajor() {
// Verifies that applications upgrade only up to the configured target major version,
// unless they explicitly opt in to a higher major.
DeploymentTester tester = new DeploymentTester();
Version version0 = Version.fromString("6.1");
tester.upgradeSystem(version0);
// System-wide target major is 6.
tester.upgrader().setTargetMajorVersion(Optional.of(6));
Application app1 = tester.createAndDeploy("app1", 1, "default");
Application app2 = tester.createAndDeploy("app2", 2, "default");
// Pin app1 so it sits out the first upgrade round.
tester.controller().applications().lockIfPresent(app1.id(), app -> tester.controller().applications().store(app.withChange(app.get().change().withPin())));
Version version1 = Version.fromString("6.2");
tester.upgradeSystem(version1);
tester.upgrader().maintain();
tester.completeUpgrade(app2, version1, "default");
// A new major (7.1) is released; app2 opts in to major 7 and gets it as its target.
Version version2 = Version.fromString("7.1");
tester.upgradeSystem(version2);
tester.controller().applications().lockIfPresent(app2.id(), app -> tester.applications().store(app.withMajorVersion(7)));
tester.upgrader().maintain();
assertEquals(version2, tester.controller().applications().require(app2.id()).change().platform().get());
// Unpin app1: it upgrades only to the latest allowed major (6.2), not 7.1.
tester.controller().applications().lockIfPresent(app1.id(), app -> tester.controller().applications().store(app.withChange(app.get().change().withoutPin())));
tester.upgrader().maintain();
assertEquals("Application upgrades to latest allowed major", version1,
tester.controller().applications().require(app1.id()).change().platform().get());
}
@Test
public void testsEachUpgradeCombinationWithFailingDeployments() {
// Drives an application into a state where each prod zone is on a different platform
// version (v3, v2, v1), and verifies staging tests run for each source->target
// upgrade combination before the corresponding prod zone is upgraded.
DeploymentTester tester = new DeploymentTester();
Application application = tester.createApplication("app1", "tenant1", 1, 1L);
Supplier<Application> app = () -> tester.application(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
.region("us-west-1")
.region("us-east-3")
.build();
// Initial deployment on v1.
Version v1 = Version.fromString("6.1");
tester.deployCompletely(application, applicationPackage);
// Upgrade towards v2, but mark v2 broken after only us-central-1 has it.
Version v2 = Version.fromString("6.2");
tester.upgradeSystem(v2);
tester.upgrader().maintain();
assertEquals(Change.of(v2), app.get().change());
tester.deployAndNotify(application, true, systemTest);
tester.deployAndNotify(application, true, stagingTest);
tester.deployAndNotify(application, true, productionUsCentral1);
tester.upgrader().overrideConfidence(v2, VespaVersion.Confidence.broken);
tester.computeVersionStatus();
tester.upgrader().maintain();
tester.deployAndNotify(application, true, productionUsWest1);
assertTrue(app.get().change().isEmpty());
// v3 released: zones are now on v2, v2, v1 respectively.
Version v3 = Version.fromString("6.3");
tester.upgradeSystem(v3);
tester.upgrader().maintain();
assertEquals(Change.of(v3), app.get().change());
tester.deployAndNotify(application, true, systemTest);
tester.deployAndNotify(application, true, stagingTest);
tester.deploy(productionUsCentral1, application, applicationPackage);
tester.readyJobTrigger().maintain();
tester.deployAndNotify(application, true, stagingTest);
// First staging run covers the v1 -> v3 combination.
assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().sourcePlatform().get());
assertEquals(v3, app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().platform());
tester.jobCompletion(productionUsCentral1).application(application).unsuccessful().submit();
tester.jobCompletion(productionUsCentral1).application(application).submit();
// Prod zones now hold three distinct versions.
assertEquals(v3, app.get().deployments().get(productionUsCentral1.zone(main)).version());
assertEquals(v2, app.get().deployments().get(productionUsWest1.zone(main)).version());
assertEquals(v1, app.get().deployments().get(productionUsEast3.zone(main)).version());
// Next staging run covers v2 -> v3 before us-west-1 upgrades.
tester.readyJobTrigger().maintain();
assertEquals(v2, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get());
assertEquals(v3, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform());
tester.deployAndNotify(application, true, stagingTest);
tester.deployAndNotify(application, true, productionUsWest1);
// Final staging run covers v1 -> v3 before us-east-3 upgrades.
tester.readyJobTrigger().maintain();
assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get());
assertEquals(v3, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform());
tester.deployAndNotify(application, true, stagingTest);
tester.deployAndNotify(application, true, productionUsEast3);
assertTrue("Upgrade complete", app.get().change().isEmpty());
}
} | class UpgraderTest {
@Test
public void testUpgrading() {
// End-to-end upgrade flow across upgrade policies (canary -> default -> conservative),
// including broken versions, retries, and falling back to an older version when the
// newest one fails for an application.
DeploymentTester tester = new DeploymentTester();
Version version0 = Version.fromString("6.2");
tester.upgradeSystem(version0);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("No applications: Nothing to do", 0, tester.buildService().jobs().size());
// One pair of canaries, three defaults, one conservative - all start on version0.
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
Application default0 = tester.createAndDeploy("default0", 3, "default");
Application default1 = tester.createAndDeploy("default1", 4, "default");
Application default2 = tester.createAndDeploy("default2", 5, "default");
Application conservative0 = tester.createAndDeploy("conservative0", 6, "conservative");
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("All already on the right version: Nothing to do", 0, tester.buildService().jobs().size());
// --- version1: canaries first, then defaults at normal confidence, then conservatives at high.
Version version1 = Version.fromString("6.3");
tester.upgradeSystem(version1);
assertEquals(version1, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("New system version: Should upgrade Canaries", 4, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version1, "canary");
assertEquals(version1, tester.configServer().lastPrepareVersion().get());
tester.upgradeSystem(version1);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("One canary pending; nothing else", 2, tester.buildService().jobs().size());
tester.completeUpgrade(canary1, version1, "canary");
tester.upgradeSystem(version1);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Canaries done: Should upgrade defaults", 6, tester.buildService().jobs().size());
tester.completeUpgrade(default0, version1, "default");
tester.completeUpgrade(default1, version1, "default");
tester.completeUpgrade(default2, version1, "default");
tester.upgradeSystem(version1);
assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Normals done: Should upgrade conservatives", 2, tester.buildService().jobs().size());
tester.completeUpgrade(conservative0, version1, "conservative");
tester.upgradeSystem(version1);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Nothing to do", 0, tester.buildService().jobs().size());
// --- version2: a canary fails, version becomes broken, canaries keep retrying.
Version version2 = Version.fromString("6.4");
tester.upgradeSystem(version2);
assertEquals(version2, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("New system version: Should upgrade Canaries", 4, tester.buildService().jobs().size());
tester.completeUpgradeWithError(canary0, version2, "canary", stagingTest);
tester.upgradeSystem(version2);
assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.jobCompletion(stagingTest).application(canary0).unsuccessful().submit();
assertEquals("Version broken, but Canaries should keep trying", 3, tester.buildService().jobs().size());
// --- version3: supersedes broken version2; stale canary jobs are removed and retriggered.
Version version3 = Version.fromString("6.5");
tester.upgradeSystem(version3);
assertEquals(version3, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.buildService().remove(ControllerTester.buildJob(canary0, stagingTest));
tester.buildService().remove(ControllerTester.buildJob(canary1, systemTest));
tester.buildService().remove(ControllerTester.buildJob(canary1, stagingTest));
tester.triggerUntilQuiescence();
assertEquals("New system version: Should upgrade Canaries", 4, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version3, "canary");
assertEquals(version3, tester.configServer().lastPrepareVersion().get());
tester.upgradeSystem(version3);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("One canary pending; nothing else", 2, tester.buildService().jobs().size());
tester.completeUpgrade(canary1, version3, "canary");
tester.upgradeSystem(version3);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Canaries done: Should upgrade defaults", 6, tester.buildService().jobs().size());
// default0 fails on version3; a single failure among defaults keeps confidence at normal.
tester.completeUpgradeWithError(default0, version3, "default", stagingTest);
tester.completeUpgrade(default1, version3, "default");
tester.completeUpgrade(default2, version3, "default");
tester.upgradeSystem(version3);
assertEquals("Not enough evidence to mark this as neither broken nor high",
VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
assertEquals("Upgrade with error should retry", 1, tester.buildService().jobs().size());
// A new revision for default0 lets it recover and complete the upgrade to version3.
tester.jobCompletion(component).application(default0).nextBuildNumber().uploadArtifact(DeploymentTester.applicationPackage("default")).submit();
tester.jobCompletion(stagingTest).application(default0).unsuccessful().submit();
tester.deployAndNotify(default0, "default", true, systemTest);
tester.deployAndNotify(default0, "default", true, stagingTest);
tester.deployAndNotify(default0, "default", true, productionUsWest1);
tester.deployAndNotify(default0, "default", true, productionUsEast3);
tester.upgradeSystem(version3);
assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Normals done: Should upgrade conservatives", 2, tester.buildService().jobs().size());
tester.completeUpgrade(conservative0, version3, "conservative");
tester.upgradeSystem(version3);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Applications are on " + version3 + " - nothing to do", 0, tester.buildService().jobs().size());
// --- version4: two more default applications join; all defaults get version4 as target.
Version version4 = Version.fromString("6.6");
Application default3 = tester.createAndDeploy("default3", 7, "default");
Application default4 = tester.createAndDeploy("default4", 8, "default");
tester.upgradeSystem(version4);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, version4, "canary");
tester.completeUpgrade(canary1, version4, "canary");
tester.upgradeSystem(version4);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Upgrade of defaults are scheduled", 10, tester.buildService().jobs().size());
assertEquals(version4, tester.application(default0.id()).change().platform().get());
assertEquals(version4, tester.application(default1.id()).change().platform().get());
assertEquals(version4, tester.application(default2.id()).change().platform().get());
assertEquals(version4, tester.application(default3.id()).change().platform().get());
assertEquals(version4, tester.application(default4.id()).change().platform().get());
tester.completeUpgrade(default0, version4, "default");
// --- version5 arrives before the other defaults finish version4: only default0
// (already on version4) is retargeted to version5; the rest stay on version4.
Version version5 = Version.fromString("6.7");
tester.upgradeSystem(version5);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, version5, "canary");
tester.completeUpgrade(canary1, version5, "canary");
tester.upgradeSystem(version5);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Upgrade of defaults are scheduled", 10, tester.buildService().jobs().size());
assertEquals(version5, tester.application(default0.id()).change().platform().get());
assertEquals(version4, tester.application(default1.id()).change().platform().get());
assertEquals(version4, tester.application(default2.id()).change().platform().get());
assertEquals(version4, tester.application(default3.id()).change().platform().get());
assertEquals(version4, tester.application(default4.id()).change().platform().get());
tester.completeUpgrade(default1, version4, "default");
tester.completeUpgrade(default2, version4, "default");
// default3 and default4 fail on version4 in different jobs.
tester.completeUpgradeWithError(default3, version4, "default", stagingTest);
tester.completeUpgradeWithError(default4, version4, "default", JobType.productionUsWest1);
tester.clock().advance(Duration.ofHours(1));
tester.upgrader().maintain();
tester.jobCompletion(stagingTest).application(default3).unsuccessful().submit();
tester.jobCompletion(productionUsWest1).application(default4).unsuccessful().submit();
// default0-2 fail on version5, pushing its confidence towards broken.
tester.completeUpgradeWithError(default0, version5, "default", stagingTest);
tester.completeUpgradeWithError(default1, version5, "default", stagingTest);
tester.completeUpgradeWithError(default2, version5, "default", stagingTest);
tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1)));
tester.readyJobTrigger().maintain();
tester.completeUpgradeWithError(default3, version5, "default", JobType.productionUsWest1);
tester.upgradeSystem(version5);
assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
tester.clock().advance(Duration.ofHours(1));
tester.jobCompletion(JobType.productionUsWest1).application(default3).unsuccessful().submit();
tester.upgrader().maintain();
tester.buildService().clear();
tester.triggerUntilQuiescence();
// With version5 broken, default3 (which failed on both) is rescheduled on version4.
assertEquals("Upgrade of defaults are scheduled on " + version4 + " instead, since " + version5 + " is broken: " +
"This is default3 since it failed upgrade on both " + version4 + " and " + version5,
2, tester.buildService().jobs().size());
assertEquals(version4, tester.application(default3.id()).change().platform().get());
}
@Test
public void testUpgradingToVersionWhichBreaksSomeNonCanaries() {
// Verifies that when enough default-policy applications fail an upgrade, the
// version is marked broken and the remaining scheduled upgrades are cancelled.
DeploymentTester tester = new DeploymentTester();
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("No system version: Nothing to do", 0, tester.buildService().jobs().size());
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("No applications: Nothing to do", 0, tester.buildService().jobs().size());
// Two canaries and ten defaults, all on the initial version.
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
Application default0 = tester.createAndDeploy("default0", 3, "default");
Application default1 = tester.createAndDeploy("default1", 4, "default");
Application default2 = tester.createAndDeploy("default2", 5, "default");
Application default3 = tester.createAndDeploy("default3", 6, "default");
Application default4 = tester.createAndDeploy("default4", 7, "default");
Application default5 = tester.createAndDeploy("default5", 8, "default");
Application default6 = tester.createAndDeploy("default6", 9, "default");
Application default7 = tester.createAndDeploy("default7", 10, "default");
Application default8 = tester.createAndDeploy("default8", 11, "default");
Application default9 = tester.createAndDeploy("default9", 12, "default");
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("All already on the right version: Nothing to do", 0, tester.buildService().jobs().size());
// New version: canaries upgrade cleanly, confidence reaches normal.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("New system version: Should upgrade Canaries", 4, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version, "canary");
assertEquals(version, tester.configServer().lastPrepareVersion().get());
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("One canary pending; nothing else", 2, tester.buildService().jobs().size());
tester.completeUpgrade(canary1, version, "canary");
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Canaries done: Should upgrade defaults", 20, tester.buildService().jobs().size());
// Four of the first five defaults fail system test on the new version.
tester.completeUpgrade(default0, version, "default");
tester.completeUpgradeWithError(default1, version, "default", systemTest);
tester.completeUpgradeWithError(default2, version, "default", systemTest);
tester.completeUpgradeWithError(default3, version, "default", systemTest);
tester.completeUpgradeWithError(default4, version, "default", systemTest);
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.buildService().clear();
tester.triggerUntilQuiescence();
// Enough failures: version is broken and no further upgrades are triggered.
assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
assertEquals("Upgrades are cancelled", 0, tester.buildService().jobs().size());
}
@Test
public void testDeploymentAlreadyInProgressForUpgrade() {
// Verifies that a newer system version replaces the target of an already-failing
// upgrade without triggering duplicate jobs.
DeploymentTester tester = new DeploymentTester();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
.environment(Environment.prod)
.region("us-east-3")
.build();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
Application app = tester.createApplication("app1", "tenant1", 1, 11L);
tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Application is on expected version: Nothing to do", 0,
tester.buildService().jobs().size());
// Upgrade to 6.3 starts but fails in staging.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, false, stagingTest);
assertTrue("Failure is recorded", tester.application(app.id()).deploymentJobs().hasFailures());
assertTrue("Application has pending change", tester.application(app.id()).change().hasTargets());
// 6.4 arrives while the 6.3 upgrade is still failing.
version = Version.fromString("6.4");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.jobCompletion(stagingTest).application(app).unsuccessful().submit();
assertTrue("Application still has failures", tester.application(app.id()).deploymentJobs().hasFailures());
assertEquals(2, tester.buildService().jobs().size());
// Another maintenance round must not schedule additional duplicate jobs.
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals(2, tester.buildService().jobs().size());
}
@Test
public void testUpgradeCancelledWithDeploymentInProgress() {
// Verifies that when a version turns broken mid-rollout, pending upgrade changes are
// cancelled and outstanding test jobs can still report back without re-triggering work.
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
Application default0 = tester.createAndDeploy("default0", 3, "default");
Application default1 = tester.createAndDeploy("default1", 4, "default");
Application default2 = tester.createAndDeploy("default2", 5, "default");
Application default3 = tester.createAndDeploy("default3", 6, "default");
Application default4 = tester.createAndDeploy("default4", 7, "default");
// Canaries take the new version; defaults are then scheduled.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, version, "canary");
tester.completeUpgrade(canary1, version, "canary");
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Upgrade scheduled for remaining apps", 10, tester.buildService().jobs().size());
// Four defaults fail system test: version turns broken and pending changes are dropped.
tester.completeUpgradeWithError(default0, version, "default", systemTest);
tester.completeUpgradeWithError(default1, version, "default", systemTest);
tester.completeUpgradeWithError(default2, version, "default", systemTest);
tester.completeUpgradeWithError(default3, version, "default", systemTest);
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertFalse("No change present", tester.applications().require(default4.id()).change().hasTargets());
// Outstanding jobs report success; since changes were cancelled, nothing new is triggered.
tester.jobCompletion(systemTest).application(default0).submit();
tester.jobCompletion(systemTest).application(default1).submit();
tester.jobCompletion(systemTest).application(default2).submit();
tester.jobCompletion(systemTest).application(default3).submit();
tester.jobCompletion(systemTest).application(default4).submit();
tester.jobCompletion(stagingTest).application(default0).submit();
tester.jobCompletion(stagingTest).application(default1).submit();
tester.jobCompletion(stagingTest).application(default2).submit();
tester.jobCompletion(stagingTest).application(default3).submit();
tester.jobCompletion(stagingTest).application(default4).submit();
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
/**
 * Scenario:
 * Applications are on version V0; V1 is released and canaries move to it.
 * Version V2 is then released.
 * An application A upgrades one production zone to V2.
 * V2 is marked as broken and upgrade of A to V2 is cancelled.
 * Upgrade of A to V1 is scheduled: Should skip the zone on V2 but upgrade the next zone to V1.
 */
@Test
public void testVersionIsBrokenAfterAZoneIsLive() {
DeploymentTester tester = new DeploymentTester();
Version v0 = Version.fromString("6.2");
tester.upgradeSystem(v0);
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
Application default0 = tester.createAndDeploy("default0", 3, "default");
Application default1 = tester.createAndDeploy("default1", 4, "default");
Application default2 = tester.createAndDeploy("default2", 5, "default");
Application default3 = tester.createAndDeploy("default3", 6, "default");
Application default4 = tester.createAndDeploy("default4", 7, "default");
// V1 released; canaries upgrade and confidence reaches normal.
Version v1 = Version.fromString("6.3");
tester.upgradeSystem(v1);
assertEquals(v1, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, v1, "canary");
tester.completeUpgrade(canary1, v1, "canary");
tester.upgradeSystem(v1);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
// V2 released immediately after; canaries take it too.
Version v2 = Version.fromString("6.4");
tester.upgradeSystem(v2);
assertEquals(v2, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, v2, "canary");
tester.completeUpgrade(canary1, v2, "canary");
tester.upgradeSystem(v2);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
// Reset defaults 0-3 so they are rescheduled for V2; default4 keeps its V1 upgrade.
tester.deploymentTrigger().cancelChange(default0.id(), ALL);
tester.deploymentTrigger().cancelChange(default1.id(), ALL);
tester.deploymentTrigger().cancelChange(default2.id(), ALL);
tester.deploymentTrigger().cancelChange(default3.id(), ALL);
tester.buildService().clear();
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Upgrade scheduled for remaining apps", 10, tester.buildService().jobs().size());
// Message previously hard-coded "5.1"; use the actual version variable instead.
assertEquals("default4 is still upgrading to " + v1, v1, tester.application(default4.id()).change().platform().get());
// Defaults 0-3 get V2 into us-west-1 but fail in us-east-3: V2 turns broken.
tester.completeUpgradeWithError(default0, v2, "default", productionUsEast3);
tester.completeUpgradeWithError(default1, v2, "default", productionUsEast3);
tester.completeUpgradeWithError(default2, v2, "default", productionUsEast3);
tester.completeUpgradeWithError(default3, v2, "default", productionUsEast3);
tester.upgradeSystem(v2);
assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
assertEquals(v2, tester.application("default0").deployments().get(ZoneId.from("prod.us-west-1")).version());
assertEquals(v0, tester.application("default0").deployments().get(ZoneId.from("prod.us-east-3")).version());
tester.upgrader().maintain();
tester.buildService().clear();
tester.triggerUntilQuiescence();
// Message previously hard-coded "5.1"/"5.2"; interpolate v1/v2 to match the test's versions.
assertEquals("Upgrade to " + v1 + " scheduled for apps not completely on " + v1 + " or " + v2, 10, tester.buildService().jobs().size());
// The V1 rollout skips the zone already on V2 and upgrades the remaining zone to V1.
tester.deployAndNotify(tester.application("default0"), "default", true, systemTest);
tester.deployAndNotify(tester.application("default0"), "default", true, stagingTest);
tester.deployAndNotify(tester.application("default0"), "default", true, productionUsEast3);
assertEquals(v2, tester.application("default0").deployments().get(ZoneId.from("prod.us-west-1")).version());
assertEquals("Last zone is upgraded to v1",
v1, tester.application("default0").deployments().get(ZoneId.from("prod.us-east-3")).version());
assertFalse(tester.application("default0").change().hasTargets());
}
// Verifies that unsuccessful *application-change* (component) builds do not drag
// platform-version confidence down: once all apps run 6.3 successfully (confidence
// 'high'), several apps submit failing component jobs, and recomputed confidence
// only settles at 'normal' rather than 'broken'.
@Test
public void testConfidenceIgnoresFailingApplicationChanges() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
// Two canary-policy apps and five default-policy apps, all deployed on 6.2.
ApplicationPackage canaryPolicy = DeploymentTester.applicationPackage("canary");
ApplicationPackage defaultPolicy = DeploymentTester.applicationPackage("default");
Application canary0 = tester.createAndDeploy("canary0", 1, canaryPolicy);
Application canary1 = tester.createAndDeploy("canary1", 2, canaryPolicy);
Application default0 = tester.createAndDeploy("default0", 3, defaultPolicy);
Application default1 = tester.createAndDeploy("default1", 4, defaultPolicy);
Application default2 = tester.createAndDeploy("default2", 5, defaultPolicy);
Application default3 = tester.createAndDeploy("default3", 6, defaultPolicy);
Application default4 = tester.createAndDeploy("default4", 7, defaultPolicy);
// New system version 6.3: canaries upgrade first, confidence reaches 'normal'.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, version, "canary");
tester.completeUpgrade(canary1, version, "canary");
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
// All default apps complete the upgrade, raising confidence to 'high'.
tester.completeUpgrade(default0, version, "default");
tester.completeUpgrade(default1, version, "default");
tester.completeUpgrade(default2, version, "default");
tester.completeUpgrade(default3, version, "default");
tester.completeUpgrade(default4, version, "default");
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
// A mix of successful and unsuccessful component (application-change) submissions.
tester.jobCompletion(component).application(default0).nextBuildNumber().uploadArtifact(canaryPolicy).unsuccessful().submit();
tester.jobCompletion(component).application(default1).nextBuildNumber().uploadArtifact(canaryPolicy).unsuccessful().submit();
tester.jobCompletion(component).application(default2).nextBuildNumber().uploadArtifact(defaultPolicy).submit();
tester.jobCompletion(component).application(default3).nextBuildNumber().uploadArtifact(defaultPolicy).submit();
tester.jobCompletion(component).application(default2).nextBuildNumber().uploadArtifact(canaryPolicy).unsuccessful().submit();
tester.jobCompletion(component).application(default3).nextBuildNumber(2).uploadArtifact(canaryPolicy).unsuccessful().submit();
tester.upgradeSystem(version);
// Confidence drops only to 'normal': the failures are application changes, not platform failures.
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
}
// Verifies that no upgrade is triggered while inside the application's configured
// version-change blocking window, and that the upgrade proceeds once the clock
// passes the window.
@Test
public void testBlockVersionChange() {
// 2017-09-26 is a Tuesday; start at 18:00 UTC, inside the blocked window below.
DeploymentTester tester = new DeploymentTester().at(Instant.parse("2017-09-26T18:00:00.00Z"));
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
// Block version changes (second flag), not revision changes, Tuesdays 18-19 UTC.
.blockChange(false, true, "tue", "18-19", "UTC")
.region("us-west-1")
.build();
Application app = tester.createAndDeploy("app1", 1, applicationPackage);
version = Version.fromString("6.3");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertTrue("No jobs scheduled", tester.buildService().jobs().isEmpty());
// 19:00 — still blocked (the 18-19 range evidently includes the 19th hour).
tester.clock().advance(Duration.ofHours(1));
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertTrue("No jobs scheduled", tester.buildService().jobs().isEmpty());
// 20:00 — window has passed; the upgrade is now triggered and completes.
tester.clock().advance(Duration.ofHours(1));
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertFalse("Job is scheduled", tester.buildService().jobs().isEmpty());
tester.completeUpgrade(app, version, applicationPackage);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
// Verifies that an upgrade which started before the blocking window opens keeps
// rolling through the remaining production zones even after the clock enters the
// window.
// NOTE(review): "Though" in the method name looks like a typo for "Through";
// left unchanged here to avoid churning test identifiers.
@Test
public void testBlockVersionChangeHalfwayThough() {
// Tuesday 17:00 UTC — one hour before the blocked window (tue 18-19 UTC) opens.
DeploymentTester tester = new DeploymentTester().at(Instant.parse("2017-09-26T17:00:00.00Z"));
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
.blockChange(false, true, "tue", "18-19", "UTC")
.region("us-west-1")
.region("us-central-1")
.region("us-east-3")
.build();
Application app = tester.createAndDeploy("app1", 1, applicationPackage);
version = Version.fromString("6.3");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
// Test jobs run before the window opens.
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
// 18:00 — now inside the block window, but the in-flight upgrade continues.
tester.clock().advance(Duration.ofHours(1));
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size());
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
/**
 * Verifies that after an upgrade fails and its change is removed, the upgrade is
 * rescheduled once the failing applications successfully deploy a new application
 * change, while an orphaned systemTest job (default4's) keeps running.
 *
 * <p>Fix: this method previously carried a duplicated {@code @Test} annotation,
 * which does not compile ({@code @Test} is not a repeatable annotation); one copy
 * has been removed.
 */
@Test
public void testReschedulesUpgradeAfterTimeout() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage canaryApplicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
.environment(Environment.prod)
.region("us-west-1")
.build();
ApplicationPackage defaultApplicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.environment(Environment.prod)
.region("us-west-1")
.build();
// Two canaries and five default-policy apps, all deployed on 6.2.
Application canary0 = tester.createAndDeploy("canary0", 1, canaryApplicationPackage);
Application canary1 = tester.createAndDeploy("canary1", 2, canaryApplicationPackage);
Application default0 = tester.createAndDeploy("default0", 3, defaultApplicationPackage);
Application default1 = tester.createAndDeploy("default1", 4, defaultApplicationPackage);
Application default2 = tester.createAndDeploy("default2", 5, defaultApplicationPackage);
Application default3 = tester.createAndDeploy("default3", 6, defaultApplicationPackage);
Application default4 = tester.createAndDeploy("default4", 7, defaultApplicationPackage);
assertEquals(version, default0.oldestDeployedPlatform().get());
// New version 6.3: canaries complete, confidence becomes 'normal'.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.completeUpgrade(canary0, version, canaryApplicationPackage);
tester.completeUpgrade(canary1, version, canaryApplicationPackage);
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
tester.clock().advance(Duration.ofMinutes(1));
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Upgrade scheduled for remaining apps", 10, tester.buildService().jobs().size());
// Four of the default apps fail their systemTest upgrade; confidence turns 'broken'.
tester.completeUpgradeWithError(default0, version, defaultApplicationPackage, systemTest);
tester.completeUpgradeWithError(default1, version, defaultApplicationPackage, systemTest);
tester.completeUpgradeWithError(default2, version, defaultApplicationPackage, systemTest);
tester.completeUpgradeWithError(default3, version, defaultApplicationPackage, systemTest);
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
// Remaining retriggered test jobs also report failure.
tester.jobCompletion(systemTest).application(default0).unsuccessful().submit();
tester.jobCompletion(systemTest).application(default1).unsuccessful().submit();
tester.jobCompletion(systemTest).application(default2).unsuccessful().submit();
tester.jobCompletion(systemTest).application(default3).unsuccessful().submit();
tester.jobCompletion(stagingTest).application(default0).unsuccessful().submit();
tester.jobCompletion(stagingTest).application(default1).unsuccessful().submit();
tester.jobCompletion(stagingTest).application(default2).unsuccessful().submit();
tester.jobCompletion(stagingTest).application(default3).unsuccessful().submit();
tester.jobCompletion(stagingTest).application(default4).unsuccessful().submit();
// default4 has no change targets left, yet its systemTest job is still running.
Application deadLocked = tester.applications().require(default4.id());
tester.assertRunning(systemTest, deadLocked.id());
assertFalse("No change present", deadLocked.change().hasTargets());
// The failing apps deploy a new application change (revised search definition).
ApplicationPackage defaultApplicationPackageV2 = new ApplicationPackageBuilder()
.searchDefinition("search test { field test type string {} }")
.upgradePolicy("default")
.environment(Environment.prod)
.region("us-west-1")
.build();
tester.deployCompletely(default0, defaultApplicationPackageV2, 43);
tester.deployCompletely(default1, defaultApplicationPackageV2, 43);
tester.deployCompletely(default2, defaultApplicationPackageV2, 43);
tester.deployCompletely(default3, defaultApplicationPackageV2, 43);
tester.upgradeSystem(version);
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
// Upgrade is rescheduled for the previously failing apps and completes.
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Upgrade scheduled for previously failing apps, and hanging job still running", 9, tester.buildService().jobs().size());
tester.completeUpgrade(default0, version, defaultApplicationPackageV2);
tester.completeUpgrade(default1, version, defaultApplicationPackageV2);
tester.completeUpgrade(default2, version, defaultApplicationPackageV2);
tester.completeUpgrade(default3, version, defaultApplicationPackageV2);
assertEquals(version, tester.application(default0.id()).oldestDeployedPlatform().get());
assertEquals(version, tester.application(default1.id()).oldestDeployedPlatform().get());
assertEquals(version, tester.application(default2.id()).oldestDeployedPlatform().get());
assertEquals(version, tester.application(default3.id()).oldestDeployedPlatform().get());
}
// Verifies that the upgrader throttles how many applications it schedules per
// round when configured with a low upgrades-per-minute rate.
@Test
public void testThrottlesUpgrades() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
// Local upgrader instance so the throttle rate can be set explicitly:
// 0.2 upgrades/minute over a 10-minute maintenance interval.
Upgrader upgrader = new Upgrader(tester.controller(), Duration.ofMinutes(10),
new JobControl(tester.controllerTester().curator()),
tester.controllerTester().curator());
upgrader.setUpgradesPerMinute(0.2);
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
Application default0 = tester.createAndDeploy("default0", 3, "default");
Application default1 = tester.createAndDeploy("default1", 4, "default");
Application default2 = tester.createAndDeploy("default2", 5, "default");
Application default3 = tester.createAndDeploy("default3", 6, "default");
// A dev-only deployment is included as well — presumably to confirm it does not
// participate in upgrade scheduling; TODO confirm against Upgrader behavior.
Application dev0 = tester.createApplication("dev0", "tenant1", 7, 1L);
tester.controllerTester().deploy(dev0, ZoneId.from(Environment.dev, RegionName.from("dev-region")));
version = Version.fromString("6.3");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.triggerUntilQuiescence();
// Only 4 jobs (two apps' systemTest+stagingTest) are in flight per round.
assertEquals(4, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version, "canary");
tester.completeUpgrade(canary1, version, "canary");
tester.upgradeSystem(version);
tester.readyJobTrigger().maintain();
assertEquals(4, tester.buildService().jobs().size());
tester.completeUpgrade(default0, version, "default");
tester.completeUpgrade(default1, version, "default");
upgrader.maintain();
tester.triggerUntilQuiescence();
assertEquals(4, tester.buildService().jobs().size());
tester.completeUpgrade(default2, version, "default");
tester.completeUpgrade(default3, version, "default");
upgrader.maintain();
tester.triggerUntilQuiescence();
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
// Verifies that a major-version pin declared in deployment.xml (majorVersion(6))
// keeps that application off the next major release (7.0) while unpinned apps upgrade.
@Test
public void testPinningMajorVersionInDeploymentXml() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage version6ApplicationPackage = new ApplicationPackageBuilder()
.majorVersion(6)
.upgradePolicy("default")
.environment(Environment.prod)
.region("us-west-1")
.build();
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application default0 = tester.createAndDeploy("default0", 2, version6ApplicationPackage);
version = Version.fromString("7.0");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.triggerUntilQuiescence();
// Two jobs scheduled — only the unpinned canary is being upgraded.
assertEquals(2, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version, "canary");
assertEquals(0, tester.buildService().jobs().size());
tester.computeVersionStatus();
// Further maintenance never schedules the major-6-pinned app onto 7.0.
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals(0, tester.buildService().jobs().size());
}
// Same scenario as testPinningMajorVersionInDeploymentXml, but with the major
// version pinned directly on the stored Application object via withMajorVersion(6).
@Test
public void testPinningMajorVersionInApplication() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage default0ApplicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.environment(Environment.prod)
.region("us-west-1")
.build();
Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
Application default0 = tester.createAndDeploy("default0", 2, default0ApplicationPackage);
// Pin default0 to major 6 on the application itself (not the package).
tester.applications().lockOrThrow(default0.id(), a -> tester.applications().store(a.withMajorVersion(6)));
assertEquals(OptionalInt.of(6), tester.applications().get(default0.id()).get().majorVersion());
version = Version.fromString("7.0");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.triggerUntilQuiescence();
// Only the unpinned canary is scheduled for 7.0.
assertEquals(2, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version, "canary");
assertEquals(0, tester.buildService().jobs().size());
tester.computeVersionStatus();
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals(0, tester.buildService().jobs().size());
}
// Verifies the upgrader-wide target major version: with the target set to 6,
// only apps that explicitly declare majorVersion(7) are upgraded to 7.0;
// clearing the target lets the remaining apps upgrade too.
@Test
public void testPinningMajorVersionInUpgrader() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage version7CanaryApplicationPackage = new ApplicationPackageBuilder()
.majorVersion(7)
.upgradePolicy("canary")
.environment(Environment.prod)
.region("us-west-1")
.build();
ApplicationPackage version7DefaultApplicationPackage = new ApplicationPackageBuilder()
.majorVersion(7)
.upgradePolicy("default")
.environment(Environment.prod)
.region("us-west-1")
.build();
Application canary0 = tester.createAndDeploy("canary", 1, version7CanaryApplicationPackage);
Application default0 = tester.createAndDeploy("default0", 2, version7DefaultApplicationPackage);
Application default1 = tester.createAndDeploy("default1", 3, "default");
// System-wide target major stays at 6 even though 7.0 is released below.
tester.upgrader().setTargetMajorVersion(Optional.of(6));
version = Version.fromString("7.0");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.triggerUntilQuiescence();
// The major-7-opted-in canary upgrades first.
assertEquals(2, tester.buildService().jobs().size());
tester.completeUpgrade(canary0, version, version7CanaryApplicationPackage);
assertEquals(0, tester.buildService().jobs().size());
tester.computeVersionStatus();
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
// Then the major-7-opted-in default app.
assertEquals(2, tester.buildService().jobs().size());
tester.completeUpgrade(default0, version, version7DefaultApplicationPackage);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
// default1 did not opt in, so nothing more is scheduled while the target is 6.
assertEquals(0, tester.buildService().jobs().size());
// Clearing the target major lets default1 upgrade as well.
tester.upgrader().setTargetMajorVersion(Optional.empty());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals(2, tester.buildService().jobs().size());
tester.completeUpgrade(default1, version, "default");
}
// Verifies that an application change submitted while a platform upgrade is
// failing is combined with the upgrade, and that both roll out together.
@Test
public void testAllowApplicationChangeDuringFailingUpgrade() {
DeploymentTester tester = new DeploymentTester();
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
.build();
Application app = tester.createAndDeploy("app1", 1, applicationPackage);
// Upgrade to 6.3 starts but fails in production.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, false, productionUsWest1);
// New application revision is submitted while the upgrade is still failing.
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
String applicationVersion = "1.0.43-commit1";
app = tester.application(app.id());
assertTrue("Change contains both upgrade and application change",
app.change().platform().get().equals(version) &&
app.change().application().get().id().equals(applicationVersion));
// The combined change deploys; both platform and application versions land everywhere.
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.jobCompletion(productionUsWest1).application(app).unsuccessful().submit();
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
app = tester.application(app.id());
for (Deployment deployment : app.deployments().values()) {
assertEquals(version, deployment.version());
assertEquals(applicationVersion, deployment.applicationVersion().id());
}
}
// Verifies that a revision change already in flight keeps rolling out through the
// revision-blocking window, and that a platform upgrade arriving mid-rollout waits
// until the revision change completes before being deployed.
// NOTE(review): "Though" in the method name looks like a typo for "Through".
@Test
public void testBlockRevisionChangeHalfwayThoughThenUpgrade() {
// Tuesday 17:00 UTC — one hour before the revision-blocking window (tue 18-19 UTC).
DeploymentTester tester = new DeploymentTester().at(Instant.parse("2017-09-26T17:00:00.00Z"));
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
// Block revision changes (first flag), not version changes.
.blockChange(true, false, "tue", "18-19", "UTC")
.region("us-west-1")
.region("us-central-1")
.region("us-east-3")
.build();
Application app = tester.createAndDeploy("app1", 1, applicationPackage);
// New revision submitted before the window opens.
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
// 18:00 — window open, but the in-flight revision keeps rolling.
tester.clock().advance(Duration.ofHours(1));
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size());
// A platform upgrade appears mid-rollout; job count stays unchanged for now.
version = Version.fromString("6.3");
tester.upgradeSystem(version);
tester.triggerUntilQuiescence();
assertEquals(1, tester.buildService().jobs().size());
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertEquals(Collections.emptyList(), tester.buildService().jobs());
// Once the revision completes, the upgrade runs through all zones.
tester.upgrader().maintain();
tester.readyJobTrigger().maintain();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
// Verifies that while a revision change is rolling out during the blocking window,
// a second new revision is held as an outstanding change and only deployed after
// the window has passed.
// NOTE(review): "Though" in the method name looks like a typo for "Through".
@Test
public void testBlockRevisionChangeHalfwayThoughThenNewRevision() {
// Tuesday 17:00 UTC — one hour before the revision-blocking window.
DeploymentTester tester = new DeploymentTester().at(Instant.parse("2017-09-26T17:00:00.00Z"));
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
.blockChange(true, false, "tue", "18-19", "UTC")
.region("us-west-1")
.region("us-central-1")
.region("us-east-3")
.build();
Application app = tester.createAndDeploy("app1", 1, applicationPackage);
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
// 18:00 — window open; the first revision keeps rolling through production.
tester.clock().advance(Duration.ofHours(1));
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size());
// A second revision is submitted while the window is open.
tester.jobCompletion(component).application(app).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.triggerUntilQuiescence();
assertEquals(3, tester.buildService().jobs().size());
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertEquals(Collections.emptyList(), tester.buildService().jobs());
// Inside the window the outstanding (second) revision is not promoted.
tester.outstandingChangeDeployer().run();
assertFalse(tester.application(app.id()).change().hasTargets());
// 20:00 — window passed; the outstanding revision becomes the active change.
tester.clock().advance(Duration.ofHours(2));
tester.outstandingChangeDeployer().run();
assertTrue(tester.application(app.id()).change().hasTargets());
tester.readyJobTrigger().run();
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertFalse(tester.application(app.id()).change().hasTargets());
}
// Verifies pinning of the platform version via Change.withPin(): a pinned app is
// skipped by the upgrader, deploys stay on the pinned version, and removing the
// pin (cancelChange with PIN) re-enables upgrades. Also exercises pinning back to
// an older version (downgrade path at the end).
@Test
public void testPinning() {
DeploymentTester tester = new DeploymentTester();
Version version0 = Version.fromString("6.2");
tester.upgradeSystem(version0);
Application application = tester.createApplication("application", "tenant", 2, 3);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder().environment(Environment.prod)
.region("us-east-3")
.region("us-west-1")
.build();
// Deploy completely with a pinned, empty change.
tester.deploymentTrigger().forceChange(application.id(), Change.empty().withPin());
tester.deployCompletely(application, applicationPackage);
assertFalse(tester.application(application.id()).change().hasTargets());
assertTrue(tester.application(application.id()).change().isPinned());
assertEquals(2, tester.application(application.id()).deployments().size());
// A new platform version does not touch the pinned application.
Version version1 = Version.fromString("6.3");
tester.upgradeSystem(version1);
tester.upgrader().maintain();
assertFalse(tester.application(application.id()).change().hasTargets());
assertTrue(tester.application(application.id()).change().isPinned());
// Application changes still deploy while pinned.
tester.deployCompletely(application, applicationPackage, BuildJob.defaultBuildNumber + 1);
assertFalse(tester.application(application.id()).change().hasTargets());
assertTrue(tester.application(application.id()).change().isPinned());
// Removing the pin lets the upgrader target the app again.
tester.deploymentTrigger().cancelChange(application.id(), PIN);
tester.upgrader().maintain();
assertTrue(tester.application(application.id()).change().hasTargets());
assertFalse(tester.application(application.id()).change().isPinned());
// Re-pin mid-upgrade: the in-flight platform change remains.
tester.deploymentTrigger().forceChange(application.id(), Change.empty().withPin());
tester.upgrader().maintain();
tester.readyJobTrigger().maintain();
assertEquals(version1, tester.application(application.id()).change().platform().get());
tester.deployAndNotify(application, true, systemTest);
tester.deployAndNotify(application, true, stagingTest);
tester.deployAndNotify(application, true, productionUsEast3);
tester.deploy(productionUsWest1, application, Optional.empty(), false);
tester.deployAndNotify(application, false, productionUsWest1);
// Cancel everything and pin back to the old version 6.2 (a downgrade).
tester.deploymentTrigger().cancelChange(application.id(), ALL);
tester.deploymentTrigger().forceChange(application.id(), Change.of(version0).withPin());
tester.buildService().clear();
assertEquals(version0, tester.application(application.id()).change().platform().get());
tester.readyJobTrigger().maintain();
tester.deployAndNotify(application, true, systemTest);
tester.deployAndNotify(application, true, stagingTest);
tester.deployAndNotify(application, true, productionUsEast3);
assertTrue(tester.application(application.id()).change().hasTargets());
tester.deployAndNotify(application, true, productionUsWest1);
assertFalse(tester.application(application.id()).change().hasTargets());
}
// Verifies that an application whose pin is removed upgrades to the latest version
// on its allowed major (6.2), not to the newer major (7.1), while an app that has
// opted in to major 7 targets 7.1.
@Test
public void upgradesToLatestAllowedMajor() {
DeploymentTester tester = new DeploymentTester();
Version version0 = Version.fromString("6.1");
tester.upgradeSystem(version0);
// System-wide target major is 6.
tester.upgrader().setTargetMajorVersion(Optional.of(6));
Application app1 = tester.createAndDeploy("app1", 1, "default");
Application app2 = tester.createAndDeploy("app2", 1, "default");
// app1 is pinned, so it sits out the 6.2 upgrade below.
tester.controller().applications().lockIfPresent(app1.id(), app -> tester.controller().applications().store(app.withChange(app.get().change().withPin())));
Version version1 = Version.fromString("6.2");
tester.upgradeSystem(version1);
tester.upgrader().maintain();
tester.completeUpgrade(app2, version1, "default");
// 7.1 is released; app2 opts in to major 7 and is targeted at 7.1.
Version version2 = Version.fromString("7.1");
tester.upgradeSystem(version2);
tester.controller().applications().lockIfPresent(app2.id(), app -> tester.applications().store(app.withMajorVersion(7)));
tester.upgrader().maintain();
assertEquals(version2, tester.controller().applications().require(app2.id()).change().platform().get());
// Unpinning app1: it targets 6.2, the latest version within its allowed major.
tester.controller().applications().lockIfPresent(app1.id(), app -> tester.controller().applications().store(app.withChange(app.get().change().withoutPin())));
tester.upgrader().maintain();
assertEquals("Application upgrades to latest allowed major", version1,
tester.controller().applications().require(app1.id()).change().platform().get());
}
// Walks an application through three platform versions (v1->v2->v3) where the v2
// rollout is abandoned (confidence overridden to broken) partway through, leaving
// the three production zones on three different versions. Verifies that staging
// tests are then run for each distinct source->target version combination.
@Test
public void testsEachUpgradeCombinationWithFailingDeployments() {
DeploymentTester tester = new DeploymentTester();
Application application = tester.createApplication("app1", "tenant1", 1, 1L);
Supplier<Application> app = () -> tester.application(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
.region("us-west-1")
.region("us-east-3")
.build();
// Deploy completely on v1 (6.1).
Version v1 = Version.fromString("6.1");
tester.deployCompletely(application, applicationPackage);
// v2 rollout reaches us-central-1 and us-west-1, then is cancelled when
// confidence is overridden to broken — us-east-3 stays on v1.
Version v2 = Version.fromString("6.2");
tester.upgradeSystem(v2);
tester.upgrader().maintain();
assertEquals(Change.of(v2), app.get().change());
tester.deployAndNotify(application, true, systemTest);
tester.deployAndNotify(application, true, stagingTest);
tester.deployAndNotify(application, true, productionUsCentral1);
tester.upgrader().overrideConfidence(v2, VespaVersion.Confidence.broken);
tester.computeVersionStatus();
tester.upgrader().maintain();
tester.deployAndNotify(application, true, productionUsWest1);
assertTrue(app.get().change().isEmpty());
// v3 rollout begins; zones now sit on v2 (central), v2 (west), v1 (east).
Version v3 = Version.fromString("6.3");
tester.upgradeSystem(v3);
tester.upgrader().maintain();
assertEquals(Change.of(v3), app.get().change());
tester.deployAndNotify(application, true, systemTest);
tester.deployAndNotify(application, true, stagingTest);
tester.deploy(productionUsCentral1, application, applicationPackage);
tester.readyJobTrigger().maintain();
tester.deployAndNotify(application, true, stagingTest);
// First staging run covers the v1 -> v3 combination.
assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().sourcePlatform().get());
assertEquals(v3, app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().platform());
tester.jobCompletion(productionUsCentral1).application(application).unsuccessful().submit();
tester.jobCompletion(productionUsCentral1).application(application).submit();
// Three zones, three different platform versions in production.
assertEquals(v3, app.get().deployments().get(productionUsCentral1.zone(main)).version());
assertEquals(v2, app.get().deployments().get(productionUsWest1.zone(main)).version());
assertEquals(v1, app.get().deployments().get(productionUsEast3.zone(main)).version());
// Next staging run covers v2 -> v3 before us-west-1 upgrades.
tester.readyJobTrigger().maintain();
assertEquals(v2, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get());
assertEquals(v3, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform());
tester.deployAndNotify(application, true, stagingTest);
tester.deployAndNotify(application, true, productionUsWest1);
// Final staging run covers v1 -> v3 again before us-east-3 upgrades.
tester.readyJobTrigger().maintain();
assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get());
assertEquals(v3, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform());
tester.deployAndNotify(application, true, stagingTest);
tester.deployAndNotify(application, true, productionUsEast3);
assertTrue("Upgrade complete", app.get().change().isEmpty());
}
} |
/**
 * Converts a node-repository REST DTO ({@link NodeRepositoryNode}) into the
 * internal {@link NodeSpec} model.
 *
 * <p>Fix: this span contained two near-identical copies of the method (a
 * before/after pair, which would not compile), the first of which carried a
 * leftover debug {@code System.out.println} dumping the whole node to stdout.
 * Collapsed to the single clean copy without the debug print.
 *
 * @param node raw node as returned by the config server; {@code type} and
 *             {@code state} are required, everything else is optional
 * @throws NullPointerException if {@code node.type} or {@code node.state} is null
 * @throws IllegalArgumentException if type/state do not name a known enum constant
 */
private static NodeSpec createNodeSpec(NodeRepositoryNode node) {
    Objects.requireNonNull(node.type, "Unknown node type");
    NodeType nodeType = NodeType.valueOf(node.type);
    Objects.requireNonNull(node.state, "Unknown node state");
    NodeState nodeState = NodeState.valueOf(node.state);
    Optional<NodeMembership> membership = Optional.ofNullable(node.membership)
            .map(m -> new NodeMembership(m.clusterType, m.clusterId, m.group, m.index, m.retired));
    // Missing reports map is treated as "no reports".
    NodeReports reports = NodeReports.fromMap(Optional.ofNullable(node.reports).orElseGet(Map::of));
    return new NodeSpec(
            node.hostname,
            Optional.ofNullable(node.wantedDockerImage).map(DockerImage::fromString),
            Optional.ofNullable(node.currentDockerImage).map(DockerImage::fromString),
            nodeState,
            nodeType,
            node.flavor,
            Optional.ofNullable(node.wantedVespaVersion).map(Version::fromString),
            Optional.ofNullable(node.vespaVersion).map(Version::fromString),
            Optional.ofNullable(node.wantedOsVersion).map(Version::fromString),
            Optional.ofNullable(node.currentOsVersion).map(Version::fromString),
            Optional.ofNullable(node.allowedToBeDown),
            Optional.ofNullable(node.wantToDeprovision),
            Optional.ofNullable(node.owner).map(o -> ApplicationId.from(o.tenant, o.application, o.instance)),
            membership,
            Optional.ofNullable(node.restartGeneration),
            Optional.ofNullable(node.currentRestartGeneration),
            node.rebootGeneration,
            node.currentRebootGeneration,
            // Firmware check timestamps arrive as epoch millis.
            Optional.ofNullable(node.wantedFirmwareCheck).map(Instant::ofEpochMilli),
            Optional.ofNullable(node.currentFirmwareCheck).map(Instant::ofEpochMilli),
            Optional.ofNullable(node.modelName),
            new NodeResources(
                    node.minCpuCores,
                    node.minMainMemoryAvailableGb,
                    node.minDiskAvailableGb,
                    node.bandwidthGbps,
                    toDiskSpeed(node.fastDisk),
                    toStorageType(node.remoteStorage)),
            node.ipAddresses,
            node.additionalIpAddresses,
            reports,
            Optional.ofNullable(node.parentHostname));
}

class RealNodeRepository implements NodeRepository {
private static final Logger logger = Logger.getLogger(RealNodeRepository.class.getName());
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to add nodes: " + response.message + " " + response.errorCode);
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getOptionalNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
return Optional.ofNullable(nodeResponse).map(RealNodeRepository::createNodeSpec);
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, Set<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toSet())));
Map<String, Set<Acl.Node>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(
node -> new Acl.Node(node.hostname, node.ipAddress),
Collectors.toSet())));
Map<String, Set<String>> trustedNetworks = response.trustedNetworks.stream()
.collect(Collectors.groupingBy(GetAclResponse.Network::getTrustedBy,
Collectors.mapping(node -> node.network, Collectors.toSet())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet(), trustedNetworks.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname),
trustedNetworks.get(hostname))));
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to update node attributes: " + response.message + " " + response.errorCode);
}
@Override
public void setNodeState(String hostName, NodeState nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
logger.info(response.message);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to set node state: " + response.message + " " + response.errorCode);
}
private static NodeResources.DiskSpeed toDiskSpeed(Boolean fastDisk) {
if (fastDisk == null) return NodeResources.DiskSpeed.any;
if (fastDisk) return NodeResources.DiskSpeed.fast;
else return NodeResources.DiskSpeed.slow;
}
private static NodeResources.StorageType toStorageType(Boolean remoteStorage) {
if (remoteStorage == null) return NodeResources.StorageType.any;
if (remoteStorage) return NodeResources.StorageType.remote;
else return NodeResources.StorageType.local;
}
private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = "fake-" + addNode.hostname;
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
addNode.nodeFlavor.ifPresent(f -> node.flavor = f);
addNode.flavorOverrides.flatMap(FlavorOverrides::diskGb).ifPresent(d -> node.minDiskAvailableGb = d);
addNode.nodeResources.ifPresent(resources -> {
node.minCpuCores = resources.vcpu();
node.minMainMemoryAvailableGb = resources.memoryGb();
node.minDiskAvailableGb = resources.diskGb();
node.bandwidthGbps = resources.bandwidthGbps();
node.fastDisk = resources.diskSpeed() == NodeResources.DiskSpeed.fast;
node.remoteStorage = resources.storageType() == NodeResources.StorageType.remote;
});
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
}
public static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.currentDockerImage = nodeAttributes.getDockerImage().map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration().orElse(null);
node.currentRebootGeneration = nodeAttributes.getRebootGeneration().orElse(null);
node.vespaVersion = nodeAttributes.getVespaVersion().map(Version::toFullString).orElse(null);
node.currentOsVersion = nodeAttributes.getCurrentOsVersion().map(Version::toFullString).orElse(null);
node.currentFirmwareCheck = nodeAttributes.getCurrentFirmwareCheck().map(Instant::toEpochMilli).orElse(null);
node.wantToDeprovision = nodeAttributes.getWantToDeprovision().orElse(null);
Map<String, JsonNode> reports = nodeAttributes.getReports();
node.reports = reports == null || reports.isEmpty() ? null : new TreeMap<>(reports);
return node;
}
} | class RealNodeRepository implements NodeRepository {
private static final Logger logger = Logger.getLogger(RealNodeRepository.class.getName());
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to add nodes: " + response.message + " " + response.errorCode);
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getOptionalNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
return Optional.ofNullable(nodeResponse).map(RealNodeRepository::createNodeSpec);
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, Set<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toSet())));
Map<String, Set<Acl.Node>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(
node -> new Acl.Node(node.hostname, node.ipAddress),
Collectors.toSet())));
Map<String, Set<String>> trustedNetworks = response.trustedNetworks.stream()
.collect(Collectors.groupingBy(GetAclResponse.Network::getTrustedBy,
Collectors.mapping(node -> node.network, Collectors.toSet())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet(), trustedNetworks.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname),
trustedNetworks.get(hostname))));
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to update node attributes: " + response.message + " " + response.errorCode);
}
@Override
public void setNodeState(String hostName, NodeState nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
logger.info(response.message);
if (Strings.isNullOrEmpty(response.errorCode)) return;
throw new NodeRepositoryException("Failed to set node state: " + response.message + " " + response.errorCode);
}
private static NodeResources.DiskSpeed toDiskSpeed(Boolean fastDisk) {
if (fastDisk == null) return NodeResources.DiskSpeed.any;
if (fastDisk) return NodeResources.DiskSpeed.fast;
else return NodeResources.DiskSpeed.slow;
}
private static NodeResources.StorageType toStorageType(Boolean remoteStorage) {
if (remoteStorage == null) return NodeResources.StorageType.any;
if (remoteStorage) return NodeResources.StorageType.remote;
else return NodeResources.StorageType.local;
}
private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = "fake-" + addNode.hostname;
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
addNode.nodeFlavor.ifPresent(f -> node.flavor = f);
addNode.flavorOverrides.flatMap(FlavorOverrides::diskGb).ifPresent(d -> node.minDiskAvailableGb = d);
addNode.nodeResources.ifPresent(resources -> {
node.minCpuCores = resources.vcpu();
node.minMainMemoryAvailableGb = resources.memoryGb();
node.minDiskAvailableGb = resources.diskGb();
node.bandwidthGbps = resources.bandwidthGbps();
node.fastDisk = resources.diskSpeed() == NodeResources.DiskSpeed.fast;
node.remoteStorage = resources.storageType() == NodeResources.StorageType.remote;
});
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
}
public static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.currentDockerImage = nodeAttributes.getDockerImage().map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration().orElse(null);
node.currentRebootGeneration = nodeAttributes.getRebootGeneration().orElse(null);
node.vespaVersion = nodeAttributes.getVespaVersion().map(Version::toFullString).orElse(null);
node.currentOsVersion = nodeAttributes.getCurrentOsVersion().map(Version::toFullString).orElse(null);
node.currentFirmwareCheck = nodeAttributes.getCurrentFirmwareCheck().map(Instant::toEpochMilli).orElse(null);
node.wantToDeprovision = nodeAttributes.getWantToDeprovision().orElse(null);
Map<String, JsonNode> reports = nodeAttributes.getReports();
node.reports = reports == null || reports.isEmpty() ? null : new TreeMap<>(reports);
return node;
}
} |
It's fine to always update this one and the block below even when suspended. The main issue is the system metrics, which involve the docker daemon, which may not be available (e.g. docker being upgraded). | public void updateMetrics(boolean isSuspended) {
for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
if (!isSuspended) numberOfUnhandledExceptions.add(nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions());
nodeAgentWithScheduler.updateContainerNodeMetrics(isSuspended);
}
if (!isSuspended) {
Runtime runtime = Runtime.getRuntime();
long freeMemory = runtime.freeMemory();
long totalMemory = runtime.totalMemory();
long usedMemory = totalMemory - freeMemory;
jvmHeapFree.sample(freeMemory);
jvmHeapUsed.sample(usedMemory);
jvmHeapTotal.sample(totalMemory);
}
} | if (!isSuspended) numberOfUnhandledExceptions.add(nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions()); | public void updateMetrics(boolean isSuspended) {
for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
if (!isSuspended) numberOfUnhandledExceptions.add(nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions());
nodeAgentWithScheduler.updateContainerNodeMetrics(isSuspended);
}
if (!isSuspended) {
Runtime runtime = Runtime.getRuntime();
long freeMemory = runtime.freeMemory();
long totalMemory = runtime.totalMemory();
long usedMemory = totalMemory - freeMemory;
jvmHeapFree.sample(freeMemory);
jvmHeapUsed.sample(usedMemory);
jvmHeapTotal.sample(totalMemory);
}
} | class NodeAdminImpl implements NodeAdmin {
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3);
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final Clock clock;
private final Duration freezeTimeout;
private final Duration spread;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final Gauge jvmHeapUsed;
private final Gauge jvmHeapFree;
private final Gauge jvmHeapTotal;
private final Counter numberOfUnhandledExceptions;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
metrics, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD);
}
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics,
Clock clock, Duration freezeTimeout, Duration spread) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
metrics, clock, freezeTimeout, spread);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
Metrics metrics, Clock clock, Duration freezeTimeout, Duration spread) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.clock = clock;
this.freezeTimeout = freezeTimeout;
this.spread = spread;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
this.numberOfUnhandledExceptions = metrics.declareCounter("unhandled_exceptions",
new Dimensions(Map.of("src", "node-agents")));
this.jvmHeapUsed = metrics.declareGauge("mem.heap.used");
this.jvmHeapFree = metrics.declareGauge("mem.heap.free");
this.jvmHeapTotal = metrics.declareGauge("mem.heap.total");
}
@Override
public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) {
Map<String, NodeAgentContext> nodeAgentContextsByHostname = nodeAgentContexts.stream()
.collect(Collectors.toMap(nac -> nac.hostname().value(), Function.identity()));
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
.forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stopForRemoval());
diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
naws.start();
nodeAgentWithSchedulerByHostname.put(hostname, naws);
});
Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1));
Instant nextAgentStart = clock.instant();
for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart);
nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents);
}
}
@Override
@Override
public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
}
@Override
public boolean isFrozen() {
return isFrozen;
}
@Override
public Duration subsystemFreezeDuration() {
if (startOfFreezeConvergence == null) {
return Duration.ZERO;
} else {
return Duration.between(startOfFreezeConvergence, clock.instant());
}
}
@Override
public void stopNodeAgentServices(List<String> hostnames) {
hostnames.parallelStream()
.filter(nodeAgentWithSchedulerByHostname::containsKey)
.map(nodeAgentWithSchedulerByHostname::get)
.forEach(NodeAgentWithScheduler::stopForHostSuspension);
}
@Override
public void start() {
}
@Override
public void stop() {
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgentWithScheduler::stopForRemoval);
}
private static <T> Set<T> diff(Set<T> minuend, Set<T> subtrahend) {
var result = new HashSet<>(minuend);
result.removeAll(subtrahend);
return result;
}
static class NodeAgentWithScheduler implements NodeAgentScheduler {
private final NodeAgent nodeAgent;
private final NodeAgentScheduler nodeAgentScheduler;
private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
this.nodeAgent = nodeAgent;
this.nodeAgentScheduler = nodeAgentScheduler;
}
void start() { nodeAgent.start(currentContext()); }
void stopForHostSuspension() { nodeAgent.stopForHostSuspension(currentContext()); }
void stopForRemoval() { nodeAgent.stopForRemoval(currentContext()); }
void updateContainerNodeMetrics(boolean isSuspended) { nodeAgent.updateContainerNodeMetrics(currentContext(), isSuspended); }
int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }
@Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); }
@Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
@Override public NodeAgentContext currentContext() { return nodeAgentScheduler.currentContext(); }
}
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
NodeAgentWithScheduler create(NodeAgentContext context);
}
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
} | class NodeAdminImpl implements NodeAdmin {
private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3);
private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
private final Clock clock;
private final Duration freezeTimeout;
private final Duration spread;
private boolean previousWantFrozen;
private boolean isFrozen;
private Instant startOfFreezeConvergence;
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final Gauge jvmHeapUsed;
private final Gauge jvmHeapFree;
private final Gauge jvmHeapTotal;
private final Counter numberOfUnhandledExceptions;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
metrics, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD);
}
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics,
Clock clock, Duration freezeTimeout, Duration spread) {
this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
metrics, clock, freezeTimeout, spread);
}
NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
Metrics metrics, Clock clock, Duration freezeTimeout, Duration spread) {
this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
this.clock = clock;
this.freezeTimeout = freezeTimeout;
this.spread = spread;
this.previousWantFrozen = true;
this.isFrozen = true;
this.startOfFreezeConvergence = clock.instant();
this.numberOfUnhandledExceptions = metrics.declareCounter("unhandled_exceptions",
new Dimensions(Map.of("src", "node-agents")));
this.jvmHeapUsed = metrics.declareGauge("mem.heap.used");
this.jvmHeapFree = metrics.declareGauge("mem.heap.free");
this.jvmHeapTotal = metrics.declareGauge("mem.heap.total");
}
@Override
public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) {
Map<String, NodeAgentContext> nodeAgentContextsByHostname = nodeAgentContexts.stream()
.collect(Collectors.toMap(nac -> nac.hostname().value(), Function.identity()));
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
.forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stopForRemoval());
diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
naws.start();
nodeAgentWithSchedulerByHostname.put(hostname, naws);
});
Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1));
Instant nextAgentStart = clock.instant();
for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart);
nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents);
}
}
@Override
@Override
public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
this.startOfFreezeConvergence = clock.instant();
} else {
this.startOfFreezeConvergence = null;
}
previousWantFrozen = wantFrozen;
}
boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout))
.count() == 0;
if (wantFrozen) {
if (allNodeAgentsConverged) isFrozen = true;
} else isFrozen = false;
return allNodeAgentsConverged;
}
@Override
public boolean isFrozen() {
return isFrozen;
}
@Override
public Duration subsystemFreezeDuration() {
if (startOfFreezeConvergence == null) {
return Duration.ZERO;
} else {
return Duration.between(startOfFreezeConvergence, clock.instant());
}
}
@Override
public void stopNodeAgentServices(List<String> hostnames) {
hostnames.parallelStream()
.filter(nodeAgentWithSchedulerByHostname::containsKey)
.map(nodeAgentWithSchedulerByHostname::get)
.forEach(NodeAgentWithScheduler::stopForHostSuspension);
}
@Override
public void start() {
}
@Override
public void stop() {
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgentWithScheduler::stopForRemoval);
}
private static <T> Set<T> diff(Set<T> minuend, Set<T> subtrahend) {
var result = new HashSet<>(minuend);
result.removeAll(subtrahend);
return result;
}
static class NodeAgentWithScheduler implements NodeAgentScheduler {
private final NodeAgent nodeAgent;
private final NodeAgentScheduler nodeAgentScheduler;
private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
this.nodeAgent = nodeAgent;
this.nodeAgentScheduler = nodeAgentScheduler;
}
void start() { nodeAgent.start(currentContext()); }
void stopForHostSuspension() { nodeAgent.stopForHostSuspension(currentContext()); }
void stopForRemoval() { nodeAgent.stopForRemoval(currentContext()); }
void updateContainerNodeMetrics(boolean isSuspended) { nodeAgent.updateContainerNodeMetrics(currentContext(), isSuspended); }
int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }
@Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); }
@Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
@Override public NodeAgentContext currentContext() { return nodeAgentScheduler.currentContext(); }
}
@FunctionalInterface
interface NodeAgentWithSchedulerFactory {
NodeAgentWithScheduler create(NodeAgentContext context);
}
private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
} |
Wasted unique opportunity to use `Stream::takeWhile` here | private List<Path> getMatchingFiles(Instant from, Instant to) {
Map<Path, Instant> paths = new HashMap<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
if (logFilePattern.matcher(file.getFileName().toString()).matches())
paths.put(file, attrs.lastModifiedTime().toInstant());
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
List<Path> sorted = new ArrayList<>();
for (var entries = paths.entrySet().stream().sorted(comparing(Map.Entry::getValue)).iterator(); entries.hasNext(); ) {
var entry = entries.next();
if (entry.getValue().isAfter(from))
sorted.add(entry.getKey());
if (entry.getValue().isAfter(to))
break;
}
return sorted;
} | if (entry.getValue().isAfter(to)) | private List<Path> getMatchingFiles(Instant from, Instant to) {
Map<Path, Instant> paths = new HashMap<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
if (logFilePattern.matcher(file.getFileName().toString()).matches())
paths.put(file, attrs.lastModifiedTime().toInstant());
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
List<Path> sorted = new ArrayList<>();
for (var entries = paths.entrySet().stream().sorted(comparing(Map.Entry::getValue)).iterator(); entries.hasNext(); ) {
var entry = entries.next();
if (entry.getValue().isAfter(from))
sorted.add(entry.getKey());
if (entry.getValue().isAfter(to))
break;
}
return sorted;
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
void writeLogs(OutputStream outputStream, Instant from, Instant to) {
try {
List<Path> logs = getMatchingFiles(from, to);
for (int i = 0; i < logs.size(); i++) {
Path log = logs.get(i);
boolean zipped = log.toString().endsWith(".gz");
try (InputStream in = Files.newInputStream(log)) {
InputStream inProxy;
if (i == 0 || i == logs.size() - 1) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(zipped ? new GZIPInputStream(in) : in, UTF_8));
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(zipped ? new GZIPOutputStream(buffer) : buffer, UTF_8))) {
for (String line; (line = reader.readLine()) != null; ) {
String[] parts = line.split("\t");
if (parts.length != 7)
continue;
Instant at = Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS);
if (at.isAfter(from) && ! at.isAfter(to)) {
writer.write(line);
writer.newLine();
}
}
}
inProxy = new ByteArrayInputStream(buffer.toByteArray());
}
else
inProxy = in;
if ( ! zipped && ! (outputStream instanceof GZIPOutputStream))
outputStream = new GZIPOutputStream(outputStream);
inProxy.transferTo(outputStream);
}
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
try {
outputStream.close();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Returns log files which may have relevant entries, sorted by modification time — the first and last must be filtered. */
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
void writeLogs(OutputStream outputStream, Instant from, Instant to) {
try {
List<Path> logs = getMatchingFiles(from, to);
for (int i = 0; i < logs.size(); i++) {
Path log = logs.get(i);
boolean zipped = log.toString().endsWith(".gz");
try (InputStream in = Files.newInputStream(log)) {
InputStream inProxy;
if (i == 0 || i == logs.size() - 1) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(zipped ? new GZIPInputStream(in) : in, UTF_8));
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(zipped ? new GZIPOutputStream(buffer) : buffer, UTF_8))) {
for (String line; (line = reader.readLine()) != null; ) {
String[] parts = line.split("\t");
if (parts.length != 7)
continue;
Instant at = Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS);
if (at.isAfter(from) && ! at.isAfter(to)) {
writer.write(line);
writer.newLine();
}
}
}
inProxy = new ByteArrayInputStream(buffer.toByteArray());
}
else
inProxy = in;
if ( ! zipped && ! (outputStream instanceof GZIPOutputStream))
outputStream = new GZIPOutputStream(outputStream);
inProxy.transferTo(outputStream);
}
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
try {
outputStream.close();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Returns log files which may have relevant entries, sorted by modification time — the first and last must be filtered. */
} |
o_O Well, we would have to use a boolean reference, as the first item where the predicate fails should also be included. But ... O_o | private List<Path> getMatchingFiles(Instant from, Instant to) {
Map<Path, Instant> paths = new HashMap<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
if (logFilePattern.matcher(file.getFileName().toString()).matches())
paths.put(file, attrs.lastModifiedTime().toInstant());
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
List<Path> sorted = new ArrayList<>();
for (var entries = paths.entrySet().stream().sorted(comparing(Map.Entry::getValue)).iterator(); entries.hasNext(); ) {
var entry = entries.next();
if (entry.getValue().isAfter(from))
sorted.add(entry.getKey());
if (entry.getValue().isAfter(to))
break;
}
return sorted;
} | if (entry.getValue().isAfter(to)) | private List<Path> getMatchingFiles(Instant from, Instant to) {
Map<Path, Instant> paths = new HashMap<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
if (logFilePattern.matcher(file.getFileName().toString()).matches())
paths.put(file, attrs.lastModifiedTime().toInstant());
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
List<Path> sorted = new ArrayList<>();
for (var entries = paths.entrySet().stream().sorted(comparing(Map.Entry::getValue)).iterator(); entries.hasNext(); ) {
var entry = entries.next();
if (entry.getValue().isAfter(from))
sorted.add(entry.getKey());
if (entry.getValue().isAfter(to))
break;
}
return sorted;
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
void writeLogs(OutputStream outputStream, Instant from, Instant to) {
try {
List<Path> logs = getMatchingFiles(from, to);
for (int i = 0; i < logs.size(); i++) {
Path log = logs.get(i);
boolean zipped = log.toString().endsWith(".gz");
try (InputStream in = Files.newInputStream(log)) {
InputStream inProxy;
if (i == 0 || i == logs.size() - 1) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(zipped ? new GZIPInputStream(in) : in, UTF_8));
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(zipped ? new GZIPOutputStream(buffer) : buffer, UTF_8))) {
for (String line; (line = reader.readLine()) != null; ) {
String[] parts = line.split("\t");
if (parts.length != 7)
continue;
Instant at = Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS);
if (at.isAfter(from) && ! at.isAfter(to)) {
writer.write(line);
writer.newLine();
}
}
}
inProxy = new ByteArrayInputStream(buffer.toByteArray());
}
else
inProxy = in;
if ( ! zipped && ! (outputStream instanceof GZIPOutputStream))
outputStream = new GZIPOutputStream(outputStream);
inProxy.transferTo(outputStream);
}
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
try {
outputStream.close();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Returns log files which may have relevant entries, sorted by modification time — the first and last must be filtered. */
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
void writeLogs(OutputStream outputStream, Instant from, Instant to) {
try {
List<Path> logs = getMatchingFiles(from, to);
for (int i = 0; i < logs.size(); i++) {
Path log = logs.get(i);
boolean zipped = log.toString().endsWith(".gz");
try (InputStream in = Files.newInputStream(log)) {
InputStream inProxy;
if (i == 0 || i == logs.size() - 1) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(zipped ? new GZIPInputStream(in) : in, UTF_8));
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(zipped ? new GZIPOutputStream(buffer) : buffer, UTF_8))) {
for (String line; (line = reader.readLine()) != null; ) {
String[] parts = line.split("\t");
if (parts.length != 7)
continue;
Instant at = Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS);
if (at.isAfter(from) && ! at.isAfter(to)) {
writer.write(line);
writer.newLine();
}
}
}
inProxy = new ByteArrayInputStream(buffer.toByteArray());
}
else
inProxy = in;
if ( ! zipped && ! (outputStream instanceof GZIPOutputStream))
outputStream = new GZIPOutputStream(outputStream);
inProxy.transferTo(outputStream);
}
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
try {
outputStream.close();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Returns log files which may have relevant entries, sorted by modification time — the first and last must be filtered. */
} |
It is not, f.ex.: ``` IntStream.range(0, 1000) .dropWhile(i -> i < 544) .takeWhile(i -> i < 549) .forEach(System.out::println); ``` Gives: ``` 544 545 546 547 548 ``` | private List<Path> getMatchingFiles(Instant from, Instant to) {
Map<Path, Instant> paths = new HashMap<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
if (logFilePattern.matcher(file.getFileName().toString()).matches())
paths.put(file, attrs.lastModifiedTime().toInstant());
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
List<Path> sorted = new ArrayList<>();
for (var entries = paths.entrySet().stream().sorted(comparing(Map.Entry::getValue)).iterator(); entries.hasNext(); ) {
var entry = entries.next();
if (entry.getValue().isAfter(from))
sorted.add(entry.getKey());
if (entry.getValue().isAfter(to))
break;
}
return sorted;
} | if (entry.getValue().isAfter(to)) | private List<Path> getMatchingFiles(Instant from, Instant to) {
Map<Path, Instant> paths = new HashMap<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
if (logFilePattern.matcher(file.getFileName().toString()).matches())
paths.put(file, attrs.lastModifiedTime().toInstant());
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
List<Path> sorted = new ArrayList<>();
for (var entries = paths.entrySet().stream().sorted(comparing(Map.Entry::getValue)).iterator(); entries.hasNext(); ) {
var entry = entries.next();
if (entry.getValue().isAfter(from))
sorted.add(entry.getKey());
if (entry.getValue().isAfter(to))
break;
}
return sorted;
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
void writeLogs(OutputStream outputStream, Instant from, Instant to) {
try {
List<Path> logs = getMatchingFiles(from, to);
for (int i = 0; i < logs.size(); i++) {
Path log = logs.get(i);
boolean zipped = log.toString().endsWith(".gz");
try (InputStream in = Files.newInputStream(log)) {
InputStream inProxy;
if (i == 0 || i == logs.size() - 1) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(zipped ? new GZIPInputStream(in) : in, UTF_8));
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(zipped ? new GZIPOutputStream(buffer) : buffer, UTF_8))) {
for (String line; (line = reader.readLine()) != null; ) {
String[] parts = line.split("\t");
if (parts.length != 7)
continue;
Instant at = Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS);
if (at.isAfter(from) && ! at.isAfter(to)) {
writer.write(line);
writer.newLine();
}
}
}
inProxy = new ByteArrayInputStream(buffer.toByteArray());
}
else
inProxy = in;
if ( ! zipped && ! (outputStream instanceof GZIPOutputStream))
outputStream = new GZIPOutputStream(outputStream);
inProxy.transferTo(outputStream);
}
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
try {
outputStream.close();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Returns log files which may have relevant entries, sorted by modification time — the first and last must be filtered. */
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
void writeLogs(OutputStream outputStream, Instant from, Instant to) {
try {
List<Path> logs = getMatchingFiles(from, to);
for (int i = 0; i < logs.size(); i++) {
Path log = logs.get(i);
boolean zipped = log.toString().endsWith(".gz");
try (InputStream in = Files.newInputStream(log)) {
InputStream inProxy;
if (i == 0 || i == logs.size() - 1) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(zipped ? new GZIPInputStream(in) : in, UTF_8));
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(zipped ? new GZIPOutputStream(buffer) : buffer, UTF_8))) {
for (String line; (line = reader.readLine()) != null; ) {
String[] parts = line.split("\t");
if (parts.length != 7)
continue;
Instant at = Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS);
if (at.isAfter(from) && ! at.isAfter(to)) {
writer.write(line);
writer.newLine();
}
}
}
inProxy = new ByteArrayInputStream(buffer.toByteArray());
}
else
inProxy = in;
if ( ! zipped && ! (outputStream instanceof GZIPOutputStream))
outputStream = new GZIPOutputStream(outputStream);
inProxy.transferTo(outputStream);
}
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
try {
outputStream.close();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Returns log files which may have relevant entries, sorted by modification time — the first and last must be filtered. */
} |
Exactly, but I want it to be. | private List<Path> getMatchingFiles(Instant from, Instant to) {
Map<Path, Instant> paths = new HashMap<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
if (logFilePattern.matcher(file.getFileName().toString()).matches())
paths.put(file, attrs.lastModifiedTime().toInstant());
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
List<Path> sorted = new ArrayList<>();
for (var entries = paths.entrySet().stream().sorted(comparing(Map.Entry::getValue)).iterator(); entries.hasNext(); ) {
var entry = entries.next();
if (entry.getValue().isAfter(from))
sorted.add(entry.getKey());
if (entry.getValue().isAfter(to))
break;
}
return sorted;
} | if (entry.getValue().isAfter(to)) | private List<Path> getMatchingFiles(Instant from, Instant to) {
Map<Path, Instant> paths = new HashMap<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
if (logFilePattern.matcher(file.getFileName().toString()).matches())
paths.put(file, attrs.lastModifiedTime().toInstant());
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
List<Path> sorted = new ArrayList<>();
for (var entries = paths.entrySet().stream().sorted(comparing(Map.Entry::getValue)).iterator(); entries.hasNext(); ) {
var entry = entries.next();
if (entry.getValue().isAfter(from))
sorted.add(entry.getKey());
if (entry.getValue().isAfter(to))
break;
}
return sorted;
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
void writeLogs(OutputStream outputStream, Instant from, Instant to) {
try {
List<Path> logs = getMatchingFiles(from, to);
for (int i = 0; i < logs.size(); i++) {
Path log = logs.get(i);
boolean zipped = log.toString().endsWith(".gz");
try (InputStream in = Files.newInputStream(log)) {
InputStream inProxy;
if (i == 0 || i == logs.size() - 1) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(zipped ? new GZIPInputStream(in) : in, UTF_8));
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(zipped ? new GZIPOutputStream(buffer) : buffer, UTF_8))) {
for (String line; (line = reader.readLine()) != null; ) {
String[] parts = line.split("\t");
if (parts.length != 7)
continue;
Instant at = Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS);
if (at.isAfter(from) && ! at.isAfter(to)) {
writer.write(line);
writer.newLine();
}
}
}
inProxy = new ByteArrayInputStream(buffer.toByteArray());
}
else
inProxy = in;
if ( ! zipped && ! (outputStream instanceof GZIPOutputStream))
outputStream = new GZIPOutputStream(outputStream);
inProxy.transferTo(outputStream);
}
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
try {
outputStream.close();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Returns log files which may have relevant entries, sorted by modification time — the first and last must be filtered. */
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
void writeLogs(OutputStream outputStream, Instant from, Instant to) {
try {
List<Path> logs = getMatchingFiles(from, to);
for (int i = 0; i < logs.size(); i++) {
Path log = logs.get(i);
boolean zipped = log.toString().endsWith(".gz");
try (InputStream in = Files.newInputStream(log)) {
InputStream inProxy;
if (i == 0 || i == logs.size() - 1) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(zipped ? new GZIPInputStream(in) : in, UTF_8));
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(zipped ? new GZIPOutputStream(buffer) : buffer, UTF_8))) {
for (String line; (line = reader.readLine()) != null; ) {
String[] parts = line.split("\t");
if (parts.length != 7)
continue;
Instant at = Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS);
if (at.isAfter(from) && ! at.isAfter(to)) {
writer.write(line);
writer.newLine();
}
}
}
inProxy = new ByteArrayInputStream(buffer.toByteArray());
}
else
inProxy = in;
if ( ! zipped && ! (outputStream instanceof GZIPOutputStream))
outputStream = new GZIPOutputStream(outputStream);
inProxy.transferTo(outputStream);
}
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
try {
outputStream.close();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Returns log files which may have relevant entries, sorted by modification time — the first and last must be filtered. */
} |
Ah yes, I can't read :man_facepalming: | private List<Path> getMatchingFiles(Instant from, Instant to) {
Map<Path, Instant> paths = new HashMap<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
if (logFilePattern.matcher(file.getFileName().toString()).matches())
paths.put(file, attrs.lastModifiedTime().toInstant());
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
List<Path> sorted = new ArrayList<>();
for (var entries = paths.entrySet().stream().sorted(comparing(Map.Entry::getValue)).iterator(); entries.hasNext(); ) {
var entry = entries.next();
if (entry.getValue().isAfter(from))
sorted.add(entry.getKey());
if (entry.getValue().isAfter(to))
break;
}
return sorted;
} | if (entry.getValue().isAfter(to)) | private List<Path> getMatchingFiles(Instant from, Instant to) {
Map<Path, Instant> paths = new HashMap<>();
try {
Files.walkFileTree(logDirectory, new SimpleFileVisitor<>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
if (logFilePattern.matcher(file.getFileName().toString()).matches())
paths.put(file, attrs.lastModifiedTime().toInstant());
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
return FileVisitResult.CONTINUE;
}
});
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
List<Path> sorted = new ArrayList<>();
for (var entries = paths.entrySet().stream().sorted(comparing(Map.Entry::getValue)).iterator(); entries.hasNext(); ) {
var entry = entries.next();
if (entry.getValue().isAfter(from))
sorted.add(entry.getKey());
if (entry.getValue().isAfter(to))
break;
}
return sorted;
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
void writeLogs(OutputStream outputStream, Instant from, Instant to) {
try {
List<Path> logs = getMatchingFiles(from, to);
for (int i = 0; i < logs.size(); i++) {
Path log = logs.get(i);
boolean zipped = log.toString().endsWith(".gz");
try (InputStream in = Files.newInputStream(log)) {
InputStream inProxy;
if (i == 0 || i == logs.size() - 1) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(zipped ? new GZIPInputStream(in) : in, UTF_8));
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(zipped ? new GZIPOutputStream(buffer) : buffer, UTF_8))) {
for (String line; (line = reader.readLine()) != null; ) {
String[] parts = line.split("\t");
if (parts.length != 7)
continue;
Instant at = Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS);
if (at.isAfter(from) && ! at.isAfter(to)) {
writer.write(line);
writer.newLine();
}
}
}
inProxy = new ByteArrayInputStream(buffer.toByteArray());
}
else
inProxy = in;
if ( ! zipped && ! (outputStream instanceof GZIPOutputStream))
outputStream = new GZIPOutputStream(outputStream);
inProxy.transferTo(outputStream);
}
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
try {
outputStream.close();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Returns log files which may have relevant entries, sorted by modification time — the first and last must be filtered. */
} | class LogReader {
private final Path logDirectory;
private final Pattern logFilePattern;
LogReader(String logDirectory, String logFilePattern) {
this(Paths.get(Defaults.getDefaults().underVespaHome(logDirectory)), Pattern.compile(logFilePattern));
}
LogReader(Path logDirectory, Pattern logFilePattern) {
this.logDirectory = logDirectory;
this.logFilePattern = logFilePattern;
}
void writeLogs(OutputStream outputStream, Instant from, Instant to) {
try {
List<Path> logs = getMatchingFiles(from, to);
for (int i = 0; i < logs.size(); i++) {
Path log = logs.get(i);
boolean zipped = log.toString().endsWith(".gz");
try (InputStream in = Files.newInputStream(log)) {
InputStream inProxy;
if (i == 0 || i == logs.size() - 1) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(zipped ? new GZIPInputStream(in) : in, UTF_8));
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(zipped ? new GZIPOutputStream(buffer) : buffer, UTF_8))) {
for (String line; (line = reader.readLine()) != null; ) {
String[] parts = line.split("\t");
if (parts.length != 7)
continue;
Instant at = Instant.EPOCH.plus((long) (Double.parseDouble(parts[0]) * 1_000_000), ChronoUnit.MICROS);
if (at.isAfter(from) && ! at.isAfter(to)) {
writer.write(line);
writer.newLine();
}
}
}
inProxy = new ByteArrayInputStream(buffer.toByteArray());
}
else
inProxy = in;
if ( ! zipped && ! (outputStream instanceof GZIPOutputStream))
outputStream = new GZIPOutputStream(outputStream);
inProxy.transferTo(outputStream);
}
}
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
finally {
try {
outputStream.close();
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
/** Returns log files which may have relevant entries, sorted by modification time — the first and last must be filtered. */
} |
Consider ``` Map<ApplicationId, List<Node>> nodesByApplication = nodeRepository().list() .nodeType(NodeType.tenant, NodeType.proxy).asList().stream() .filter(node -> node.allocation().isPresent()) .collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.toList())); ``` to list all the nodes at once | protected Set<ApplicationId> applicationsNeedingMaintenance() {
List<Node> nodes = nodeRepository().getNodes(NodeType.tenant);
nodes.addAll(nodeRepository().getNodes(NodeType.proxy));
Map<ApplicationId, List<Node>> nodesByApplication = nodes.stream()
.filter(node -> node.allocation().isPresent())
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.toList()));
return nodesByApplication.entrySet().stream()
.filter(entry -> hasNodesWithChanges(entry.getKey(), entry.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toCollection(LinkedHashSet::new));
} | List<Node> nodes = nodeRepository().getNodes(NodeType.tenant); | protected Set<ApplicationId> applicationsNeedingMaintenance() {
Map<ApplicationId, List<Node>> nodesByApplication = nodeRepository().list()
.nodeType(NodeType.tenant, NodeType.proxy).asList().stream()
.filter(node -> node.allocation().isPresent())
.collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.toList()));
return nodesByApplication.entrySet().stream()
.filter(entry -> hasNodesWithChanges(entry.getKey(), entry.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toCollection(LinkedHashSet::new));
} | class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
OperatorChangeApplicationMaintainer(Deployer deployer, NodeRepository nodeRepository, Duration interval) {
super(deployer, nodeRepository, interval);
}
@Override
/**
* Deploy in the maintenance thread to avoid scheduling multiple deployments of the same application if it takes
* longer to deploy than the (short) maintenance interval of this
*/
@Override
protected void deploy(ApplicationId application) {
deployWithLock(application);
log.info("Redeployed application " + application.toShortString() +
" as a manual change was made to its nodes");
}
private boolean hasNodesWithChanges(ApplicationId applicationId, List<Node> nodes) {
Optional<Instant> lastDeployTime = deployer().lastDeployTime(applicationId);
if (lastDeployTime.isEmpty()) return false;
return nodes.stream()
.flatMap(node -> node.history().events().stream())
.filter(event -> event.agent() == Agent.operator)
.map(History.Event::at)
.anyMatch(e -> lastDeployTime.get().isBefore(e));
}
} | class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
OperatorChangeApplicationMaintainer(Deployer deployer, NodeRepository nodeRepository, Duration interval) {
super(deployer, nodeRepository, interval);
}
@Override
/**
* Deploy in the maintenance thread to avoid scheduling multiple deployments of the same application if it takes
* longer to deploy than the (short) maintenance interval of this
*/
@Override
protected void deploy(ApplicationId application) {
deployWithLock(application);
log.info("Redeployed application " + application.toShortString() +
" as a manual change was made to its nodes");
}
private boolean hasNodesWithChanges(ApplicationId applicationId, List<Node> nodes) {
Optional<Instant> lastDeployTime = deployer().lastDeployTime(applicationId);
if (lastDeployTime.isEmpty()) return false;
return nodes.stream()
.flatMap(node -> node.history().events().stream())
.filter(event -> event.agent() == Agent.operator)
.map(History.Event::at)
.anyMatch(e -> lastDeployTime.get().isBefore(e));
}
} |
Intentional use of `valueOf`, to fail on bad value. | protected void doExecute() throws MojoFailureException, MojoExecutionException {
loggable = DeploymentLog.Level.valueOf(vespaLogLevel);
Deployment deployment = Deployment.ofPackage(Paths.get(firstNonBlank(applicationZip,
projectPathOf("target", "application.zip"))));
if (vespaVersion != null) deployment = deployment.atVersion(vespaVersion);
ZoneId zone = zoneOf(environment, region);
DeploymentResult result = controller.deploy(deployment, id, zone);
getLog().info(result.message());
if (follow) tailLogs(id, zone, result.run());
} | loggable = DeploymentLog.Level.valueOf(vespaLogLevel); | protected void doExecute() throws MojoFailureException, MojoExecutionException {
loggable = DeploymentLog.Level.valueOf(vespaLogLevel);
Deployment deployment = Deployment.ofPackage(Paths.get(firstNonBlank(applicationZip,
projectPathOf("target", "application.zip"))));
if (vespaVersion != null) deployment = deployment.atVersion(vespaVersion);
ZoneId zone = zoneOf(environment, region);
DeploymentResult result = controller.deploy(deployment, id, zone);
getLog().info(result.message());
if (follow) tailLogs(id, zone, result.run());
} | class DeployMojo extends AbstractVespaDeploymentMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "vespaVersion")
private String vespaVersion;
@Parameter(property = "follow", defaultValue = "true")
private boolean follow;
@Parameter(property = "vespaLogLevel", defaultValue = "error")
private String vespaLogLevel;
private DeploymentLog.Level loggable;
@Override
private void tailLogs(ApplicationId id, ZoneId zone, long run) throws MojoFailureException, MojoExecutionException {
long last = -1;
DeploymentLog log;
while (true) {
log = controller.deploymentLog(id, zone, run, last);
for (DeploymentLog.Entry entry : log.entries())
print(entry);
last = log.last().orElse(last);
if ( ! log.isActive())
break;
try {
Thread.sleep(1000);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
break;
}
}
switch (log.status()) {
case success: return;
case error: throw new MojoExecutionException("Unexpected error during deployment; see log for details");
case aborted: throw new MojoFailureException("Deployment was aborted, probably by a newer deployment");
case outOfCapacity: throw new MojoFailureException("No capacity left in zone; please contact the Vespa team");
case deploymentFailed: throw new MojoFailureException("Deployment failed; see log for details");
case installationFailed: throw new MojoFailureException("Installation failed; see Vespa log for details");
case running: throw new MojoFailureException("Deployment not completed");
case testFailure: throw new IllegalStateException("Unexpected status; tests are not run for manual deployments");
default: throw new IllegalArgumentException("Unexpected status '" + log.status() + "'");
}
}
private static final DateTimeFormatter formatter = DateTimeFormatter.ofPattern("HH:mm:ss").withZone(ZoneOffset.UTC);
private static final String padding = "\n" + " ".repeat(23);
private void print(DeploymentLog.Entry entry) {
String timestamp = formatter.format(entry.at());
String message = String.join(padding, entry.message().split("\n"))
.replaceAll("\\s*\n", "\n").trim();
if ( ! entry.isVespaLogEntry() || loggable.compareTo(entry.level()) >= 0)
switch (entry.level()) {
case error : getLog().error(" [" + timestamp + "] " + message); break;
case warning : getLog().warn (" [" + timestamp + "] " + message); break;
case info : getLog().info(" [" + timestamp + "] " + message); break;
default : getLog().debug(" [" + timestamp + "] " + message); break;
}
}
} | class DeployMojo extends AbstractVespaDeploymentMojo {
@Parameter(property = "applicationZip")
private String applicationZip;
@Parameter(property = "vespaVersion")
private String vespaVersion;
@Parameter(property = "follow", defaultValue = "true")
private boolean follow;
@Parameter(property = "vespaLogLevel", defaultValue = "error")
private String vespaLogLevel;
private DeploymentLog.Level loggable;
@Override
private void tailLogs(ApplicationId id, ZoneId zone, long run) throws MojoFailureException, MojoExecutionException {
long last = -1;
DeploymentLog log;
while (true) {
log = controller.deploymentLog(id, zone, run, last);
for (DeploymentLog.Entry entry : log.entries())
print(entry);
last = log.last().orElse(last);
if ( ! log.isActive())
break;
try {
Thread.sleep(1000);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
break;
}
}
switch (log.status()) {
case success: return;
case error: throw new MojoExecutionException("Unexpected error during deployment; see log for details");
case aborted: throw new MojoFailureException("Deployment was aborted, probably by a newer deployment");
case outOfCapacity: throw new MojoFailureException("No capacity left in zone; please contact the Vespa team");
case deploymentFailed: throw new MojoFailureException("Deployment failed; see log for details");
case installationFailed: throw new MojoFailureException("Installation failed; see Vespa log for details");
case running: throw new MojoFailureException("Deployment not completed");
case testFailure: throw new IllegalStateException("Unexpected status; tests are not run for manual deployments");
default: throw new IllegalArgumentException("Unexpected status '" + log.status() + "'");
}
}
private static final DateTimeFormatter formatter = DateTimeFormatter.ofPattern("HH:mm:ss").withZone(ZoneOffset.UTC);
private static final String padding = "\n" + " ".repeat(23);
private void print(DeploymentLog.Entry entry) {
String timestamp = formatter.format(entry.at());
String message = String.join(padding, entry.message().split("\n"))
.replaceAll("\\s*\n", "\n").trim();
if ( ! entry.isVespaLogEntry() || loggable.compareTo(entry.level()) >= 0)
switch (entry.level()) {
case error : getLog().error(" [" + timestamp + "] " + message); break;
case warning : getLog().warn (" [" + timestamp + "] " + message); break;
case info : getLog().info(" [" + timestamp + "] " + message); break;
default : getLog().debug(" [" + timestamp + "] " + message); break;
}
}
} |
This only works if you add/patch nodes with host first. Maybe ```suggestion if (node.parentHostname().isPresent() == existingNode.parentHostname().isPresent()) return false; // Not a parent-child node if (node.parentHostname().isEmpty()) return canAssignIpOf(node, existingNode); ``` ? | private static boolean canAssignIpOf(Node existingNode, Node node) {
if (node.parentHostname().isEmpty()) return false;
if (!node.parentHostname().get().equals(existingNode.hostname())) return false;
switch (node.type()) {
case proxy: return existingNode.type() == proxyhost;
case config: return existingNode.type() == confighost;
case controller: return existingNode.type() == controllerhost;
}
return false;
} | if (node.parentHostname().isEmpty()) return false; | private static boolean canAssignIpOf(Node existingNode, Node node) {
if (node.parentHostname().isPresent() == existingNode.parentHostname().isPresent()) return false;
if (node.parentHostname().isEmpty()) return canAssignIpOf(node, existingNode);
if (!node.parentHostname().get().equals(existingNode.hostname())) return false;
switch (node.type()) {
case proxy: return existingNode.type() == proxyhost;
case config: return existingNode.type() == confighost;
case controller: return existingNode.type() == controllerhost;
}
return false;
} | class Config {
public static final Config EMPTY = new Config(Set.of(), Set.of());
private final Set<String> primary;
private final Pool pool;
/** DO NOT USE in non-test code. Public for serialization purposes. */
public Config(Set<String> primary, Set<String> pool) {
this.primary = ImmutableSet.copyOf(Objects.requireNonNull(primary, "primary must be non-null"));
this.pool = new Pool(Objects.requireNonNull(pool, "pool must be non-null"));
}
/** The primary addresses of this. These addresses are used when communicating with the node itself */
public Set<String> primary() {
return primary;
}
/** Returns the IP address pool available on a node */
public Pool pool() {
return pool;
}
/** Returns a copy of this with pool set to given value */
public Config with(Pool pool) {
return new Config(primary, pool.asSet());
}
/** Returns a copy of this with pool set to given value */
public Config with(Set<String> primary) {
return new Config(require(primary), pool.asSet());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Config config = (Config) o;
return primary.equals(config.primary) &&
pool.equals(config.pool);
}
@Override
public int hashCode() {
return Objects.hash(primary, pool);
}
@Override
public String toString() {
return String.format("ip config primary=%s pool=%s", primary, pool.asSet());
}
/** Validates and returns the given addresses */
public static Set<String> require(Set<String> addresses) {
try {
addresses.forEach(InetAddresses::forString);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Found one or more invalid addresses in " + addresses, e);
}
return addresses;
}
/**
* Verify IP config of given nodes
*
* @throws IllegalArgumentException if there are IP conflicts with existing nodes
*/
public static List<Node> verify(List<Node> nodes, LockedNodeList allNodes) {
for (var node : nodes) {
for (var other : allNodes) {
if (node.equals(other)) continue;
if (canAssignIpOf(other, node)) continue;
var addresses = new HashSet<>(node.ipConfig().primary());
var otherAddresses = new HashSet<>(other.ipConfig().primary());
if (node.type().isDockerHost()) {
addresses.addAll(node.ipConfig().pool().asSet());
otherAddresses.addAll(other.ipConfig().pool().asSet());
}
otherAddresses.retainAll(addresses);
if (!otherAddresses.isEmpty())
throw new IllegalArgumentException("Cannot assign " + addresses + " to " + node.hostname() +
": " + otherAddresses + " already assigned to " +
other.hostname());
}
}
return nodes;
}
/** Returns whether IP address of existing node can be assigned to node */
public static Node verify(Node node, LockedNodeList allNodes) {
return verify(List.of(node), allNodes).get(0);
}
} | class Config {
public static final Config EMPTY = new Config(Set.of(), Set.of());
private final Set<String> primary;
private final Pool pool;
/** DO NOT USE in non-test code. Public for serialization purposes. */
public Config(Set<String> primary, Set<String> pool) {
this.primary = ImmutableSet.copyOf(Objects.requireNonNull(primary, "primary must be non-null"));
this.pool = new Pool(Objects.requireNonNull(pool, "pool must be non-null"));
}
/** The primary addresses of this. These addresses are used when communicating with the node itself */
public Set<String> primary() {
return primary;
}
/** Returns the IP address pool available on a node */
public Pool pool() {
return pool;
}
/** Returns a copy of this with pool set to given value */
public Config with(Pool pool) {
return new Config(primary, pool.asSet());
}
/** Returns a copy of this with pool set to given value */
public Config with(Set<String> primary) {
return new Config(require(primary), pool.asSet());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Config config = (Config) o;
return primary.equals(config.primary) &&
pool.equals(config.pool);
}
@Override
public int hashCode() {
return Objects.hash(primary, pool);
}
@Override
public String toString() {
return String.format("ip config primary=%s pool=%s", primary, pool.asSet());
}
/** Validates and returns the given addresses */
public static Set<String> require(Set<String> addresses) {
try {
addresses.forEach(InetAddresses::forString);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Found one or more invalid addresses in " + addresses, e);
}
return addresses;
}
/**
* Verify IP config of given nodes
*
* @throws IllegalArgumentException if there are IP conflicts with existing nodes
*/
public static List<Node> verify(List<Node> nodes, LockedNodeList allNodes) {
for (var node : nodes) {
for (var other : allNodes) {
if (node.equals(other)) continue;
if (canAssignIpOf(other, node)) continue;
var addresses = new HashSet<>(node.ipConfig().primary());
var otherAddresses = new HashSet<>(other.ipConfig().primary());
if (node.type().isDockerHost()) {
addresses.addAll(node.ipConfig().pool().asSet());
otherAddresses.addAll(other.ipConfig().pool().asSet());
}
otherAddresses.retainAll(addresses);
if (!otherAddresses.isEmpty())
throw new IllegalArgumentException("Cannot assign " + addresses + " to " + node.hostname() +
": " + otherAddresses + " already assigned to " +
other.hostname());
}
}
return nodes;
}
/** Returns whether IP address of existing node can be assigned to node */
public static Node verify(Node node, LockedNodeList allNodes) {
return verify(List.of(node), allNodes).get(0);
}
} |
```suggestion boolean useHttp = controller.system().isPublic() ``` | Optional<URI> testerEndpoint(RunId id) {
DeploymentId testerId = new DeploymentId(id.tester().id(), id.type().zone(controller.system()));
boolean useHttp = controller.system().isPublic()
&& ! directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, id.tester().id().serializedForm()).value();
return controller.applications().getDeploymentEndpoints(testerId)
.stream().findAny()
.or(() -> controller.applications().routingPolicies().get(testerId).stream()
.findAny()
.map(policy -> policy.endpointIn(controller.system()).url()))
.map(uri -> useHttp ? URI.create("http:
} | boolean useHttp = controller.system().isPublic() | Optional<URI> testerEndpoint(RunId id) {
DeploymentId testerId = new DeploymentId(id.tester().id(), id.type().zone(controller.system()));
boolean useHttp = controller.system().isPublic()
&& ! directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, id.tester().id().serializedForm()).value();
return controller.applications().getDeploymentEndpoints(testerId)
.stream().findAny()
.or(() -> controller.applications().routingPolicies().get(testerId).stream()
.findAny()
.map(policy -> policy.endpointIn(controller.system()).url()))
.map(uri -> useHttp ? URI.create("http:
} | class JobController {
private static final int historyLength = 256;
private static final Duration maxHistoryAge = Duration.ofDays(60);
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final Badges badges;
private final BooleanFlag directRoutingUseHttps;
private AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
public JobController(Controller controller, FlagSource flagSource) {
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
this.cloud = controller.serviceRegistry().testerCloud();
this.badges = new Badges(controller.zoneRegistry().badgeUrl());
this.directRoutingUseHttps = Flags.DIRECT_ROUTING_USE_HTTPS_4443.bindTo(flagSource);
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : applications())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Lock __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
locked(id, __ -> {
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log messages for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
log(id, step, messages.stream()
.map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
.collect(toList()));
}
/** Stores the given log message for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
public void updateVespaLog(RunId id) {
locked(id, run -> {
if ( ! run.steps().containsKey(copyVespaLogs))
return run;
ZoneId zone = id.type().zone(controller.system());
Optional<Deployment> deployment = Optional.ofNullable(controller.applications().require(id.application())
.deployments().get(zone));
if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
return run;
Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at();
List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.application(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
if (log.isEmpty())
return run;
logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
return run.with(log.get(log.size() - 1).at());
});
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
if ( ! run.readySteps().contains(endTests))
return run;
Optional<URI> testerEndpoint = testerEndpoint(id);
if ( ! testerEndpoint.isPresent())
return run;
List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), endTests, entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
last(id, type).ifPresent(run -> runs.put(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.flatMap(Optional::stream)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
locked(id, run -> {
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
var oldEntries = runs.entrySet().iterator();
for (var old = oldEntries.next();
old.getKey().number() <= last - historyLength
|| old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
old = oldEntries.next()) {
logs.delete(old.getKey());
oldEntries.remove();
}
});
logs.flush(id);
return finishedRun;
});
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
locked(id, run -> run.aborted());
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId,
ApplicationPackage applicationPackage, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
application = registered(application);
long run = nextBuild(id);
if (applicationPackage.compileVersion().isPresent() && applicationPackage.buildTime().isPresent())
version.set(ApplicationVersion.from(revision, run, authorEmail,
applicationPackage.compileVersion().get(),
applicationPackage.buildTime().get()));
else
version.set(ApplicationVersion.from(revision, run, authorEmail));
controller.applications().applicationStore().put(id,
version.get(),
applicationPackage.zippedContent());
controller.applications().applicationStore().put(TesterId.of(id),
version.get(),
testPackageBytes);
prunePackages(id);
controller.applications().storeWithUpdatedConfig(application, applicationPackage);
controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
});
return version.get();
}
/** Registers the given application, copying necessary application packages, and returns the modified version. */
private LockedApplication registered(LockedApplication application) {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().put(application.get().id(), appVersion, content);
});
return application.withChange(application.get().change().withoutPlatform().withoutApplication())
.withBuiltInternally(true);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
if ( ! type.environment().isManuallyDeployed() && versions.targetApplication().isUnknown())
throw new IllegalArgumentException("Target application must be a valid reference.");
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
controller.applications().store(registered(application));
});
if ( ! type.environment().isManuallyDeployed())
throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
locked(id, type, __ -> {
controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent());
start(id, type, new Versions(platform.orElse(controller.systemVersion()),
ApplicationVersion.unknown,
Optional.empty(),
Optional.empty()));
runner.get().accept(last(id, type).get());
});
}
/** Aborts a run and waits for it complete. */
private void abortAndWait(RunId id) {
abort(id);
runner.get().accept(last(id.application(), id.type()).get());
while ( ! last(id.application(), id.type()).get().hasEnded()) {
try {
Thread.sleep(100);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Lock ___ = curator.lock(id, type)) {
deactivateTester(tester, type);
curator.deleteRunData(id, type);
logs.delete(id);
}
});
}
catch (TimeoutException e) {
return;
}
curator.deleteRunData(id);
});
}
public void deactivateTester(TesterId id, JobType type) {
try {
controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NotFoundException ignored) {
}
}
/** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
List<Run> runs = new ArrayList<>(runs(id, type).values());
Run lastCompleted = null;
if (runs.size() > 0)
lastCompleted = runs.get(runs.size() - 1);
if (runs.size() > 1 && ! lastCompleted.hasEnded())
lastCompleted = runs.get(runs.size() - 2);
return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
}
/** Returns a URI which points at a badge showing current status for all jobs for the given application. */
public URI overviewBadge(ApplicationId id) {
DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
return badges.overview(id,
steps.jobs().stream()
.map(type -> last(id, type))
.flatMap(Optional::stream)
.collect(toList()));
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
/** Returns a set containing the zone of the deployment tested in the given run, and all production zones for the application. */
public Set<ZoneId> testedZoneAndProductionZones(ApplicationId id, JobType type) {
return Stream.concat(Stream.of(type.zone(controller.system())),
controller.applications().require(id).productionDeployments().keySet().stream())
.collect(Collectors.toSet());
}
private long nextBuild(ApplicationId id) {
return 1 + controller.applications().require(id).deploymentJobs()
.statusOf(JobType.component)
.flatMap(JobStatus::lastCompleted)
.map(JobStatus.JobRun::id)
.orElse(0L);
}
private void prunePackages(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().prune(id, oldestDeployed);
controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
});
});
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Lock __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
try (Lock __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
run = modifications.apply(run);
curator.writeLastRun(run);
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Lock lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.prerequisites())
try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} | class JobController {
private static final int historyLength = 256;
private static final Duration maxHistoryAge = Duration.ofDays(60);
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final Badges badges;
private final BooleanFlag directRoutingUseHttps;
private AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
public JobController(Controller controller, FlagSource flagSource) {
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
this.cloud = controller.serviceRegistry().testerCloud();
this.badges = new Badges(controller.zoneRegistry().badgeUrl());
this.directRoutingUseHttps = Flags.DIRECT_ROUTING_USE_HTTPS_4443.bindTo(flagSource);
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : applications())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Lock __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
locked(id, __ -> {
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log messages for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
log(id, step, messages.stream()
.map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
.collect(toList()));
}
/** Stores the given log message for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
public void updateVespaLog(RunId id) {
locked(id, run -> {
if ( ! run.steps().containsKey(copyVespaLogs))
return run;
ZoneId zone = id.type().zone(controller.system());
Optional<Deployment> deployment = Optional.ofNullable(controller.applications().require(id.application())
.deployments().get(zone));
if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
return run;
Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at();
List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.application(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
if (log.isEmpty())
return run;
logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
return run.with(log.get(log.size() - 1).at());
});
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
if ( ! run.readySteps().contains(endTests))
return run;
Optional<URI> testerEndpoint = testerEndpoint(id);
if ( ! testerEndpoint.isPresent())
return run;
List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), endTests, entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
last(id, type).ifPresent(run -> runs.put(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.flatMap(Optional::stream)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
locked(id, run -> {
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
var oldEntries = runs.entrySet().iterator();
for (var old = oldEntries.next();
old.getKey().number() <= last - historyLength
|| old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
old = oldEntries.next()) {
logs.delete(old.getKey());
oldEntries.remove();
}
});
logs.flush(id);
return finishedRun;
});
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
locked(id, run -> run.aborted());
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId,
ApplicationPackage applicationPackage, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
application = registered(application);
long run = nextBuild(id);
if (applicationPackage.compileVersion().isPresent() && applicationPackage.buildTime().isPresent())
version.set(ApplicationVersion.from(revision, run, authorEmail,
applicationPackage.compileVersion().get(),
applicationPackage.buildTime().get()));
else
version.set(ApplicationVersion.from(revision, run, authorEmail));
controller.applications().applicationStore().put(id,
version.get(),
applicationPackage.zippedContent());
controller.applications().applicationStore().put(TesterId.of(id),
version.get(),
testPackageBytes);
prunePackages(id);
controller.applications().storeWithUpdatedConfig(application, applicationPackage);
controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
});
return version.get();
}
/** Registers the given application, copying necessary application packages, and returns the modified version. */
private LockedApplication registered(LockedApplication application) {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().put(application.get().id(), appVersion, content);
});
return application.withChange(application.get().change().withoutPlatform().withoutApplication())
.withBuiltInternally(true);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
if ( ! type.environment().isManuallyDeployed() && versions.targetApplication().isUnknown())
throw new IllegalArgumentException("Target application must be a valid reference.");
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
controller.applications().store(registered(application));
});
if ( ! type.environment().isManuallyDeployed())
throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
locked(id, type, __ -> {
controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent());
start(id, type, new Versions(platform.orElse(controller.systemVersion()),
ApplicationVersion.unknown,
Optional.empty(),
Optional.empty()));
runner.get().accept(last(id, type).get());
});
}
/** Aborts a run and waits for it complete. */
private void abortAndWait(RunId id) {
abort(id);
runner.get().accept(last(id.application(), id.type()).get());
while ( ! last(id.application(), id.type()).get().hasEnded()) {
try {
Thread.sleep(100);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Lock ___ = curator.lock(id, type)) {
deactivateTester(tester, type);
curator.deleteRunData(id, type);
logs.delete(id);
}
});
}
catch (TimeoutException e) {
return;
}
curator.deleteRunData(id);
});
}
public void deactivateTester(TesterId id, JobType type) {
try {
controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NotFoundException ignored) {
}
}
/** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
List<Run> runs = new ArrayList<>(runs(id, type).values());
Run lastCompleted = null;
if (runs.size() > 0)
lastCompleted = runs.get(runs.size() - 1);
if (runs.size() > 1 && ! lastCompleted.hasEnded())
lastCompleted = runs.get(runs.size() - 2);
return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
}
/** Returns a URI which points at a badge showing current status for all jobs for the given application. */
public URI overviewBadge(ApplicationId id) {
DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
return badges.overview(id,
steps.jobs().stream()
.map(type -> last(id, type))
.flatMap(Optional::stream)
.collect(toList()));
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
/** Returns a set containing the zone of the deployment tested in the given run, and all production zones for the application. */
public Set<ZoneId> testedZoneAndProductionZones(ApplicationId id, JobType type) {
return Stream.concat(Stream.of(type.zone(controller.system())),
controller.applications().require(id).productionDeployments().keySet().stream())
.collect(Collectors.toSet());
}
private long nextBuild(ApplicationId id) {
return 1 + controller.applications().require(id).deploymentJobs()
.statusOf(JobType.component)
.flatMap(JobStatus::lastCompleted)
.map(JobStatus.JobRun::id)
.orElse(0L);
}
private void prunePackages(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().prune(id, oldestDeployed);
controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
});
});
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Lock __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
try (Lock __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
run = modifications.apply(run);
curator.writeLastRun(run);
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Lock lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.prerequisites())
try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} |
Sure, symmetry is great. | private static boolean canAssignIpOf(Node existingNode, Node node) {
if (node.parentHostname().isEmpty()) return false;
if (!node.parentHostname().get().equals(existingNode.hostname())) return false;
switch (node.type()) {
case proxy: return existingNode.type() == proxyhost;
case config: return existingNode.type() == confighost;
case controller: return existingNode.type() == controllerhost;
}
return false;
} | if (node.parentHostname().isEmpty()) return false; | private static boolean canAssignIpOf(Node existingNode, Node node) {
if (node.parentHostname().isPresent() == existingNode.parentHostname().isPresent()) return false;
if (node.parentHostname().isEmpty()) return canAssignIpOf(node, existingNode);
if (!node.parentHostname().get().equals(existingNode.hostname())) return false;
switch (node.type()) {
case proxy: return existingNode.type() == proxyhost;
case config: return existingNode.type() == confighost;
case controller: return existingNode.type() == controllerhost;
}
return false;
} | class Config {
public static final Config EMPTY = new Config(Set.of(), Set.of());
private final Set<String> primary;
private final Pool pool;
/** DO NOT USE in non-test code. Public for serialization purposes. */
public Config(Set<String> primary, Set<String> pool) {
this.primary = ImmutableSet.copyOf(Objects.requireNonNull(primary, "primary must be non-null"));
this.pool = new Pool(Objects.requireNonNull(pool, "pool must be non-null"));
}
/** The primary addresses of this. These addresses are used when communicating with the node itself */
public Set<String> primary() {
return primary;
}
/** Returns the IP address pool available on a node */
public Pool pool() {
return pool;
}
/** Returns a copy of this with pool set to given value */
public Config with(Pool pool) {
return new Config(primary, pool.asSet());
}
/** Returns a copy of this with pool set to given value */
public Config with(Set<String> primary) {
return new Config(require(primary), pool.asSet());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Config config = (Config) o;
return primary.equals(config.primary) &&
pool.equals(config.pool);
}
@Override
public int hashCode() {
return Objects.hash(primary, pool);
}
@Override
public String toString() {
return String.format("ip config primary=%s pool=%s", primary, pool.asSet());
}
/** Validates and returns the given addresses */
public static Set<String> require(Set<String> addresses) {
try {
addresses.forEach(InetAddresses::forString);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Found one or more invalid addresses in " + addresses, e);
}
return addresses;
}
/**
* Verify IP config of given nodes
*
* @throws IllegalArgumentException if there are IP conflicts with existing nodes
*/
public static List<Node> verify(List<Node> nodes, LockedNodeList allNodes) {
for (var node : nodes) {
for (var other : allNodes) {
if (node.equals(other)) continue;
if (canAssignIpOf(other, node)) continue;
var addresses = new HashSet<>(node.ipConfig().primary());
var otherAddresses = new HashSet<>(other.ipConfig().primary());
if (node.type().isDockerHost()) {
addresses.addAll(node.ipConfig().pool().asSet());
otherAddresses.addAll(other.ipConfig().pool().asSet());
}
otherAddresses.retainAll(addresses);
if (!otherAddresses.isEmpty())
throw new IllegalArgumentException("Cannot assign " + addresses + " to " + node.hostname() +
": " + otherAddresses + " already assigned to " +
other.hostname());
}
}
return nodes;
}
/** Returns whether IP address of existing node can be assigned to node */
public static Node verify(Node node, LockedNodeList allNodes) {
return verify(List.of(node), allNodes).get(0);
}
} | class Config {
public static final Config EMPTY = new Config(Set.of(), Set.of());
private final Set<String> primary;
private final Pool pool;
/** DO NOT USE in non-test code. Public for serialization purposes. */
public Config(Set<String> primary, Set<String> pool) {
this.primary = ImmutableSet.copyOf(Objects.requireNonNull(primary, "primary must be non-null"));
this.pool = new Pool(Objects.requireNonNull(pool, "pool must be non-null"));
}
/** The primary addresses of this. These addresses are used when communicating with the node itself */
public Set<String> primary() {
return primary;
}
/** Returns the IP address pool available on a node */
public Pool pool() {
return pool;
}
/** Returns a copy of this with pool set to given value */
public Config with(Pool pool) {
return new Config(primary, pool.asSet());
}
/** Returns a copy of this with pool set to given value */
public Config with(Set<String> primary) {
return new Config(require(primary), pool.asSet());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Config config = (Config) o;
return primary.equals(config.primary) &&
pool.equals(config.pool);
}
@Override
public int hashCode() {
return Objects.hash(primary, pool);
}
@Override
public String toString() {
return String.format("ip config primary=%s pool=%s", primary, pool.asSet());
}
/** Validates and returns the given addresses */
public static Set<String> require(Set<String> addresses) {
try {
addresses.forEach(InetAddresses::forString);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Found one or more invalid addresses in " + addresses, e);
}
return addresses;
}
/**
* Verify IP config of given nodes
*
* @throws IllegalArgumentException if there are IP conflicts with existing nodes
*/
public static List<Node> verify(List<Node> nodes, LockedNodeList allNodes) {
for (var node : nodes) {
for (var other : allNodes) {
if (node.equals(other)) continue;
if (canAssignIpOf(other, node)) continue;
var addresses = new HashSet<>(node.ipConfig().primary());
var otherAddresses = new HashSet<>(other.ipConfig().primary());
if (node.type().isDockerHost()) {
addresses.addAll(node.ipConfig().pool().asSet());
otherAddresses.addAll(other.ipConfig().pool().asSet());
}
otherAddresses.retainAll(addresses);
if (!otherAddresses.isEmpty())
throw new IllegalArgumentException("Cannot assign " + addresses + " to " + node.hostname() +
": " + otherAddresses + " already assigned to " +
other.hostname());
}
}
return nodes;
}
/** Returns whether IP address of existing node can be assigned to node */
public static Node verify(Node node, LockedNodeList allNodes) {
return verify(List.of(node), allNodes).get(0);
}
} |
```suggestion && ! directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, id.tester().id().serializedForm()).value(); ``` Had to try this. | Optional<URI> testerEndpoint(RunId id) {
DeploymentId testerId = new DeploymentId(id.tester().id(), id.type().zone(controller.system()));
boolean useHttp = controller.system().isPublic()
&& !directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, testerId.applicationId().serializedForm()).value();
return controller.applications().getDeploymentEndpoints(testerId)
.stream().findAny()
.or(() -> controller.applications().routingPolicies().get(testerId).stream()
.findAny()
.map(policy -> policy.endpointIn(controller.system()).url()))
.map(uri -> useHttp ? URI.create("http:
} | && !directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, testerId.applicationId().serializedForm()).value(); | Optional<URI> testerEndpoint(RunId id) {
DeploymentId testerId = new DeploymentId(id.tester().id(), id.type().zone(controller.system()));
boolean useHttp = controller.system().isPublic()
&& ! directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, id.tester().id().serializedForm()).value();
return controller.applications().getDeploymentEndpoints(testerId)
.stream().findAny()
.or(() -> controller.applications().routingPolicies().get(testerId).stream()
.findAny()
.map(policy -> policy.endpointIn(controller.system()).url()))
.map(uri -> useHttp ? URI.create("http:
} | class JobController {
private static final int historyLength = 256;
private static final Duration maxHistoryAge = Duration.ofDays(60);
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final Badges badges;
private final BooleanFlag directRoutingUseHttps;
private AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
public JobController(Controller controller, FlagSource flagSource) {
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
this.cloud = controller.serviceRegistry().testerCloud();
this.badges = new Badges(controller.zoneRegistry().badgeUrl());
this.directRoutingUseHttps = Flags.DIRECT_ROUTING_USE_HTTPS_4443.bindTo(flagSource);
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : applications())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Lock __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
locked(id, __ -> {
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log messages for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
log(id, step, messages.stream()
.map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
.collect(toList()));
}
/** Stores the given log message for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
public void updateVespaLog(RunId id) {
locked(id, run -> {
if ( ! run.steps().containsKey(copyVespaLogs))
return run;
ZoneId zone = id.type().zone(controller.system());
Optional<Deployment> deployment = Optional.ofNullable(controller.applications().require(id.application())
.deployments().get(zone));
if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
return run;
Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at();
List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.application(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
if (log.isEmpty())
return run;
logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
return run.with(log.get(log.size() - 1).at());
});
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
if ( ! run.readySteps().contains(endTests))
return run;
Optional<URI> testerEndpoint = testerEndpoint(id);
if ( ! testerEndpoint.isPresent())
return run;
List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), endTests, entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
last(id, type).ifPresent(run -> runs.put(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.flatMap(Optional::stream)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
locked(id, run -> {
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
var oldEntries = runs.entrySet().iterator();
for (var old = oldEntries.next();
old.getKey().number() <= last - historyLength
|| old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
old = oldEntries.next()) {
logs.delete(old.getKey());
oldEntries.remove();
}
});
logs.flush(id);
return finishedRun;
});
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
locked(id, run -> run.aborted());
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId,
ApplicationPackage applicationPackage, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
application = registered(application);
long run = nextBuild(id);
if (applicationPackage.compileVersion().isPresent() && applicationPackage.buildTime().isPresent())
version.set(ApplicationVersion.from(revision, run, authorEmail,
applicationPackage.compileVersion().get(),
applicationPackage.buildTime().get()));
else
version.set(ApplicationVersion.from(revision, run, authorEmail));
controller.applications().applicationStore().put(id,
version.get(),
applicationPackage.zippedContent());
controller.applications().applicationStore().put(TesterId.of(id),
version.get(),
testPackageBytes);
prunePackages(id);
controller.applications().storeWithUpdatedConfig(application, applicationPackage);
controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
});
return version.get();
}
/** Registers the given application, copying necessary application packages, and returns the modified version. */
private LockedApplication registered(LockedApplication application) {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().put(application.get().id(), appVersion, content);
});
return application.withChange(application.get().change().withoutPlatform().withoutApplication())
.withBuiltInternally(true);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
if ( ! type.environment().isManuallyDeployed() && versions.targetApplication().isUnknown())
throw new IllegalArgumentException("Target application must be a valid reference.");
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
controller.applications().store(registered(application));
});
if ( ! type.environment().isManuallyDeployed())
throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
locked(id, type, __ -> {
controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent());
start(id, type, new Versions(platform.orElse(controller.systemVersion()),
ApplicationVersion.unknown,
Optional.empty(),
Optional.empty()));
runner.get().accept(last(id, type).get());
});
}
/** Aborts a run and waits for it complete. */
private void abortAndWait(RunId id) {
abort(id);
runner.get().accept(last(id.application(), id.type()).get());
while ( ! last(id.application(), id.type()).get().hasEnded()) {
try {
Thread.sleep(100);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Lock ___ = curator.lock(id, type)) {
deactivateTester(tester, type);
curator.deleteRunData(id, type);
logs.delete(id);
}
});
}
catch (TimeoutException e) {
return;
}
curator.deleteRunData(id);
});
}
public void deactivateTester(TesterId id, JobType type) {
try {
controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NotFoundException ignored) {
}
}
/** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
List<Run> runs = new ArrayList<>(runs(id, type).values());
Run lastCompleted = null;
if (runs.size() > 0)
lastCompleted = runs.get(runs.size() - 1);
if (runs.size() > 1 && ! lastCompleted.hasEnded())
lastCompleted = runs.get(runs.size() - 2);
return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
}
/** Returns a URI which points at a badge showing current status for all jobs for the given application. */
public URI overviewBadge(ApplicationId id) {
DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
return badges.overview(id,
steps.jobs().stream()
.map(type -> last(id, type))
.flatMap(Optional::stream)
.collect(toList()));
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
/** Returns a set containing the zone of the deployment tested in the given run, and all production zones for the application. */
public Set<ZoneId> testedZoneAndProductionZones(ApplicationId id, JobType type) {
return Stream.concat(Stream.of(type.zone(controller.system())),
controller.applications().require(id).productionDeployments().keySet().stream())
.collect(Collectors.toSet());
}
private long nextBuild(ApplicationId id) {
return 1 + controller.applications().require(id).deploymentJobs()
.statusOf(JobType.component)
.flatMap(JobStatus::lastCompleted)
.map(JobStatus.JobRun::id)
.orElse(0L);
}
private void prunePackages(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().prune(id, oldestDeployed);
controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
});
});
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Lock __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
try (Lock __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
run = modifications.apply(run);
curator.writeLastRun(run);
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Lock lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.prerequisites())
try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} | class JobController {
private static final int historyLength = 256;
private static final Duration maxHistoryAge = Duration.ofDays(60);
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final Badges badges;
private final BooleanFlag directRoutingUseHttps;
private AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
public JobController(Controller controller, FlagSource flagSource) {
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
this.cloud = controller.serviceRegistry().testerCloud();
this.badges = new Badges(controller.zoneRegistry().badgeUrl());
this.directRoutingUseHttps = Flags.DIRECT_ROUTING_USE_HTTPS_4443.bindTo(flagSource);
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : applications())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Lock __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
locked(id, __ -> {
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log messages for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
log(id, step, messages.stream()
.map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
.collect(toList()));
}
/** Stores the given log message for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
public void updateVespaLog(RunId id) {
locked(id, run -> {
if ( ! run.steps().containsKey(copyVespaLogs))
return run;
ZoneId zone = id.type().zone(controller.system());
Optional<Deployment> deployment = Optional.ofNullable(controller.applications().require(id.application())
.deployments().get(zone));
if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
return run;
Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at();
List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.application(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
if (log.isEmpty())
return run;
logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
return run.with(log.get(log.size() - 1).at());
});
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
if ( ! run.readySteps().contains(endTests))
return run;
Optional<URI> testerEndpoint = testerEndpoint(id);
if ( ! testerEndpoint.isPresent())
return run;
List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), endTests, entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
last(id, type).ifPresent(run -> runs.put(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.flatMap(Optional::stream)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
locked(id, run -> {
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
var oldEntries = runs.entrySet().iterator();
for (var old = oldEntries.next();
old.getKey().number() <= last - historyLength
|| old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
old = oldEntries.next()) {
logs.delete(old.getKey());
oldEntries.remove();
}
});
logs.flush(id);
return finishedRun;
});
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
locked(id, run -> run.aborted());
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId,
ApplicationPackage applicationPackage, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
application = registered(application);
long run = nextBuild(id);
if (applicationPackage.compileVersion().isPresent() && applicationPackage.buildTime().isPresent())
version.set(ApplicationVersion.from(revision, run, authorEmail,
applicationPackage.compileVersion().get(),
applicationPackage.buildTime().get()));
else
version.set(ApplicationVersion.from(revision, run, authorEmail));
controller.applications().applicationStore().put(id,
version.get(),
applicationPackage.zippedContent());
controller.applications().applicationStore().put(TesterId.of(id),
version.get(),
testPackageBytes);
prunePackages(id);
controller.applications().storeWithUpdatedConfig(application, applicationPackage);
controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
});
return version.get();
}
/** Registers the given application, copying necessary application packages, and returns the modified version. */
private LockedApplication registered(LockedApplication application) {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().put(application.get().id(), appVersion, content);
});
return application.withChange(application.get().change().withoutPlatform().withoutApplication())
.withBuiltInternally(true);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
if ( ! type.environment().isManuallyDeployed() && versions.targetApplication().isUnknown())
throw new IllegalArgumentException("Target application must be a valid reference.");
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
controller.applications().store(registered(application));
});
if ( ! type.environment().isManuallyDeployed())
throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
locked(id, type, __ -> {
controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent());
start(id, type, new Versions(platform.orElse(controller.systemVersion()),
ApplicationVersion.unknown,
Optional.empty(),
Optional.empty()));
runner.get().accept(last(id, type).get());
});
}
/** Aborts a run and waits for it complete. */
private void abortAndWait(RunId id) {
abort(id);
runner.get().accept(last(id.application(), id.type()).get());
while ( ! last(id.application(), id.type()).get().hasEnded()) {
try {
Thread.sleep(100);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Lock ___ = curator.lock(id, type)) {
deactivateTester(tester, type);
curator.deleteRunData(id, type);
logs.delete(id);
}
});
}
catch (TimeoutException e) {
return;
}
curator.deleteRunData(id);
});
}
public void deactivateTester(TesterId id, JobType type) {
try {
controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NotFoundException ignored) {
}
}
/** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
List<Run> runs = new ArrayList<>(runs(id, type).values());
Run lastCompleted = null;
if (runs.size() > 0)
lastCompleted = runs.get(runs.size() - 1);
if (runs.size() > 1 && ! lastCompleted.hasEnded())
lastCompleted = runs.get(runs.size() - 2);
return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
}
/** Returns a URI which points at a badge showing current status for all jobs for the given application. */
public URI overviewBadge(ApplicationId id) {
DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
return badges.overview(id,
steps.jobs().stream()
.map(type -> last(id, type))
.flatMap(Optional::stream)
.collect(toList()));
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
/** Returns a set containing the zone of the deployment tested in the given run, and all production zones for the application. */
public Set<ZoneId> testedZoneAndProductionZones(ApplicationId id, JobType type) {
return Stream.concat(Stream.of(type.zone(controller.system())),
controller.applications().require(id).productionDeployments().keySet().stream())
.collect(Collectors.toSet());
}
private long nextBuild(ApplicationId id) {
return 1 + controller.applications().require(id).deploymentJobs()
.statusOf(JobType.component)
.flatMap(JobStatus::lastCompleted)
.map(JobStatus.JobRun::id)
.orElse(0L);
}
private void prunePackages(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().prune(id, oldestDeployed);
controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
});
});
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Lock __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
try (Lock __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
run = modifications.apply(run);
curator.writeLastRun(run);
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Lock lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.prerequisites())
try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} |
```suggestion boolean useHttp = controller.system().isPublic() && ! directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, id.tester().id().serializedForm()).value(); ``` | Optional<URI> testerEndpoint(RunId id) {
DeploymentId testerId = new DeploymentId(id.tester().id(), id.type().zone(controller.system()));
boolean useHttp = controller.system().isPublic()
&& ! directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, id.tester().id().serializedForm()).value();
return controller.applications().getDeploymentEndpoints(testerId)
.stream().findAny()
.or(() -> controller.applications().routingPolicies().get(testerId).stream()
.findAny()
.map(policy -> policy.endpointIn(controller.system()).url()))
.map(uri -> useHttp ? URI.create("http:
} | && ! directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, id.tester().id().serializedForm()).value(); | Optional<URI> testerEndpoint(RunId id) {
DeploymentId testerId = new DeploymentId(id.tester().id(), id.type().zone(controller.system()));
boolean useHttp = controller.system().isPublic()
&& ! directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, id.tester().id().serializedForm()).value();
return controller.applications().getDeploymentEndpoints(testerId)
.stream().findAny()
.or(() -> controller.applications().routingPolicies().get(testerId).stream()
.findAny()
.map(policy -> policy.endpointIn(controller.system()).url()))
.map(uri -> useHttp ? URI.create("http:
} | class JobController {
private static final int historyLength = 256;
private static final Duration maxHistoryAge = Duration.ofDays(60);
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final Badges badges;
private final BooleanFlag directRoutingUseHttps;
private AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
public JobController(Controller controller, FlagSource flagSource) {
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
this.cloud = controller.serviceRegistry().testerCloud();
this.badges = new Badges(controller.zoneRegistry().badgeUrl());
this.directRoutingUseHttps = Flags.DIRECT_ROUTING_USE_HTTPS_4443.bindTo(flagSource);
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : applications())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Lock __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
locked(id, __ -> {
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log messages for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
log(id, step, messages.stream()
.map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
.collect(toList()));
}
/** Stores the given log message for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
public void updateVespaLog(RunId id) {
locked(id, run -> {
if ( ! run.steps().containsKey(copyVespaLogs))
return run;
ZoneId zone = id.type().zone(controller.system());
Optional<Deployment> deployment = Optional.ofNullable(controller.applications().require(id.application())
.deployments().get(zone));
if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
return run;
Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at();
List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.application(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
if (log.isEmpty())
return run;
logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
return run.with(log.get(log.size() - 1).at());
});
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
if ( ! run.readySteps().contains(endTests))
return run;
Optional<URI> testerEndpoint = testerEndpoint(id);
if ( ! testerEndpoint.isPresent())
return run;
List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), endTests, entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
last(id, type).ifPresent(run -> runs.put(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.flatMap(Optional::stream)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
locked(id, run -> {
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
var oldEntries = runs.entrySet().iterator();
for (var old = oldEntries.next();
old.getKey().number() <= last - historyLength
|| old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
old = oldEntries.next()) {
logs.delete(old.getKey());
oldEntries.remove();
}
});
logs.flush(id);
return finishedRun;
});
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
locked(id, run -> run.aborted());
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId,
ApplicationPackage applicationPackage, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
application = registered(application);
long run = nextBuild(id);
if (applicationPackage.compileVersion().isPresent() && applicationPackage.buildTime().isPresent())
version.set(ApplicationVersion.from(revision, run, authorEmail,
applicationPackage.compileVersion().get(),
applicationPackage.buildTime().get()));
else
version.set(ApplicationVersion.from(revision, run, authorEmail));
controller.applications().applicationStore().put(id,
version.get(),
applicationPackage.zippedContent());
controller.applications().applicationStore().put(TesterId.of(id),
version.get(),
testPackageBytes);
prunePackages(id);
controller.applications().storeWithUpdatedConfig(application, applicationPackage);
controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
});
return version.get();
}
/** Registers the given application, copying necessary application packages, and returns the modified version. */
private LockedApplication registered(LockedApplication application) {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().put(application.get().id(), appVersion, content);
});
return application.withChange(application.get().change().withoutPlatform().withoutApplication())
.withBuiltInternally(true);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
if ( ! type.environment().isManuallyDeployed() && versions.targetApplication().isUnknown())
throw new IllegalArgumentException("Target application must be a valid reference.");
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
controller.applications().store(registered(application));
});
if ( ! type.environment().isManuallyDeployed())
throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
locked(id, type, __ -> {
controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent());
start(id, type, new Versions(platform.orElse(controller.systemVersion()),
ApplicationVersion.unknown,
Optional.empty(),
Optional.empty()));
runner.get().accept(last(id, type).get());
});
}
/** Aborts a run and waits for it complete. */
private void abortAndWait(RunId id) {
abort(id);
runner.get().accept(last(id.application(), id.type()).get());
while ( ! last(id.application(), id.type()).get().hasEnded()) {
try {
Thread.sleep(100);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Lock ___ = curator.lock(id, type)) {
deactivateTester(tester, type);
curator.deleteRunData(id, type);
logs.delete(id);
}
});
}
catch (TimeoutException e) {
return;
}
curator.deleteRunData(id);
});
}
public void deactivateTester(TesterId id, JobType type) {
try {
controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NotFoundException ignored) {
}
}
/** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
List<Run> runs = new ArrayList<>(runs(id, type).values());
Run lastCompleted = null;
if (runs.size() > 0)
lastCompleted = runs.get(runs.size() - 1);
if (runs.size() > 1 && ! lastCompleted.hasEnded())
lastCompleted = runs.get(runs.size() - 2);
return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
}
/** Returns a URI which points at a badge showing current status for all jobs for the given application. */
public URI overviewBadge(ApplicationId id) {
DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
return badges.overview(id,
steps.jobs().stream()
.map(type -> last(id, type))
.flatMap(Optional::stream)
.collect(toList()));
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
/** Returns a set containing the zone of the deployment tested in the given run, and all production zones for the application. */
public Set<ZoneId> testedZoneAndProductionZones(ApplicationId id, JobType type) {
    Set<ZoneId> zones = new HashSet<>();
    zones.add(type.zone(controller.system()));
    zones.addAll(controller.applications().require(id).productionDeployments().keySet());
    return zones;
}
/** Returns the build number to use for the next submission of the given application: one past the last completed build, or 1. */
private long nextBuild(ApplicationId id) {
    long lastBuild = controller.applications().require(id).deploymentJobs()
                               .statusOf(JobType.component)
                               .flatMap(JobStatus::lastCompleted)
                               .map(JobStatus.JobRun::id)
                               .orElse(0L);
    return lastBuild + 1;
}
/** Prunes stored application and tester packages older than the oldest version still deployed in production. */
private void prunePackages(ApplicationId id) {
    controller.applications().lockIfPresent(id, application -> {
        application.get().productionDeployments().values().stream()
                   .map(Deployment::applicationVersion)
                   // NOTE(review): getAsLong() throws if buildNumber is absent — presumably production versions always carry one; confirm.
                   .min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
                   .ifPresent(oldestDeployed -> {
                       controller.applications().applicationStore().prune(id, oldestDeployed);
                       controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
                   });
    });
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
    try (Lock __ = curator.lock(id, type)) {
        SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
        modifications.accept(runs);                         // caller mutates the map in place
        curator.writeHistoricRuns(id, type, runs.values()); // persist whatever the caller left in the map
    }
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
    try (Lock __ = curator.lock(id.application(), id.type())) {
        active(id).ifPresent(run -> { // silently a no-op if the run has ended, or is no longer the last of its type
            run = modifications.apply(run);
            curator.writeLastRun(run);
        });
    }
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
    try (Lock lock = curator.lock(id, type, step)) {
        // Taking and immediately releasing each prerequisite's lock proves no prerequisite step is currently running.
        for (Step prerequisite : step.prerequisites())
            try (Lock __ = curator.lock(id, type, prerequisite)) { ; }

        action.accept(new LockedStep(lock, step));
    }
}
} | class JobController {
private static final int historyLength = 256;                      // max number of historic runs kept per job
private static final Duration maxHistoryAge = Duration.ofDays(60); // historic runs older than this are pruned

private final Controller controller;
private final CuratorDb curator;         // ZooKeeper-backed store for run data and locks
private final BufferedLogStore logs;     // buffered store for run logs
private final TesterCloud cloud;
private final Badges badges;
private final BooleanFlag directRoutingUseHttps;

// Callback which advances active runs; replaced via setRunner by the runner component.
private AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });

public JobController(Controller controller, FlagSource flagSource) {
    this.controller = controller;
    this.curator = controller.curator();
    this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
    this.cloud = controller.serviceRegistry().testerCloud();
    this.badges = new Badges(controller.zoneRegistry().badgeUrl());
    this.directRoutingUseHttps = Flags.DIRECT_ROUTING_USE_HTTPS_4443.bindTo(flagSource);
}
/** The tester cloud used to drive test runs. */
public TesterCloud cloud() { return cloud; }

/** Maximum number of historic runs kept per job. */
public int historyLength() { return historyLength; }

/** Installs the callback which advances active runs. */
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
    for (ApplicationId id : applications())
        for (JobType type : jobs(id)) {
            locked(id, type, runs -> {
                // Reading and immediately writing the last run re-serialises it in the current format.
                curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
            });
        }
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
    return details(id, -1); // -1: no id threshold, i.e., return everything
}

/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
    try (Lock __ = curator.lock(id.application(), id.type())) {
        Run run = runs(id.application(), id.type()).get(id);
        if (run == null)
            return Optional.empty();

        // Active runs stream from the buffered store; finished runs come from the archived store.
        return active(id).isPresent()
                ? Optional.of(logs.readActive(id.application(), id.type(), after))
                : logs.readFinished(id, after);
    }
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
    locked(id, __ -> {
        logs.append(id.application(), id.type(), step, entries);
        return __; // the run itself is unchanged; the lock only guards the log append
    });
}

/** Stores the given log messages for the given run and step, at the given level. */
public void log(RunId id, Step step, Level level, List<String> messages) {
    log(id, step, messages.stream()
                          .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
                          .collect(toList()));
}

/** Stores the given log message for the given run and step, at the given level. */
public void log(RunId id, Step step, Level level, String message) {
    log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
public void updateVespaLog(RunId id) {
    locked(id, run -> {
        if ( ! run.steps().containsKey(copyVespaLogs))
            return run; // this run does not collect Vespa logs

        ZoneId zone = id.type().zone(controller.system());
        Optional<Deployment> deployment = Optional.ofNullable(controller.applications().require(id.application())
                                                                        .deployments().get(zone));
        // Only collect logs from a deployment made by this run.
        if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
            return run;

        // Continue from where the previous fetch stopped, but never from before the deployment itself.
        Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at();
        List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                              .getLogs(new DeploymentId(id.application(), zone),
                                                                       Map.of("from", Long.toString(from.toEpochMilli()))),
                                                    from);
        if (log.isEmpty())
            return run;

        logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
        return run.with(log.get(log.size() - 1).at()); // remember the last timestamp for the next fetch
    });
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
    locked(id, run -> {
        if ( ! run.readySteps().contains(endTests))
            return run; // tests are not currently running

        Optional<URI> testerEndpoint = testerEndpoint(id);
        if ( ! testerEndpoint.isPresent())
            return run; // the tester is not reachable yet

        List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
        if (entries.isEmpty())
            return run;

        logs.append(id.application(), id.type(), endTests, entries);
        return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); // remember the last id for the next fetch
    });
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
    // No-op if the run is no longer active; any "already set" check happens inside Run.with — confirm there.
    locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all applications which have registered for internal builds. */
public List<ApplicationId> applications() {
    return copyOf(controller.applications().asList().stream()
                            .filter(application -> application.deploymentJobs().deployedInternally())
                            .map(Application::id)
                            .iterator());
}

/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
    return copyOf(Stream.of(JobType.values())
                        .filter(type -> last(id, type).isPresent()) // a job type has "run" iff a last run is stored
                        .iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
    SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
    last(id, type).ifPresent(run -> runs.put(run.id(), run)); // the last run may not yet be in the historic set
    return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
    for (Run candidate : runs(id.application(), id.type()).values())
        if (candidate.id().equals(id))
            return Optional.of(candidate);

    return Optional.empty();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
    return curator.readLastRun(id, type);
}

/** Returns the run with the given id, provided it is still active: it must be the last run of its type, and not yet ended. */
public Optional<Run> active(RunId id) {
    return last(id.application(), id.type())
            .filter(run -> ! run.hasEnded())
            .filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs, scanning the last run of every job type for every registered application. */
public List<Run> active() {
    return copyOf(applications().stream()
                                .flatMap(id -> Stream.of(JobType.values())
                                                     .map(type -> last(id, type))
                                                     .flatMap(Optional::stream)
                                                     .filter(run -> ! run.hasEnded()))
                                .iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
    locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
    locked(id, run -> {
        Run finishedRun = run.finished(controller.clock().instant());
        locked(id.application(), id.type(), runs -> {
            runs.put(run.id(), finishedRun);
            long last = id.number();

            // Prune runs that fall outside the history window, by count or by age, deleting their logs too.
            // NOTE(review): the loop advances with next() without a hasNext() guard; it relies on the
            // just-added, recent run failing the prune condition before the iterator is exhausted — confirm.
            var oldEntries = runs.entrySet().iterator();
            for (var old = oldEntries.next();
                 old.getKey().number() <= last - historyLength
                 || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
                 old = oldEntries.next()) {
                logs.delete(old.getKey());
                oldEntries.remove();
            }
        });
        logs.flush(id); // move buffered logs for this run to the archived store
        return finishedRun;
    });
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
    locked(id, run -> run.aborted());
}
/**
 * Accepts and stores a new application package and test jar pair under a generated application version key.
 */
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId,
                                 ApplicationPackage applicationPackage, byte[] testPackageBytes) {
    AtomicReference<ApplicationVersion> version = new AtomicReference<>();
    controller.applications().lockOrThrow(id, application -> {
        if ( ! application.get().deploymentJobs().deployedInternally())
            application = registered(application); // first submission: copy existing packages and mark as internally built

        long run = nextBuild(id);
        // Include compile metadata in the version when the package carries it.
        if (applicationPackage.compileVersion().isPresent() && applicationPackage.buildTime().isPresent())
            version.set(ApplicationVersion.from(revision, run, authorEmail,
                                                applicationPackage.compileVersion().get(),
                                                applicationPackage.buildTime().get()));
        else
            version.set(ApplicationVersion.from(revision, run, authorEmail));

        // Store both the application package and the tester package under the new version.
        controller.applications().applicationStore().put(id,
                                                         version.get(),
                                                         applicationPackage.zippedContent());
        controller.applications().applicationStore().put(TesterId.of(id),
                                                         version.get(),
                                                         testPackageBytes);

        prunePackages(id);
        controller.applications().storeWithUpdatedConfig(application, applicationPackage);

        controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
    });
    return version.get();
}
/** Registers the given application, copying necessary application packages, and returns the modified version. */
private LockedApplication registered(LockedApplication application) {
    // Copy the packages of all currently deployed production versions into the internal application store.
    application.get().productionDeployments().values().stream()
               .map(Deployment::applicationVersion)
               .distinct()
               .forEach(appVersion -> {
                   byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
                   controller.applications().applicationStore().put(application.get().id(), appVersion, content);
               });
    // Clear any ongoing change, and flag the application as internally built.
    return application.withChange(application.get().change().withoutPlatform().withoutApplication())
                      .withBuiltInternally(true);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
    if ( ! type.environment().isManuallyDeployed() && versions.targetApplication().isUnknown())
        throw new IllegalArgumentException("Target application must be a valid reference.");

    controller.applications().lockIfPresent(id, application -> {
        if ( ! application.get().deploymentJobs().deployedInternally())
            throw new IllegalArgumentException(id + " is not built here!");

        locked(id, type, __ -> {
            Optional<Run> last = last(id, type);
            if (last.flatMap(run -> active(run.id())).isPresent())
                throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");

            RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); // run numbers start at 1
            curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
        });
    });
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
    controller.applications().lockOrThrow(id, application -> {
        if ( ! application.get().deploymentJobs().deployedInternally())
            controller.applications().store(registered(application));
    });
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    // Abort, and wait for, any run of this type which has not yet ended.
    last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
    locked(id, type, __ -> {
        controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent());
        start(id, type, new Versions(platform.orElse(controller.systemVersion()),
                                     ApplicationVersion.unknown,
                                     Optional.empty(),
                                     Optional.empty()));
        runner.get().accept(last(id, type).get()); // hand the new run to the runner immediately
    });
}
/** Aborts a run and waits for it to complete. */
private void abortAndWait(RunId id) {
    abort(id);
    runner.get().accept(last(id.application(), id.type()).get()); // kick the runner so the abort is acted upon

    // Busy-wait, polling every 100 ms, until the run reaches a terminal state.
    while ( ! last(id.application(), id.type()).get().hasEnded()) {
        try {
            Thread.sleep(100);
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag before bailing out
            throw new RuntimeException(e);
        }
    }
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
    controller.applications().lockIfPresent(id, application -> {
        controller.applications().store(application.withBuiltInternally(false));
        jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id()))); // stop any running jobs
    });
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Lock ___ = curator.lock(id, type)) {
deactivateTester(tester, type);
curator.deleteRunData(id, type);
logs.delete(id);
}
});
}
catch (TimeoutException e) {
return;
}
curator.deleteRunData(id);
});
}
public void deactivateTester(TesterId id, JobType type) {
try {
controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NotFoundException ignored) {
}
}
/** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
List<Run> runs = new ArrayList<>(runs(id, type).values());
Run lastCompleted = null;
if (runs.size() > 0)
lastCompleted = runs.get(runs.size() - 1);
if (runs.size() > 1 && ! lastCompleted.hasEnded())
lastCompleted = runs.get(runs.size() - 2);
return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
}
/** Returns a URI which points at a badge showing current status for all jobs for the given application. */
public URI overviewBadge(ApplicationId id) {
DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
return badges.overview(id,
steps.jobs().stream()
.map(type -> last(id, type))
.flatMap(Optional::stream)
.collect(toList()));
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
/** Returns a set containing the zone of the deployment tested in the given run, and all production zones for the application. */
public Set<ZoneId> testedZoneAndProductionZones(ApplicationId id, JobType type) {
return Stream.concat(Stream.of(type.zone(controller.system())),
controller.applications().require(id).productionDeployments().keySet().stream())
.collect(Collectors.toSet());
}
private long nextBuild(ApplicationId id) {
return 1 + controller.applications().require(id).deploymentJobs()
.statusOf(JobType.component)
.flatMap(JobStatus::lastCompleted)
.map(JobStatus.JobRun::id)
.orElse(0L);
}
private void prunePackages(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().prune(id, oldestDeployed);
controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
});
});
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
try (Lock __ = curator.lock(id, type)) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
modifications.accept(runs);
curator.writeHistoricRuns(id, type, runs.values());
}
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
try (Lock __ = curator.lock(id.application(), id.type())) {
active(id).ifPresent(run -> {
run = modifications.apply(run);
curator.writeLastRun(run);
});
}
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
try (Lock lock = curator.lock(id, type, step)) {
for (Step prerequisite : step.prerequisites())
try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
action.accept(new LockedStep(lock, step));
}
}
} |
```suggestion ``` | Optional<URI> testerEndpoint(RunId id) {
DeploymentId testerId = new DeploymentId(id.tester().id(), id.type().zone(controller.system()));
boolean useHttp = controller.system().isPublic()
boolean useHttp = controller.system().isPublic()
&& ! directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, id.tester().id().serializedForm()).value();
return controller.applications().getDeploymentEndpoints(testerId)
.stream().findAny()
.or(() -> controller.applications().routingPolicies().get(testerId).stream()
.findAny()
.map(policy -> policy.endpointIn(controller.system()).url()))
.map(uri -> useHttp ? URI.create("http:
} | boolean useHttp = controller.system().isPublic() | Optional<URI> testerEndpoint(RunId id) {
DeploymentId testerId = new DeploymentId(id.tester().id(), id.type().zone(controller.system()));
boolean useHttp = controller.system().isPublic()
&& ! directRoutingUseHttps.with(FetchVector.Dimension.APPLICATION_ID, id.tester().id().serializedForm()).value();
return controller.applications().getDeploymentEndpoints(testerId)
.stream().findAny()
.or(() -> controller.applications().routingPolicies().get(testerId).stream()
.findAny()
.map(policy -> policy.endpointIn(controller.system()).url()))
.map(uri -> useHttp ? URI.create("http:
} | class JobController {
private static final int historyLength = 256;
private static final Duration maxHistoryAge = Duration.ofDays(60);
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final Badges badges;
private final BooleanFlag directRoutingUseHttps;
private AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
public JobController(Controller controller, FlagSource flagSource) {
this.controller = controller;
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
this.cloud = controller.serviceRegistry().testerCloud();
this.badges = new Badges(controller.zoneRegistry().badgeUrl());
this.directRoutingUseHttps = Flags.DIRECT_ROUTING_USE_HTTPS_4443.bindTo(flagSource);
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
for (ApplicationId id : applications())
for (JobType type : jobs(id)) {
locked(id, type, runs -> {
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
});
}
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
return details(id, -1);
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
try (Lock __ = curator.lock(id.application(), id.type())) {
Run run = runs(id.application(), id.type()).get(id);
if (run == null)
return Optional.empty();
return active(id).isPresent()
? Optional.of(logs.readActive(id.application(), id.type(), after))
: logs.readFinished(id, after);
}
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
locked(id, __ -> {
logs.append(id.application(), id.type(), step, entries);
return __;
});
}
/** Stores the given log messages for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
log(id, step, messages.stream()
.map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
.collect(toList()));
}
/** Stores the given log message for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
log(id, step, level, Collections.singletonList(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
public void updateVespaLog(RunId id) {
locked(id, run -> {
if ( ! run.steps().containsKey(copyVespaLogs))
return run;
ZoneId zone = id.type().zone(controller.system());
Optional<Deployment> deployment = Optional.ofNullable(controller.applications().require(id.application())
.deployments().get(zone));
if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
return run;
Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at();
List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
.getLogs(new DeploymentId(id.application(), zone),
Map.of("from", Long.toString(from.toEpochMilli()))),
from);
if (log.isEmpty())
return run;
logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
return run.with(log.get(log.size() - 1).at());
});
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
locked(id, run -> {
if ( ! run.readySteps().contains(endTests))
return run;
Optional<URI> testerEndpoint = testerEndpoint(id);
if ( ! testerEndpoint.isPresent())
return run;
List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
if (entries.isEmpty())
return run;
logs.append(id.application(), id.type(), endTests, entries);
return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
});
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
return copyOf(controller.applications().asList().stream()
.filter(application -> application.deploymentJobs().deployedInternally())
.map(Application::id)
.iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
return copyOf(Stream.of(JobType.values())
.filter(type -> last(id, type).isPresent())
.iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
last(id, type).ifPresent(run -> runs.put(run.id(), run));
return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
return runs(id.application(), id.type()).values().stream()
.filter(run -> run.id().equals(id))
.findAny();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
return last(id.application(), id.type())
.filter(run -> ! run.hasEnded())
.filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
return copyOf(applications().stream()
.flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.flatMap(Optional::stream)
.filter(run -> ! run.hasEnded()))
.iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
locked(id, run -> {
Run finishedRun = run.finished(controller.clock().instant());
locked(id.application(), id.type(), runs -> {
runs.put(run.id(), finishedRun);
long last = id.number();
var oldEntries = runs.entrySet().iterator();
for (var old = oldEntries.next();
old.getKey().number() <= last - historyLength
|| old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
old = oldEntries.next()) {
logs.delete(old.getKey());
oldEntries.remove();
}
});
logs.flush(id);
return finishedRun;
});
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
locked(id, run -> run.aborted());
}
/**
* Accepts and stores a new application package and test jar pair under a generated application version key.
*/
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId,
ApplicationPackage applicationPackage, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
application = registered(application);
long run = nextBuild(id);
if (applicationPackage.compileVersion().isPresent() && applicationPackage.buildTime().isPresent())
version.set(ApplicationVersion.from(revision, run, authorEmail,
applicationPackage.compileVersion().get(),
applicationPackage.buildTime().get()));
else
version.set(ApplicationVersion.from(revision, run, authorEmail));
controller.applications().applicationStore().put(id,
version.get(),
applicationPackage.zippedContent());
controller.applications().applicationStore().put(TesterId.of(id),
version.get(),
testPackageBytes);
prunePackages(id);
controller.applications().storeWithUpdatedConfig(application, applicationPackage);
controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
});
return version.get();
}
/** Registers the given application, copying necessary application packages, and returns the modified version. */
private LockedApplication registered(LockedApplication application) {
application.get().productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.distinct()
.forEach(appVersion -> {
byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
controller.applications().applicationStore().put(application.get().id(), appVersion, content);
});
return application.withChange(application.get().change().withoutPlatform().withoutApplication())
.withBuiltInternally(true);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
if ( ! type.environment().isManuallyDeployed() && versions.targetApplication().isUnknown())
throw new IllegalArgumentException("Target application must be a valid reference.");
controller.applications().lockIfPresent(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
Optional<Run> last = last(id, type);
if (last.flatMap(run -> active(run.id())).isPresent())
throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
});
});
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
controller.applications().lockOrThrow(id, application -> {
if ( ! application.get().deploymentJobs().deployedInternally())
controller.applications().store(registered(application));
});
if ( ! type.environment().isManuallyDeployed())
throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
locked(id, type, __ -> {
controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent());
start(id, type, new Versions(platform.orElse(controller.systemVersion()),
ApplicationVersion.unknown,
Optional.empty(),
Optional.empty()));
runner.get().accept(last(id, type).get());
});
}
/** Aborts a run and waits for it complete. */
private void abortAndWait(RunId id) {
abort(id);
runner.get().accept(last(id.application(), id.type()).get());
while ( ! last(id.application(), id.type()).get().hasEnded()) {
try {
Thread.sleep(100);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
controller.applications().lockIfPresent(id, application -> {
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
try {
TesterId tester = TesterId.of(id);
for (JobType type : jobs(id))
locked(id, type, deactivateTester, __ -> {
try (Lock ___ = curator.lock(id, type)) {
deactivateTester(tester, type);
curator.deleteRunData(id, type);
logs.delete(id);
}
});
}
catch (TimeoutException e) {
return;
}
curator.deleteRunData(id);
});
}
public void deactivateTester(TesterId id, JobType type) {
try {
controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
}
catch (NotFoundException ignored) {
}
}
/** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
    List<Run> runs = new ArrayList<>(runs(id, type).values());
    Run lastCompleted = null;
    if (runs.size() > 0)
        lastCompleted = runs.get(runs.size() - 1);
    // If the newest run is still in progress, show the one before it as the last completed run.
    if (runs.size() > 1 && ! lastCompleted.hasEnded())
        lastCompleted = runs.get(runs.size() - 2);
    // Only the trailing historyLength runs are rendered in the badge.
    return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
}
/** Returns a URI which points at a badge showing current status for all jobs for the given application. */
public URI overviewBadge(ApplicationId id) {
    DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
    // Include the last run of each job declared in the deployment spec, skipping jobs never yet run.
    return badges.overview(id,
                           steps.jobs().stream()
                                .map(type -> last(id, type))
                                .flatMap(Optional::stream)
                                .collect(toList()));
}
// NOTE(review): the Javadoc below is orphaned — the testerEndpoint method it described is not present in this chunk; restore the method or remove the comment.
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
/** Returns a set containing the zone of the deployment tested in the given run, and all production zones for the application. */
public Set<ZoneId> testedZoneAndProductionZones(ApplicationId id, JobType type) {
    return Stream.concat(Stream.of(type.zone(controller.system())),
                         controller.applications().require(id).productionDeployments().keySet().stream())
                 .collect(Collectors.toSet());
}
/** Returns the number of the next build: one past the last completed component run, or 1 if none exists. */
private long nextBuild(ApplicationId id) {
    return 1 + controller.applications().require(id).deploymentJobs()
                         .statusOf(JobType.component)
                         .flatMap(JobStatus::lastCompleted)
                         .map(JobStatus.JobRun::id)
                         .orElse(0L);
}
/** Prunes stored application and tester packages older than the oldest version still deployed in production. */
private void prunePackages(ApplicationId id) {
    controller.applications().lockIfPresent(id, application -> {
        application.get().productionDeployments().values().stream()
                   .map(Deployment::applicationVersion)
                   // NOTE(review): getAsLong() assumes every production-deployed version has a build number — confirm unknown versions cannot reach production.
                   .min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
                   .ifPresent(oldestDeployed -> {
                       controller.applications().applicationStore().prune(id, oldestDeployed);
                       controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
                   });
    });
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
    try (Lock __ = curator.lock(id, type)) {
        SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
        modifications.accept(runs);
        // Write the entire history back under the same lock it was read with.
        curator.writeHistoricRuns(id, type, runs.values());
    }
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
    try (Lock __ = curator.lock(id.application(), id.type())) {
        // No-op if the run has ended or is no longer the last run of its job.
        active(id).ifPresent(run -> {
            run = modifications.apply(run);
            curator.writeLastRun(run);
        });
    }
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
    try (Lock lock = curator.lock(id, type, step)) {
        // Momentarily acquire each prerequisite's lock to verify it is not currently executing.
        for (Step prerequisite : step.prerequisites())
            try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
        action.accept(new LockedStep(lock, step));
    }
}
} | class JobController {
// Maximum number of historic runs kept per job, and maximum age before a run is pruned (see finish()).
private static final int historyLength = 256;
private static final Duration maxHistoryAge = Duration.ofDays(60);
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final Badges badges;
private final BooleanFlag directRoutingUseHttps;
// Hook invoked to advance a run; defaults to a no-op until setRunner is called.
// NOTE(review): could be final — the reference itself is never reassigned, only its contents via set().
private AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
/** Creates a JobController backed by the given controller's curator, run-data store, tester cloud and flag source. */
public JobController(Controller controller, FlagSource flagSource) {
    this.controller = controller;
    this.curator = controller.curator();
    this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
    this.cloud = controller.serviceRegistry().testerCloud();
    this.badges = new Badges(controller.zoneRegistry().badgeUrl());
    this.directRoutingUseHttps = Flags.DIRECT_ROUTING_USE_HTTPS_4443.bindTo(flagSource);
}
/** The cloud used to talk to tester applications. */
public TesterCloud cloud() { return cloud; }
/** Maximum number of historic runs kept per job. */
public int historyLength() { return historyLength; }
/** Sets the consumer invoked to advance a run. */
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
    for (ApplicationId id : applications())
        for (JobType type : jobs(id)) {
            // Read-then-write of the last run under the history lock re-serialises it in the current format.
            locked(id, type, runs -> {
                curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
            });
        }
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
    return details(id, -1); // -1: no id threshold, i.e., return every entry.
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
    try (Lock __ = curator.lock(id.application(), id.type())) {
        Run run = runs(id.application(), id.type()).get(id);
        if (run == null)
            return Optional.empty(); // Unknown run id.
        // Active runs read from the live buffer; finished runs from the archived store.
        return active(id).isPresent()
                ? Optional.of(logs.readActive(id.application(), id.type(), after))
                : logs.readFinished(id, after);
    }
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
    // Appending inside locked(id, ...) ensures the run is still active while its log grows.
    locked(id, __ -> {
        logs.append(id.application(), id.type(), step, entries);
        return __;
    });
}
/** Stores the given log messages for the given run and step. */
public void log(RunId id, Step step, Level level, List<String> messages) {
    // Wrap each message in a LogEntry stamped with the current time and the type matching the given level.
    List<LogEntry> entries = new ArrayList<>();
    for (String message : messages)
        entries.add(new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message));
    log(id, step, entries);
}
/** Stores the given log message for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
    // List.of replaces the legacy Collections.singletonList — the file already relies on Java 9+ APIs (Optional::stream).
    log(id, step, level, List.of(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
public void updateVespaLog(RunId id) {
    locked(id, run -> {
        if ( ! run.steps().containsKey(copyVespaLogs))
            return run; // This run does not copy Vespa logs.
        ZoneId zone = id.type().zone(controller.system());
        Optional<Deployment> deployment = Optional.ofNullable(controller.applications().require(id.application())
                                                                        .deployments().get(zone));
        // Only fetch once a deployment made by (or after the start of) this run exists in the zone.
        if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
            return run;
        // Continue from the previous fetch's last timestamp, but never from before the deployment was made.
        Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at();
        List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                              .getLogs(new DeploymentId(id.application(), zone),
                                                                       Map.of("from", Long.toString(from.toEpochMilli()))),
                                                    from);
        if (log.isEmpty())
            return run;
        logs.append(id.application(), id.type(), Step.copyVespaLogs, log);
        // Remember the last timestamp seen, so the next fetch continues from there.
        return run.with(log.get(log.size() - 1).at());
    });
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
    locked(id, run -> {
        if ( ! run.readySteps().contains(endTests))
            return run; // Only poll the tester while the end-tests step is ready.
        Optional<URI> testerEndpoint = testerEndpoint(id);
        if (testerEndpoint.isEmpty()) // isEmpty() for consistency with the rest of the file (was ! isPresent()).
            return run;
        List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
        if (entries.isEmpty())
            return run;
        logs.append(id.application(), id.type(), endTests, entries);
        // Remember the highest entry id seen, so the next poll continues from there.
        return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
    });
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
    // NOTE(review): the throw-if-already-set behaviour presumably lives in Run.with(X509Certificate) — confirm there.
    locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all application which have registered. */
public List<ApplicationId> applications() {
    // Only applications flagged as deployed internally count as registered.
    return copyOf(controller.applications().asList().stream()
                            .filter(application -> application.deploymentJobs().deployedInternally())
                            .map(Application::id)
                            .iterator());
}
/** Returns all job types which have been run for the given application. */
public List<JobType> jobs(ApplicationId id) {
    // A job type counts as run as soon as it has a recorded last run.
    return copyOf(Stream.of(JobType.values())
                        .filter(type -> last(id, type).isPresent())
                        .iterator());
}
/** Returns an immutable map of all known runs for the given application and job type. */
public Map<RunId, Run> runs(ApplicationId id, JobType type) {
    SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
    // The last run may not yet have been written to history; overlay it so callers always see it.
    last(id, type).ifPresent(run -> runs.put(run.id(), run));
    return ImmutableMap.copyOf(runs);
}
/** Returns the run with the given id, if it exists. */
public Optional<Run> run(RunId id) {
    // Scan all known runs of the job for the one with the requested id; at most one can match.
    for (Run run : runs(id.application(), id.type()).values())
        if (run.id().equals(id))
            return Optional.of(run);
    return Optional.empty();
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
    return curator.readLastRun(id, type);
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
    // The run is active iff it is still the last run of its job, matches the given id, and has not ended.
    Optional<Run> latest = last(id.application(), id.type());
    return latest.filter(run -> run.id().equals(id) && ! run.hasEnded());
}
/** Returns a list of all active runs. */
public List<Run> active() {
    // For each registered application, take the last run of every job type and keep those still running.
    return copyOf(applications().stream()
                                .flatMap(id -> Stream.of(JobType.values())
                                                     .map(type -> last(id, type))
                                                     .flatMap(Optional::stream)
                                                     .filter(run -> ! run.hasEnded()))
                                .iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
    // locked(id, ...) is a no-op if the run is no longer active.
    locked(id, run -> run.with(status, step));
}
/** Changes the status of the given run to inactive, and stores it as a historic run. */
public void finish(RunId id) {
    locked(id, run -> {
        Run finishedRun = run.finished(controller.clock().instant());
        locked(id.application(), id.type(), runs -> {
            runs.put(run.id(), finishedRun);
            long last = id.number();
            // Prune from the oldest end while entries fall outside the history window, stopping at the
            // first entry to keep. Guarding each step with hasNext() fixes a latent NoSuchElementException
            // in the original for-loop, whose update expression called next() unconditionally and would
            // throw if every entry (including the one just added) qualified for pruning.
            var oldEntries = runs.entrySet().iterator();
            while (oldEntries.hasNext()) {
                var old = oldEntries.next();
                if (   old.getKey().number() > last - historyLength
                    && ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)))
                    break; // Entries are ordered oldest-first; the remainder are within the window too.
                logs.delete(old.getKey());
                oldEntries.remove();
            }
        });
        logs.flush(id);
        return finishedRun;
    });
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
    locked(id, run -> run.aborted());
}
/**
 * Accepts and stores a new application package and test jar pair under a generated application version key.
 */
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId,
                                 ApplicationPackage applicationPackage, byte[] testPackageBytes) {
    AtomicReference<ApplicationVersion> version = new AtomicReference<>();
    controller.applications().lockOrThrow(id, application -> {
        // First submission also registers the application for internal builds.
        if ( ! application.get().deploymentJobs().deployedInternally())
            application = registered(application);
        long run = nextBuild(id);
        // Prefer the richer version key when the package carries compile metadata.
        if (applicationPackage.compileVersion().isPresent() && applicationPackage.buildTime().isPresent())
            version.set(ApplicationVersion.from(revision, run, authorEmail,
                                                applicationPackage.compileVersion().get(),
                                                applicationPackage.buildTime().get()));
        else
            version.set(ApplicationVersion.from(revision, run, authorEmail));
        // Store both the application package and its test package under the same version key.
        controller.applications().applicationStore().put(id,
                                                         version.get(),
                                                         applicationPackage.zippedContent());
        controller.applications().applicationStore().put(TesterId.of(id),
                                                         version.get(),
                                                         testPackageBytes);
        prunePackages(id);
        controller.applications().storeWithUpdatedConfig(application, applicationPackage);
        // Report the submission as a completed build so deployment jobs are triggered.
        controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
    });
    return version.get();
}
/** Registers the given application, copying necessary application packages, and returns the modified version. */
private LockedApplication registered(LockedApplication application) {
    // Copy each distinct deployed package from the artifact repository into the application store —
    // presumably so existing production deployments remain redeployable after the switch; confirm.
    application.get().productionDeployments().values().stream()
               .map(Deployment::applicationVersion)
               .distinct()
               .forEach(appVersion -> {
                   byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
                   controller.applications().applicationStore().put(application.get().id(), appVersion, content);
               });
    // Clear any pending change and flag the application as built internally from now on.
    return application.withChange(application.get().change().withoutPlatform().withoutApplication())
                      .withBuiltInternally(true);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions) {
    if ( ! type.environment().isManuallyDeployed() && versions.targetApplication().isUnknown())
        throw new IllegalArgumentException("Target application must be a valid reference.");
    controller.applications().lockIfPresent(id, application -> {
        if ( ! application.get().deploymentJobs().deployedInternally())
            throw new IllegalArgumentException(id + " is not built here!");
        locked(id, type, __ -> {
            Optional<Run> last = last(id, type);
            if (last.flatMap(run -> active(run.id())).isPresent())
                throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
            // Run numbers are consecutive per job, starting at 1.
            RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
            curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
        });
    });
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
    controller.applications().lockOrThrow(id, application -> {
        if ( ! application.get().deploymentJobs().deployedInternally())
            controller.applications().store(registered(application));
    });
    if ( ! type.environment().isManuallyDeployed())
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    // Make room for the new run by aborting and waiting out any ongoing one.
    last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
    locked(id, type, __ -> {
        controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent());
        // Dev deployments have no known application version; default the platform to the system version.
        start(id, type, new Versions(platform.orElse(controller.systemVersion()),
                                     ApplicationVersion.unknown,
                                     Optional.empty(),
                                     Optional.empty()));
        // Advance the new run immediately.
        runner.get().accept(last(id, type).get());
    });
}
/** Aborts a run and waits for it complete. */
private void abortAndWait(RunId id) {
    abort(id);
    // Kick the runner once so the aborted run can advance through its remaining run-always steps,
    // then poll until the run is recorded as ended.
    runner.get().accept(last(id.application(), id.type()).get());
    while (true) {
        if (last(id.application(), id.type()).get().hasEnded())
            return;
        try {
            Thread.sleep(100);
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // Preserve interrupt status for callers up the stack.
            throw new RuntimeException(e);
        }
    }
}
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
    controller.applications().lockIfPresent(id, application -> {
        // Clearing the built-internally flag lets collectGarbage reap this application's job data later.
        controller.applications().store(application.withBuiltInternally(false));
        // Abort the last run of every known job type, if still present.
        jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
    });
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
    Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
    curator.applicationsWithJobs().stream()
           .filter(id -> ! applicationsToBuild.contains(id))
           .forEach(id -> {
               try {
                   TesterId tester = TesterId.of(id);
                   for (JobType type : jobs(id))
                       // Hold the deactivateTester step lock so no concurrent run is using the tester we remove.
                       locked(id, type, deactivateTester, __ -> {
                           try (Lock ___ = curator.lock(id, type)) {
                               deactivateTester(tester, type);
                               curator.deleteRunData(id, type);
                               logs.delete(id);
                           }
                       });
               }
               catch (TimeoutException e) {
                   return; // Lock contention: skip this application for now; a later sweep retries.
               }
               curator.deleteRunData(id);
           });
}
/** Deactivates the tester deployment for the given job type's zone; a missing deployment is not an error. */
public void deactivateTester(TesterId id, JobType type) {
    try {
        controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
    }
    catch (NotFoundException ignored) {
        // Already gone — nothing to deactivate.
    }
}
/** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
    List<Run> runs = new ArrayList<>(runs(id, type).values());
    // Pick the newest run as the last completed one; fall back to the run before it if the newest is still in progress.
    Run lastCompleted = runs.isEmpty() ? null : runs.get(runs.size() - 1);
    if (lastCompleted != null && ! lastCompleted.hasEnded() && runs.size() > 1)
        lastCompleted = runs.get(runs.size() - 2);
    // Only the trailing historyLength runs are rendered in the badge.
    return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
}
/** Returns a URI which points at a badge showing current status for all jobs for the given application. */
public URI overviewBadge(ApplicationId id) {
    DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
    // Include the last run of each job declared in the deployment spec, skipping jobs never yet run.
    return badges.overview(id,
                           steps.jobs().stream()
                                .map(type -> last(id, type))
                                .flatMap(Optional::stream)
                                .collect(toList()));
}
/** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */ // NOTE(review): orphaned Javadoc — the testerEndpoint method it documents is not present in this chunk; restore the method or remove this comment.
/** Returns a set containing the zone of the deployment tested in the given run, and all production zones for the application. */
public Set<ZoneId> testedZoneAndProductionZones(ApplicationId id, JobType type) {
    return Stream.concat(Stream.of(type.zone(controller.system())),
                         controller.applications().require(id).productionDeployments().keySet().stream())
                 .collect(Collectors.toSet());
}
/** Returns the number of the next build: one past the last completed component run, or 1 if none exists. */
private long nextBuild(ApplicationId id) {
    return 1 + controller.applications().require(id).deploymentJobs()
                         .statusOf(JobType.component)
                         .flatMap(JobStatus::lastCompleted)
                         .map(JobStatus.JobRun::id)
                         .orElse(0L);
}
/** Prunes stored application and tester packages older than the oldest version still deployed in production. */
private void prunePackages(ApplicationId id) {
    controller.applications().lockIfPresent(id, application -> {
        application.get().productionDeployments().values().stream()
                   .map(Deployment::applicationVersion)
                   // NOTE(review): getAsLong() assumes every production-deployed version has a build number — confirm unknown versions cannot reach production.
                   .min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
                   .ifPresent(oldestDeployed -> {
                       controller.applications().applicationStore().prune(id, oldestDeployed);
                       controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
                   });
    });
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
    try (Lock __ = curator.lock(id, type)) {
        SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
        modifications.accept(runs);
        // Write the entire history back under the same lock it was read with.
        curator.writeHistoricRuns(id, type, runs.values());
    }
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
    try (Lock __ = curator.lock(id.application(), id.type())) {
        // No-op if the run has ended or is no longer the last run of its job.
        active(id).ifPresent(run -> {
            run = modifications.apply(run);
            curator.writeLastRun(run);
        });
    }
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
    try (Lock lock = curator.lock(id, type, step)) {
        // Momentarily acquire each prerequisite's lock to verify it is not currently executing.
        for (Step prerequisite : step.prerequisites())
            try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
        action.accept(new LockedStep(lock, step));
    }
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.